From 8362868578f6a7cb841645acc3edfe3c7296f25f Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 30 Dec 2024 09:02:36 -0700 Subject: [PATCH 01/32] CP/DP Split: Remove NGINX manager and deployment (#2936) Removing the nginx runtime manager and deployment container, since nginx will live in its own pod managed by the agent. Temporarily saving the nginx deployment and service manifests for future use. Updated the control plane readiness probe to succeed once the controller has processed all resources, instead of after it has written configuration to nginx (since nginx may not be started yet in the future architecture). --- build/Dockerfile | 26 +- charts/nginx-gateway-fabric/README.md | 3 +- .../templates/deployment.yaml | 155 ------- .../nginx-gateway-fabric/templates/scc.yaml | 2 +- .../templates/service.yaml | 31 +- .../tmp/tmp-nginx-deployment.yaml | 169 +++++++ .../tmp/tmp-nginx-service.yaml | 35 ++ .../nginx-gateway-fabric/values.schema.json | 29 +- charts/nginx-gateway-fabric/values.yaml | 14 +- cmd/gateway/commands.go | 2 +- cmd/gateway/initialize.go | 4 +- cmd/gateway/initialize_test.go | 4 +- config/tests/static-deployment.yaml | 101 ----- deploy/aws-nlb/deploy.yaml | 113 +---- deploy/azure/deploy.yaml | 110 +---- deploy/default/deploy.yaml | 110 +---- deploy/experimental-nginx-plus/deploy.yaml | 123 +----- deploy/experimental/deploy.yaml | 110 +---- deploy/nginx-plus/deploy.yaml | 123 +----- deploy/nodeport/deploy.yaml | 110 +---- deploy/openshift/deploy.yaml | 110 +---- .../snippets-filters-nginx-plus/deploy.yaml | 123 +----- deploy/snippets-filters/deploy.yaml | 110 +---- go.mod | 2 - go.sum | 4 - .../manager.go => framework/file/file.go} | 73 +-- .../file/file_suite_test.go | 0 internal/framework/file/file_test.go | 155 +++++++ .../file/filefakes/fake_osfile_manager.go | 156 +------ .../file/os_filemanager.go | 0 internal/mode/static/handler.go | 160 +------ internal/mode/static/handler_test.go | 290 ++---------- internal/mode/static/health.go | 27 +- internal/mode/static/health_test.go | 8 +- internal/mode/static/manager.go | 106 ++--- .../mode/static/metrics/collectors/nginx.go | 48 -- .../metrics/collectors/nginx_runtime.go | 26 +- internal/mode/static/nginx/agent/agent.go | 36 ++ .../agent/agentfakes/fake_nginx_updater.go | 106 +++++ internal/mode/static/nginx/agent/doc.go | 4 + .../config/configfakes/fake_generator.go | 2 +- internal/mode/static/nginx/config/convert.go | 58 --- .../mode/static/nginx/config/convert_test.go | 78 ---- .../mode/static/nginx/config/generator.go | 10 +- .../static/nginx/config/generator_test.go | 75 ++-- .../mode/static/nginx/config/main_config.go | 2 +- internal/mode/static/nginx/config/version.go | 19 - .../static/nginx/config/version_template.go | 12 - .../mode/static/nginx/config/version_test.go | 20 - .../fake_clear_folders_osfile_manager.go | 191 -------- .../nginx/file/filefakes/fake_dir_entry.go | 301 ------------- .../nginx/file/filefakes/fake_manager.go | 116 ----- internal/mode/static/nginx/file/folders.go | 56 --- .../mode/static/nginx/file/folders_test.go | 129 ------ .../mode/static/nginx/file/manager_test.go | 225 ---------- internal/mode/static/nginx/runtime/clients.go | 39 -- internal/mode/static/nginx/runtime/manager.go | 284 ------------ .../mode/static/nginx/runtime/manager_test.go | 403 ----------------- .../nginx/runtime/runtime_suite_test.go | 14 - .../runtime/runtimefakes/fake_manager.go | 417 ------------------ .../runtimefakes/fake_metrics_collector.go | 137 ------ .../fake_nginx_config_verifier.go | 269 ----------- 
.../runtimefakes/fake_nginx_plus_client.go | 370 ---------------- .../runtimefakes/fake_process_handler.go | 273 ------------ .../runtimefakes/fake_verify_client.go | 269 ----------- internal/mode/static/nginx/runtime/verify.go | 155 ------- .../mode/static/nginx/runtime/verify_test.go | 186 -------- 67 files changed, 720 insertions(+), 6308 deletions(-) create mode 100644 charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml create mode 100644 charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml rename internal/{mode/static/nginx/file/manager.go => framework/file/file.go} (53%) rename internal/{mode/static/nginx => framework}/file/file_suite_test.go (100%) create mode 100644 internal/framework/file/file_test.go rename internal/{mode/static/nginx => framework}/file/filefakes/fake_osfile_manager.go (72%) rename internal/{mode/static/nginx => framework}/file/os_filemanager.go (100%) delete mode 100644 internal/mode/static/metrics/collectors/nginx.go create mode 100644 internal/mode/static/nginx/agent/agent.go create mode 100644 internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go create mode 100644 internal/mode/static/nginx/agent/doc.go delete mode 100644 internal/mode/static/nginx/config/convert.go delete mode 100644 internal/mode/static/nginx/config/convert_test.go delete mode 100644 internal/mode/static/nginx/config/version.go delete mode 100644 internal/mode/static/nginx/config/version_template.go delete mode 100644 internal/mode/static/nginx/config/version_test.go delete mode 100644 internal/mode/static/nginx/file/filefakes/fake_clear_folders_osfile_manager.go delete mode 100644 internal/mode/static/nginx/file/filefakes/fake_dir_entry.go delete mode 100644 internal/mode/static/nginx/file/filefakes/fake_manager.go delete mode 100644 internal/mode/static/nginx/file/folders.go delete mode 100644 internal/mode/static/nginx/file/folders_test.go delete mode 100644 internal/mode/static/nginx/file/manager_test.go delete mode 100644 internal/mode/static/nginx/runtime/clients.go delete mode 100644 internal/mode/static/nginx/runtime/manager.go delete mode 100644 internal/mode/static/nginx/runtime/manager_test.go delete mode 100644 internal/mode/static/nginx/runtime/runtime_suite_test.go delete mode 100644 internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go delete mode 100644 internal/mode/static/nginx/runtime/runtimefakes/fake_metrics_collector.go delete mode 100644 internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_config_verifier.go delete mode 100644 internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_plus_client.go delete mode 100644 internal/mode/static/nginx/runtime/runtimefakes/fake_process_handler.go delete mode 100644 internal/mode/static/nginx/runtime/runtimefakes/fake_verify_client.go delete mode 100644 internal/mode/static/nginx/runtime/verify.go delete mode 100644 internal/mode/static/nginx/runtime/verify_test.go diff --git a/build/Dockerfile b/build/Dockerfile index d00d848668..b100acfaf0 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -11,25 +11,8 @@ RUN make build FROM golang:1.24 AS ca-certs-provider -FROM alpine:3.21 AS capabilizer -RUN apk add --no-cache libcap - -FROM capabilizer AS local-capabilizer -COPY ./build/out/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - -FROM capabilizer AS container-capabilizer -COPY --from=builder /go/src/github.com/nginx/nginx-gateway-fabric/build/out/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - -FROM capabilizer AS goreleaser-capabilizer -ARG 
TARGETARCH -COPY dist/gateway_linux_$TARGETARCH*/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - FROM scratch AS common -# CA certs are needed for telemetry report and NGINX Plus usage report features, so that -# NGF can verify the server's certificate. +# CA certs are needed for telemetry report so that NGF can verify the server's certificate. COPY --from=ca-certs-provider --link /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ USER 102:1001 ARG BUILD_AGENT @@ -37,10 +20,11 @@ ENV BUILD_AGENT=${BUILD_AGENT} ENTRYPOINT [ "/usr/bin/gateway" ] FROM common AS container -COPY --from=container-capabilizer /usr/bin/gateway /usr/bin/ +COPY --from=builder /go/src/github.com/nginxinc/nginx-gateway-fabric/build/out/gateway /usr/bin/ FROM common AS local -COPY --from=local-capabilizer /usr/bin/gateway /usr/bin/ +COPY ./build/out/gateway /usr/bin/ FROM common AS goreleaser -COPY --from=goreleaser-capabilizer /usr/bin/gateway /usr/bin/ +ARG TARGETARCH +COPY dist/gateway_linux_$TARGETARCH*/gateway /usr/bin/ diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 3dce9a4ac7..096898bbde 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -268,6 +268,7 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | `nginx.image.tag` | | string | `"edge"` | | `nginx.lifecycle` | The lifecycle of the nginx container. | object | `{}` | | `nginx.plus` | Is NGINX Plus image being used | bool | `false` | +| `nginx.securityContext.allowPrivilegeEscalation` | Some environments may need this set to true in order for the control plane to successfully reload NGINX. | bool | `false` | | `nginx.usage.caSecretName` | The name of the Secret containing the NGINX Instance Manager CA certificate. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.clientSSLSecretName` | The name of the Secret containing the client certificate and key for authenticating with NGINX Instance Manager. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.endpoint` | The endpoint of the NGINX Plus usage reporting server. Default: product.connect.nginx.com | string | `""` | @@ -296,7 +297,7 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | `nginxGateway.readinessProbe.port` | Port in which the readiness endpoint is exposed. | int | `8081` | | `nginxGateway.replicaCount` | The number of replicas of the NGINX Gateway Fabric Deployment. | int | `1` | | `nginxGateway.resources` | The resource requests and/or limits of the nginx-gateway container. | object | `{}` | -| `nginxGateway.securityContext.allowPrivilegeEscalation` | Some environments may need this set to true in order for the control plane to successfully reload NGINX. | bool | `false` | +| `nginxGateway.service.annotations` | The annotations of the NGINX Gateway Fabric control plane service. | object | `{}` | | `nginxGateway.snippetsFilters.enable` | Enable SnippetsFilters feature. SnippetsFilters allow inserting NGINX configuration into the generated NGINX config for HTTPRoute and GRPCRoute resources. | bool | `false` | | `nodeSelector` | The nodeSelector of the NGINX Gateway Fabric pod. | object | `{}` | | `service.annotations` | The annotations of the NGINX Gateway Fabric service. 
| object | `{}` | diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index 25d0547f3a..204e4b5c3b 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -39,43 +39,6 @@ spec: topologySpreadConstraints: {{- toYaml .Values.topologySpreadConstraints | nindent 8 }} {{- end }} - initContainers: - - name: init - image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} - imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - {{- if .Values.nginx.plus }} - - --source - - /includes/mgmt.conf - - --nginx-plus - {{- end }} - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - KILL # Set because the binary has CAP_KILL for the main controller process. Not used by init. - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 102 - runAsGroup: 1001 - volumeMounts: - - name: nginx-includes-bootstrap - mountPath: /includes - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes containers: - args: - static-mode @@ -177,99 +140,21 @@ spec: securityContext: seccompProfile: type: RuntimeDefault - allowPrivilegeEscalation: {{ .Values.nginxGateway.securityContext.allowPrivilegeEscalation }} capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsUser: 102 runAsGroup: 1001 - volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes {{- with .Values.nginxGateway.extraVolumeMounts -}} {{ toYaml . 
| nindent 8 }} {{- end }} - - image: {{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag | default .Chart.AppVersion }} - imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} - name: nginx - {{- if .Values.nginx.lifecycle }} - lifecycle: - {{- toYaml .Values.nginx.lifecycle | nindent 10 }} - {{- end }} - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 101 - runAsGroup: 1001 - volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-cache - mountPath: /var/cache/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes - {{- if .Values.nginx.plus }} - - name: nginx-lib - mountPath: /var/lib/nginx/state - {{- if .Values.nginx.usage.secretName }} - - name: nginx-plus-license - mountPath: /etc/nginx/license.jwt - subPath: license.jwt - {{- end }} - {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} - - name: nginx-plus-usage-certs - mountPath: /etc/nginx/certs-bootstrap/ - {{- end }} - {{- end }} - {{- with .Values.nginx.extraVolumeMounts -}} - {{ toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.nginx.debug }} - command: - - "/bin/sh" - args: - - "-c" - - "rm -rf /var/run/nginx/*.sock && nginx-debug -g 'daemon off;'" - {{- end }} terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} {{- if .Values.affinity }} affinity: {{- toYaml .Values.affinity | nindent 8 }} {{- end }} serviceAccountName: {{ include "nginx-gateway.serviceAccountName" . }} - shareProcessNamespace: true securityContext: fsGroup: 1001 runAsNonRoot: true @@ -281,46 +166,6 @@ spec: nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} {{- end }} - volumes: - - name: nginx-conf - emptyDir: {} - - name: nginx-stream-conf - emptyDir: {} - - name: nginx-main-includes - emptyDir: {} - - name: nginx-secrets - emptyDir: {} - - name: nginx-run - emptyDir: {} - - name: nginx-cache - emptyDir: {} - - name: nginx-includes - emptyDir: {} - - name: nginx-includes-bootstrap - configMap: - name: nginx-includes-bootstrap - {{- if .Values.nginx.plus }} - - name: nginx-lib - emptyDir: {} - {{- if .Values.nginx.usage.secretName }} - - name: nginx-plus-license - secret: - secretName: {{ .Values.nginx.usage.secretName }} - {{- end }} - {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} - - name: nginx-plus-usage-certs - projected: - sources: - {{- if .Values.nginx.usage.caSecretName }} - - secret: - name: {{ .Values.nginx.usage.caSecretName }} - {{- end }} - {{- if .Values.nginx.usage.clientSSLSecretName }} - - secret: - name: {{ .Values.nginx.usage.clientSSLSecretName }} - {{- end }} - {{- end }} - {{- end }} {{- with .Values.extraVolumes -}} {{ toYaml . | nindent 6 }} {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/scc.yaml b/charts/nginx-gateway-fabric/templates/scc.yaml index 8156b279b7..e58389a8ec 100644 --- a/charts/nginx-gateway-fabric/templates/scc.yaml +++ b/charts/nginx-gateway-fabric/templates/scc.yaml @@ -3,7 +3,7 @@ kind: SecurityContextConstraints apiVersion: security.openshift.io/v1 metadata: name: {{ include "nginx-gateway.scc-name" . 
}} -allowPrivilegeEscalation: {{ .Values.nginxGateway.securityContext.allowPrivilegeEscalation }} +allowPrivilegeEscalation: {{ .Values.nginx.securityContext.allowPrivilegeEscalation }} allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false diff --git a/charts/nginx-gateway-fabric/templates/service.yaml b/charts/nginx-gateway-fabric/templates/service.yaml index a80686dc7e..7324f04723 100644 --- a/charts/nginx-gateway-fabric/templates/service.yaml +++ b/charts/nginx-gateway-fabric/templates/service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.service.create }} apiVersion: v1 kind: Service metadata: @@ -6,30 +5,16 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "nginx-gateway.labels" . | nindent 4 }} -{{- if .Values.service.annotations }} +{{- if .Values.nginxGateway.service.annotations }} annotations: -{{ toYaml .Values.service.annotations | indent 4 }} +{{ toYaml .Values.nginxGateway.service.annotations | indent 4 }} {{- end }} spec: -{{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} - {{- if .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} - {{- end }} -{{- end }} - type: {{ .Values.service.type }} -{{- if eq .Values.service.type "LoadBalancer" }} - {{- if .Values.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - {{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 2 }} - {{- end }} -{{- end}} + type: ClusterIP selector: {{- include "nginx-gateway.selectorLabels" . | nindent 4 }} - ports: # Update the following ports to match your Gateway Listener ports -{{- if .Values.service.ports }} -{{ toYaml .Values.service.ports | indent 2 }} -{{ end }} -{{- end }} + ports: + - name: grpc + port: 443 + protocol: TCP + targetPort: 443 diff --git a/charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml b/charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml new file mode 100644 index 0000000000..9ddaea89f1 --- /dev/null +++ b/charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml @@ -0,0 +1,169 @@ +# apiVersion: apps/v1 +# kind: Deployment +# metadata: +# name: tmp-nginx-deployment +# namespace: {{ .Release.Namespace }} +# spec: +# template: +# spec: +# initContainers: +# - name: init +# image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} +# imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} +# command: +# - /usr/bin/gateway +# - initialize +# - --source +# - /includes/main.conf +# {{- if .Values.nginx.plus }} +# - --source +# - /includes/mgmt.conf +# - --nginx-plus +# {{- end }} +# - --destination +# - /etc/nginx/main-includes +# env: +# - name: POD_UID +# valueFrom: +# fieldRef: +# fieldPath: metadata.uid +# securityContext: +# seccompProfile: +# type: RuntimeDefault +# capabilities: +# add: +# - KILL # Set because the binary has CAP_KILL for the main controller process. Not used by init. 
+# drop: +# - ALL +# readOnlyRootFilesystem: true +# runAsUser: 102 +# runAsGroup: 1001 +# volumeMounts: +# - name: nginx-includes-bootstrap +# mountPath: /includes +# - name: nginx-main-includes +# mountPath: /etc/nginx/main-includes +# containers: +# - image: {{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag | default .Chart.AppVersion }} +# imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} +# name: nginx +# {{- if .Values.nginx.lifecycle }} +# lifecycle: +# {{- toYaml .Values.nginx.lifecycle | nindent 10 }} +# {{- end }} +# ports: +# - containerPort: 80 +# name: http +# - containerPort: 443 +# name: https +# securityContext: +# seccompProfile: +# type: RuntimeDefault +# allowPrivilegeEscalation: {{ .Values.nginx.securityContext.allowPrivilegeEscalation }} +# capabilities: +# add: +# - NET_BIND_SERVICE +# drop: +# - ALL +# readOnlyRootFilesystem: true +# runAsUser: 101 +# runAsGroup: 1001 +# volumeMounts: +# - name: nginx-conf +# mountPath: /etc/nginx/conf.d +# - name: nginx-stream-conf +# mountPath: /etc/nginx/stream-conf.d +# - name: nginx-main-includes +# mountPath: /etc/nginx/main-includes +# - name: nginx-secrets +# mountPath: /etc/nginx/secrets +# - name: nginx-run +# mountPath: /var/run/nginx +# - name: nginx-cache +# mountPath: /var/cache/nginx +# - name: nginx-includes +# mountPath: /etc/nginx/includes +# {{- if .Values.nginx.plus }} +# - name: nginx-lib +# mountPath: /var/lib/nginx/state +# {{- if .Values.nginx.usage.secretName }} +# - name: nginx-plus-license +# mountPath: /etc/nginx/license.jwt +# subPath: license.jwt +# {{- end }} +# {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} +# - name: nginx-plus-usage-certs +# mountPath: /etc/nginx/certs-bootstrap/ +# {{- end }} +# {{- end }} +# {{- with .Values.nginx.extraVolumeMounts -}} +# {{ toYaml . | nindent 8 }} +# {{- end }} +# {{- if .Values.nginx.debug }} +# command: +# - "/bin/sh" +# args: +# - "-c" +# - "rm -rf /var/run/nginx/*.sock && nginx-debug -g 'daemon off;'" +# {{- end }} +# terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} +# {{- if .Values.affinity }} +# affinity: +# {{- toYaml .Values.affinity | nindent 8 }} +# {{- end }} +# serviceAccountName: {{ include "nginx-gateway.serviceAccountName" . 
}} +# shareProcessNamespace: true +# securityContext: +# fsGroup: 1001 +# runAsNonRoot: true +# {{- if .Values.tolerations }} +# tolerations: +# {{- toYaml .Values.tolerations | nindent 6 }} +# {{- end }} +# {{- if .Values.nodeSelector }} +# nodeSelector: +# {{- toYaml .Values.nodeSelector | nindent 8 }} +# {{- end }} +# volumes: +# - name: nginx-conf +# emptyDir: {} +# - name: nginx-stream-conf +# emptyDir: {} +# - name: nginx-main-includes +# emptyDir: {} +# - name: nginx-secrets +# emptyDir: {} +# - name: nginx-run +# emptyDir: {} +# - name: nginx-cache +# emptyDir: {} +# - name: nginx-includes +# emptyDir: {} +# - name: nginx-includes-bootstrap +# configMap: +# name: nginx-includes-bootstrap +# {{- if .Values.nginx.plus }} +# - name: nginx-lib +# emptyDir: {} +# {{- if .Values.nginx.usage.secretName }} +# - name: nginx-plus-license +# secret: +# secretName: {{ .Values.nginx.usage.secretName }} +# {{- end }} +# {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} +# - name: nginx-plus-usage-certs +# projected: +# sources: +# {{- if .Values.nginx.usage.caSecretName }} +# - secret: +# name: {{ .Values.nginx.usage.caSecretName }} +# {{- end }} +# {{- if .Values.nginx.usage.clientSSLSecretName }} +# - secret: +# name: {{ .Values.nginx.usage.clientSSLSecretName }} +# {{- end }} +# {{- end }} +# {{- end }} +# {{- with .Values.extraVolumes -}} +# {{ toYaml . | nindent 6 }} +# {{- end }} diff --git a/charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml b/charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml new file mode 100644 index 0000000000..30901bfb6a --- /dev/null +++ b/charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml @@ -0,0 +1,35 @@ +# {{- if .Values.service.create }} +# apiVersion: v1 +# kind: Service +# metadata: +# name: {{ include "nginx-gateway.fullname" . }} +# namespace: {{ .Release.Namespace }} +# labels: +# {{- include "nginx-gateway.labels" . | nindent 4 }} +# {{- if .Values.service.annotations }} +# annotations: +# {{ toYaml .Values.service.annotations | indent 4 }} +# {{- end }} +# spec: +# {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} +# {{- if .Values.service.externalTrafficPolicy }} +# externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} +# {{- end }} +# {{- end }} +# type: {{ .Values.service.type }} +# {{- if eq .Values.service.type "LoadBalancer" }} +# {{- if .Values.service.loadBalancerIP }} +# loadBalancerIP: {{ .Values.service.loadBalancerIP }} +# {{- end }} +# {{- if .Values.service.loadBalancerSourceRanges }} +# loadBalancerSourceRanges: +# {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 2 }} +# {{- end }} +# {{- end}} +# selector: +# {{- include "nginx-gateway.selectorLabels" . 
| nindent 4 }} +# ports: # Update the following ports to match your Gateway Listener ports +# {{- if .Values.service.ports }} +# {{ toYaml .Values.service.ports | indent 2 }} +# {{ end }} +# {{- end }} diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index 5ef6236304..84ea47c5b8 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -287,6 +287,20 @@ "title": "plus", "type": "boolean" }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "default": false, + "description": "Some environments may need this set to true in order for the control plane to successfully reload NGINX.", + "required": [], + "title": "allowPrivilegeEscalation", + "type": "boolean" + } + }, + "required": [], + "title": "securityContext", + "type": "object" + }, "usage": { "description": "Configuration for NGINX Plus usage reporting.", "properties": { @@ -511,7 +525,7 @@ "type": "object" }, "readinessProbe": { - "description": "# Defines the settings for the control plane readiness probe. This probe returns Ready when the controller\n# has started and configured NGINX to serve traffic.", + "description": "# Defines the settings for the control plane readiness probe. This probe returns Ready when the controller\n# has started and is ready to configure NGINX.", "properties": { "enable": { "default": true, @@ -554,18 +568,17 @@ "title": "resources", "type": "object" }, - "securityContext": { + "service": { "properties": { - "allowPrivilegeEscalation": { - "default": false, - "description": "Some environments may need this set to true in order for the control plane to successfully reload NGINX.", + "annotations": { + "description": "The annotations of the NGINX Gateway Fabric control plane service.", "required": [], - "title": "allowPrivilegeEscalation", - "type": "boolean" + "title": "annotations", + "type": "object" } }, "required": [], - "title": "securityContext", + "title": "service", "type": "object" }, "snippetsFilters": { diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index c817fb76b7..4cdd1b42db 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -50,6 +50,10 @@ nginxGateway: # -- Set of custom annotations for NginxGateway objects. configAnnotations: {} + service: + # -- The annotations of the NGINX Gateway Fabric control plane service. + annotations: {} + # -- The number of replicas of the NGINX Gateway Fabric Deployment. replicaCount: 1 @@ -66,7 +70,7 @@ nginxGateway: lockName: "" ## Defines the settings for the control plane readiness probe. This probe returns Ready when the controller - ## has started and configured NGINX to serve traffic. + ## has started and is ready to configure NGINX. readinessProbe: # -- Enable the /readyz endpoint on the control plane. enable: true @@ -94,10 +98,6 @@ nginxGateway: # @schema pullPolicy: Always - securityContext: - # -- Some environments may need this set to true in order for the control plane to successfully reload NGINX. - allowPrivilegeEscalation: false - productTelemetry: # -- Enable the collection of product telemetry. enable: true @@ -134,6 +134,10 @@ nginx: # @schema pullPolicy: Always + securityContext: + # -- Some environments may need this set to true in order for the control plane to successfully reload NGINX. 
+ allowPrivilegeEscalation: false + # -- Is NGINX Plus image being used plus: false diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index ed2b473de1..a48d649f28 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -19,12 +19,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ctlrZap "sigs.k8s.io/controller-runtime/pkg/log/zap" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/provisioner" "github.com/nginx/nginx-gateway-fabric/internal/mode/static" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" ngxConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" ) // These flags are shared by multiple commands. diff --git a/cmd/gateway/initialize.go b/cmd/gateway/initialize.go index 59af1e4f0e..02865d1a89 100644 --- a/cmd/gateway/initialize.go +++ b/cmd/gateway/initialize.go @@ -8,9 +8,9 @@ import ( "github.com/go-logr/logr" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" ) const ( @@ -58,7 +58,7 @@ func initialize(cfg initializeConfig) error { return fmt.Errorf("failed to generate deployment context file: %w", err) } - if err := file.WriteFile(cfg.fileManager, depCtxFile); err != nil { + if err := file.Write(cfg.fileManager, depCtxFile); err != nil { return fmt.Errorf("failed to write deployment context file: %w", err) } diff --git a/cmd/gateway/initialize_test.go b/cmd/gateway/initialize_test.go index 6f0f00ad8f..4d7e606c0f 100644 --- a/cmd/gateway/initialize_test.go +++ b/cmd/gateway/initialize_test.go @@ -11,11 +11,11 @@ import ( "github.com/go-logr/logr" . 
"github.com/onsi/gomega" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file/filefakes" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing/licensingfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/configfakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) diff --git a/config/tests/static-deployment.yaml b/config/tests/static-deployment.yaml index 698f9b82ca..d6eaf45103 100644 --- a/config/tests/static-deployment.yaml +++ b/config/tests/static-deployment.yaml @@ -21,38 +21,6 @@ spec: app.kubernetes.io/name: nginx-gateway app.kubernetes.io/instance: nginx-gateway spec: - initContainers: - - name: init - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - KILL # Set because the binary has CAP_KILL for the main controller process. Not used by init. - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 102 - runAsGroup: 1001 - volumeMounts: - - name: nginx-includes-bootstrap - mountPath: /includes - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes containers: - args: - static-mode @@ -96,83 +64,14 @@ spec: securityContext: seccompProfile: type: RuntimeDefault - allowPrivilegeEscalation: false capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true runAsUser: 102 runAsGroup: 1001 - volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 101 - runAsGroup: 1001 - volumeMounts: - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: /var/run/nginx - - name: nginx-cache - mountPath: /var/cache/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes terminationGracePeriodSeconds: 30 serviceAccountName: nginx-gateway - shareProcessNamespace: true securityContext: fsGroup: 1001 runAsNonRoot: true - volumes: - - name: nginx-conf - emptyDir: {} - - name: nginx-stream-conf - emptyDir: {} - - name: nginx-main-includes - emptyDir: {} - - name: nginx-secrets - emptyDir: {} - - name: nginx-run - emptyDir: {} - - name: nginx-cache - emptyDir: {} - - name: nginx-includes - emptyDir: 
{} - - name: nginx-includes-bootstrap - configMap: - name: nginx-includes-bootstrap diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml index bd222a1ece..534abf6dfa 100644 --- a/deploy/aws-nlb/deploy.yaml +++ b/deploy/aws-nlb/deploy.yaml @@ -160,9 +160,6 @@ metadata: apiVersion: v1 kind: Service metadata: - annotations: - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip - service.beta.kubernetes.io/aws-load-balancer-type: external labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway @@ -170,20 +167,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -250,85 +242,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -336,35 +251,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap --- 
apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index 990adedf38..ebe6879868 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -167,20 +167,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -247,85 +242,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -333,37 +251,13 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes nodeSelector: kubernetes.io/os: linux securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index 9a0746a1d9..534abf6dfa 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -167,20 +167,15 @@ metadata: name: nginx-gateway namespace: 
nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -247,85 +242,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -333,35 +251,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index 69f8a68c58..3cae2cf0f2 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -185,20 +185,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway 
app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -268,93 +263,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -362,40 +272,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index 0248ce832e..a7bec5e823 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -172,20 +172,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: 
nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -253,85 +248,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -339,35 +257,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index 4d7180632d..dbdb743df2 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -180,20 +180,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -262,93 +257,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL 
- readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -356,40 +266,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index 414317999b..534abf6dfa 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -167,20 +167,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: NodePort + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -247,85 +242,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - 
readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -333,35 +251,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index 61a8f82587..940f15457d 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -175,20 +175,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -255,85 +250,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - 
mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -341,35 +259,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index 4e896d2f22..c05cf2f26a 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -182,20 +182,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -265,93 +260,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - 
imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -359,40 +269,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index 8f220c8fe3..63a114ab6d 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -169,20 +169,15 @@ metadata: name: nginx-gateway namespace: nginx-gateway spec: - externalTrafficPolicy: Local ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https + - name: grpc port: 443 protocol: TCP targetPort: 443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway - type: LoadBalancer + type: ClusterIP --- apiVersion: apps/v1 kind: Deployment @@ -250,85 +245,8 @@ spec: port: health initialDelaySeconds: 3 periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - KILL - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 102 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /etc/nginx/includes - name: nginx-includes - - image: 
ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init securityContext: capabilities: - add: - - KILL drop: - ALL readOnlyRootFilesystem: true @@ -336,35 +254,11 @@ spec: runAsUser: 102 seccompProfile: type: RuntimeDefault - volumeMounts: - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway - shareProcessNamespace: true terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/go.mod b/go.mod index 8c5212cca9..e19a5cfde8 100644 --- a/go.mod +++ b/go.mod @@ -7,8 +7,6 @@ require ( github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.7.0 github.com/nginx/telemetry-exporter v0.1.4 - github.com/nginxinc/nginx-plus-go-client v1.3.0 - github.com/nginxinc/nginx-prometheus-exporter v1.3.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 github.com/prometheus/client_golang v1.20.5 diff --git a/go.sum b/go.sum index 82275b5edb..e451191866 100644 --- a/go.sum +++ b/go.sum @@ -87,10 +87,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nginx/telemetry-exporter v0.1.4 h1:3ikgKlyz/O57oaBLkxCInMjr74AhGTKr9rHdRAkkl/w= github.com/nginx/telemetry-exporter v0.1.4/go.mod h1:bl6qmsxgk4a9D0X8R5E3sUNXN2iECPEK1JNbRLhN5C4= -github.com/nginxinc/nginx-plus-go-client v1.3.0 h1:q/aeT4B5k0KLwWlefoBzfLfraBBvIKLuDg+lLFWAo4I= -github.com/nginxinc/nginx-plus-go-client v1.3.0/go.mod h1:n8OFLzrJulJ2fur28Cwa1Qp5DZNS2VicLV+Adt30LQ4= -github.com/nginxinc/nginx-prometheus-exporter v1.3.0 h1:1JtdxsZH0Uwhu1nL/j/QyOXytP5V5j68AEo2X+DFWb0= -github.com/nginxinc/nginx-prometheus-exporter v1.3.0/go.mod h1:hXoH+X6aIKSyQuO6QTIiPKH3eZyxqy/wW8GYiE3dflU= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod 
h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= diff --git a/internal/mode/static/nginx/file/manager.go b/internal/framework/file/file.go similarity index 53% rename from internal/mode/static/nginx/file/manager.go rename to internal/framework/file/file.go index 52c64216f1..3533bda2ee 100644 --- a/internal/mode/static/nginx/file/manager.go +++ b/internal/framework/file/file.go @@ -4,10 +4,7 @@ import ( "errors" "fmt" "io" - "io/fs" "os" - - "github.com/go-logr/logr" ) //go:generate go tool counterfeiter -generate @@ -49,13 +46,8 @@ type File struct { //counterfeiter:generate . OSFileManager -// OSFileManager is an interface that exposes File I/O operations for ManagerImpl. -// Used for unit testing. +// OSFileManager is an interface that exposes File I/O operations. type OSFileManager interface { - // ReadDir returns the directory entries for the directory. - ReadDir(dirname string) ([]fs.DirEntry, error) - // Remove file with given name. - Remove(name string) error // Create file at the provided filepath. Create(name string) (*os.File, error) // Chmod sets the mode of the file. @@ -68,68 +60,7 @@ type OSFileManager interface { Copy(dst io.Writer, src io.Reader) error } -//counterfeiter:generate . Manager - -// Manager manages NGINX configuration files. -type Manager interface { - // ReplaceFiles replaces the files on the file system with the given files removing any previous files. - ReplaceFiles(files []File) error -} - -// ManagerImpl is an implementation of Manager. -// Note: It is not thread safe. -type ManagerImpl struct { - logger logr.Logger - osFileManager OSFileManager - lastWrittenPaths []string -} - -// NewManagerImpl creates a new NewManagerImpl. -func NewManagerImpl(logger logr.Logger, osFileManager OSFileManager) *ManagerImpl { - return &ManagerImpl{ - logger: logger, - osFileManager: osFileManager, - } -} - -// ReplaceFiles replaces the files on the file system with the given files removing any previous files. -// It panics if a file type is unknown. -func (m *ManagerImpl) ReplaceFiles(files []File) error { - for _, path := range m.lastWrittenPaths { - if err := m.osFileManager.Remove(path); err != nil { - if os.IsNotExist(err) { - m.logger.Info( - "File not found when attempting to delete", - "path", path, - "error", err, - ) - continue - } - return fmt.Errorf("failed to delete file %q: %w", path, err) - } - - m.logger.V(1).Info("Deleted file", "path", path) - } - - // In some cases, NGINX reads files in runtime, like a JWK. If you remove such file, NGINX will fail - // any request (return 500 status code) that involves reading the file. - // However, we don't have such files yet, so we're not considering this case. 
- - m.lastWrittenPaths = make([]string, 0, len(files)) - - for _, file := range files { - if err := WriteFile(m.osFileManager, file); err != nil { - return fmt.Errorf("failed to write file %q of type %v: %w", file.Path, file.Type, err) - } - - m.lastWrittenPaths = append(m.lastWrittenPaths, file.Path) - m.logger.V(1).Info("Wrote file", "path", file.Path) - } - - return nil -} - -func WriteFile(fileMgr OSFileManager, file File) error { +func Write(fileMgr OSFileManager, file File) error { ensureType(file.Type) f, err := fileMgr.Create(file.Path) diff --git a/internal/mode/static/nginx/file/file_suite_test.go b/internal/framework/file/file_suite_test.go similarity index 100% rename from internal/mode/static/nginx/file/file_suite_test.go rename to internal/framework/file/file_suite_test.go diff --git a/internal/framework/file/file_test.go b/internal/framework/file/file_test.go new file mode 100644 index 0000000000..9c63c0ba27 --- /dev/null +++ b/internal/framework/file/file_test.go @@ -0,0 +1,155 @@ +package file_test + +import ( + "errors" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file/filefakes" +) + +var _ = Describe("Write files", Ordered, func() { + var ( + mgr file.OSFileManager + tmpDir string + regular1, regular2, secret file.File + ) + + ensureFiles := func(files []file.File) { + entries, err := os.ReadDir(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(entries).Should(HaveLen(len(files))) + + entriesMap := make(map[string]os.DirEntry) + for _, entry := range entries { + entriesMap[entry.Name()] = entry + } + + for _, f := range files { + _, ok := entriesMap[filepath.Base(f.Path)] + Expect(ok).Should(BeTrue()) + + info, err := os.Stat(f.Path) + Expect(err).ToNot(HaveOccurred()) + + Expect(info.IsDir()).To(BeFalse()) + + if f.Type == file.TypeRegular { + Expect(info.Mode()).To(Equal(os.FileMode(0o644))) + } else { + Expect(info.Mode()).To(Equal(os.FileMode(0o640))) + } + + bytes, err := os.ReadFile(f.Path) + Expect(err).ToNot(HaveOccurred()) + Expect(bytes).To(Equal(f.Content)) + } + } + + BeforeAll(func() { + mgr = file.NewStdLibOSFileManager() + tmpDir = GinkgoT().TempDir() + + regular1 = file.File{ + Type: file.TypeRegular, + Path: filepath.Join(tmpDir, "regular-1.conf"), + Content: []byte("regular-1"), + } + regular2 = file.File{ + Type: file.TypeRegular, + Path: filepath.Join(tmpDir, "regular-2.conf"), + Content: []byte("regular-2"), + } + secret = file.File{ + Type: file.TypeSecret, + Path: filepath.Join(tmpDir, "secret.conf"), + Content: []byte("secret"), + } + }) + + It("should write files", func() { + files := []file.File{regular1, regular2, secret} + + for _, f := range files { + Expect(file.Write(mgr, f)).To(Succeed()) + } + + ensureFiles(files) + }) + + When("file type is not supported", func() { + It("should panic", func() { + mgr = file.NewStdLibOSFileManager() + + f := file.File{ + Type: 123, + Path: "unsupported.conf", + } + + replace := func() { + _ = file.Write(mgr, f) + } + + Expect(replace).Should(Panic()) + }) + }) + + Describe("Edge cases with IO errors", func() { + var ( + files = []file.File{ + { + Type: file.TypeRegular, + Path: "regular.conf", + Content: []byte("regular"), + }, + { + Type: file.TypeSecret, + Path: "secret.conf", + Content: []byte("secret"), + }, + } + errTest = errors.New("test error") + ) + + DescribeTable( + "should return error on file IO 
error", + func(fakeOSMgr *filefakes.FakeOSFileManager) { + mgr := fakeOSMgr + + for _, f := range files { + err := file.Write(mgr, f) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(errTest)) + } + }, + Entry( + "Create", + &filefakes.FakeOSFileManager{ + CreateStub: func(_ string) (*os.File, error) { + return nil, errTest + }, + }, + ), + Entry( + "Chmod", + &filefakes.FakeOSFileManager{ + ChmodStub: func(_ *os.File, _ os.FileMode) error { + return errTest + }, + }, + ), + Entry( + "Write", + &filefakes.FakeOSFileManager{ + WriteStub: func(_ *os.File, _ []byte) error { + return errTest + }, + }, + ), + ) + }) +}) diff --git a/internal/mode/static/nginx/file/filefakes/fake_osfile_manager.go b/internal/framework/file/filefakes/fake_osfile_manager.go similarity index 72% rename from internal/mode/static/nginx/file/filefakes/fake_osfile_manager.go rename to internal/framework/file/filefakes/fake_osfile_manager.go index 9f63e0a025..52bf2fb1ad 100644 --- a/internal/mode/static/nginx/file/filefakes/fake_osfile_manager.go +++ b/internal/framework/file/filefakes/fake_osfile_manager.go @@ -3,11 +3,10 @@ package filefakes import ( "io" - "io/fs" "os" "sync" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" ) type FakeOSFileManager struct { @@ -61,30 +60,6 @@ type FakeOSFileManager struct { result1 *os.File result2 error } - ReadDirStub func(string) ([]fs.DirEntry, error) - readDirMutex sync.RWMutex - readDirArgsForCall []struct { - arg1 string - } - readDirReturns struct { - result1 []fs.DirEntry - result2 error - } - readDirReturnsOnCall map[int]struct { - result1 []fs.DirEntry - result2 error - } - RemoveStub func(string) error - removeMutex sync.RWMutex - removeArgsForCall []struct { - arg1 string - } - removeReturns struct { - result1 error - } - removeReturnsOnCall map[int]struct { - result1 error - } WriteStub func(*os.File, []byte) error writeMutex sync.RWMutex writeArgsForCall []struct { @@ -353,131 +328,6 @@ func (fake *FakeOSFileManager) OpenReturnsOnCall(i int, result1 *os.File, result }{result1, result2} } -func (fake *FakeOSFileManager) ReadDir(arg1 string) ([]fs.DirEntry, error) { - fake.readDirMutex.Lock() - ret, specificReturn := fake.readDirReturnsOnCall[len(fake.readDirArgsForCall)] - fake.readDirArgsForCall = append(fake.readDirArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.ReadDirStub - fakeReturns := fake.readDirReturns - fake.recordInvocation("ReadDir", []interface{}{arg1}) - fake.readDirMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeOSFileManager) ReadDirCallCount() int { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - return len(fake.readDirArgsForCall) -} - -func (fake *FakeOSFileManager) ReadDirCalls(stub func(string) ([]fs.DirEntry, error)) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = stub -} - -func (fake *FakeOSFileManager) ReadDirArgsForCall(i int) string { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - argsForCall := fake.readDirArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeOSFileManager) ReadDirReturns(result1 []fs.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - fake.readDirReturns = struct { - result1 []fs.DirEntry - result2 error - 
}{result1, result2} -} - -func (fake *FakeOSFileManager) ReadDirReturnsOnCall(i int, result1 []fs.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - if fake.readDirReturnsOnCall == nil { - fake.readDirReturnsOnCall = make(map[int]struct { - result1 []fs.DirEntry - result2 error - }) - } - fake.readDirReturnsOnCall[i] = struct { - result1 []fs.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeOSFileManager) Remove(arg1 string) error { - fake.removeMutex.Lock() - ret, specificReturn := fake.removeReturnsOnCall[len(fake.removeArgsForCall)] - fake.removeArgsForCall = append(fake.removeArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.RemoveStub - fakeReturns := fake.removeReturns - fake.recordInvocation("Remove", []interface{}{arg1}) - fake.removeMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeOSFileManager) RemoveCallCount() int { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - return len(fake.removeArgsForCall) -} - -func (fake *FakeOSFileManager) RemoveCalls(stub func(string) error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = stub -} - -func (fake *FakeOSFileManager) RemoveArgsForCall(i int) string { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - argsForCall := fake.removeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeOSFileManager) RemoveReturns(result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - fake.removeReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeOSFileManager) RemoveReturnsOnCall(i int, result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - if fake.removeReturnsOnCall == nil { - fake.removeReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.removeReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeOSFileManager) Write(arg1 *os.File, arg2 []byte) error { var arg2Copy []byte if arg2 != nil { @@ -556,10 +406,6 @@ func (fake *FakeOSFileManager) Invocations() map[string][][]interface{} { defer fake.createMutex.RUnlock() fake.openMutex.RLock() defer fake.openMutex.RUnlock() - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() fake.writeMutex.RLock() defer fake.writeMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} diff --git a/internal/mode/static/nginx/file/os_filemanager.go b/internal/framework/file/os_filemanager.go similarity index 100% rename from internal/mode/static/nginx/file/os_filemanager.go rename to internal/framework/file/os_filemanager.go diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 54dde7ade8..91b904d40e 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -2,13 +2,11 @@ package static import ( "context" - "errors" "fmt" "sync" "time" "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -22,9 +20,8 @@ import ( frameworkStatus "github.com/nginx/nginx-gateway-fabric/internal/framework/status" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" 
"github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" ngxConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -38,12 +35,10 @@ type handlerMetricsCollector interface { // eventHandlerConfig holds configuration parameters for eventHandlerImpl. type eventHandlerConfig struct { - // nginxFileMgr is the file Manager for nginx. - nginxFileMgr file.Manager + // nginxUpdater updates nginx configuration using the NGINX agent. + nginxUpdater agent.NginxUpdater // metricsCollector collects metrics for this controller. metricsCollector handlerMetricsCollector - // nginxRuntimeMgr manages nginx runtime. - nginxRuntimeMgr runtime.Manager // statusUpdater updates statuses on Kubernetes resources. statusUpdater frameworkStatus.GroupUpdater // processor is the state ChangeProcessor. @@ -62,8 +57,8 @@ type eventHandlerConfig struct { eventRecorder record.EventRecorder // deployCtxCollector collects the deployment context for N+ licensing deployCtxCollector licensing.Collector - // nginxConfiguredOnStartChecker sets the health of the Pod to Ready once we've written out our initial config. - nginxConfiguredOnStartChecker *nginxConfiguredOnStartChecker + // graphBuiltHealthChecker sets the health of the Pod to Ready once we've built our initial graph. + graphBuiltHealthChecker *graphBuiltHealthChecker // gatewayPodConfig contains information about this Pod. gatewayPodConfig ngfConfig.GatewayPodConfig // controlConfigNSName is the NamespacedName of the NginxGateway config for this controller. @@ -164,13 +159,15 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log changeType, gr := h.cfg.processor.Process() + // Once we've processed resources on startup and built our first graph, mark the Pod as ready. 
+ if !h.cfg.graphBuiltHealthChecker.ready { + h.cfg.graphBuiltHealthChecker.setAsReady() + } + var err error switch changeType { case state.NoChange: logger.Info("Handling events didn't result into NGINX configuration changes") - if !h.cfg.nginxConfiguredOnStartChecker.ready && h.cfg.nginxConfiguredOnStartChecker.firstBatchError == nil { - h.cfg.nginxConfiguredOnStartChecker.setAsReady() - } return case state.EndpointsOnlyChange: h.version++ @@ -184,9 +181,9 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.setLatestConfiguration(&cfg) if h.cfg.plus { - err = h.updateUpstreamServers(cfg) + h.cfg.nginxUpdater.UpdateUpstreamServers() } else { - err = h.updateNginxConf(ctx, cfg) + err = h.updateNginxConf(cfg) } case state.ClusterStateChange: h.version++ @@ -199,21 +196,15 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.setLatestConfiguration(&cfg) - err = h.updateNginxConf(ctx, cfg) + err = h.updateNginxConf(cfg) } var nginxReloadRes status.NginxReloadResult if err != nil { logger.Error(err, "Failed to update NGINX configuration") nginxReloadRes.Error = err - if !h.cfg.nginxConfiguredOnStartChecker.ready { - h.cfg.nginxConfiguredOnStartChecker.firstBatchError = err - } } else { logger.Info("NGINX configuration was successfully updated") - if !h.cfg.nginxConfiguredOnStartChecker.ready { - h.cfg.nginxConfiguredOnStartChecker.setAsReady() - } } h.latestReloadResult = nginxReloadRes @@ -304,134 +295,21 @@ func (h *eventHandlerImpl) parseAndCaptureEvent(ctx context.Context, logger logr } // updateNginxConf updates nginx conf files and reloads nginx. -func (h *eventHandlerImpl) updateNginxConf( - ctx context.Context, - conf dataplane.Configuration, -) error { +// +//nolint:unparam // temporarily returning only nil +func (h *eventHandlerImpl) updateNginxConf(conf dataplane.Configuration) error { files := h.cfg.generator.Generate(conf) - if err := h.cfg.nginxFileMgr.ReplaceFiles(files); err != nil { - return fmt.Errorf("failed to replace NGINX configuration files: %w", err) - } - if err := h.cfg.nginxRuntimeMgr.Reload(ctx, conf.Version); err != nil { - return fmt.Errorf("failed to reload NGINX: %w", err) - } + h.cfg.nginxUpdater.UpdateConfig(len(files)) // If using NGINX Plus, update upstream servers using the API. - if err := h.updateUpstreamServers(conf); err != nil { - return fmt.Errorf("failed to update upstream servers: %w", err) + if h.cfg.plus { + h.cfg.nginxUpdater.UpdateUpstreamServers() } return nil } -// updateUpstreamServers determines which servers have changed and uses the NGINX Plus API to update them. -// Only applicable when using NGINX Plus. 
-func (h *eventHandlerImpl) updateUpstreamServers(conf dataplane.Configuration) error { - if !h.cfg.plus { - return nil - } - - prevUpstreams, prevStreamUpstreams, err := h.cfg.nginxRuntimeMgr.GetUpstreams() - if err != nil { - return fmt.Errorf("failed to get upstreams from API: %w", err) - } - - type upstream struct { - name string - servers []ngxclient.UpstreamServer - } - var upstreams []upstream - - for _, u := range conf.Upstreams { - confUpstream := upstream{ - name: u.Name, - servers: ngxConfig.ConvertEndpoints(u.Endpoints), - } - - if u, ok := prevUpstreams[confUpstream.name]; ok { - if !serversEqual(confUpstream.servers, u.Peers) { - upstreams = append(upstreams, confUpstream) - } - } - } - - type streamUpstream struct { - name string - servers []ngxclient.StreamUpstreamServer - } - var streamUpstreams []streamUpstream - - for _, u := range conf.StreamUpstreams { - confUpstream := streamUpstream{ - name: u.Name, - servers: ngxConfig.ConvertStreamEndpoints(u.Endpoints), - } - - if u, ok := prevStreamUpstreams[confUpstream.name]; ok { - if !serversEqual(confUpstream.servers, u.Peers) { - streamUpstreams = append(streamUpstreams, confUpstream) - } - } - } - - var updateErr error - for _, upstream := range upstreams { - if err := h.cfg.nginxRuntimeMgr.UpdateHTTPServers(upstream.name, upstream.servers); err != nil { - updateErr = errors.Join(updateErr, fmt.Errorf( - "couldn't update upstream %q via the API: %w", upstream.name, err)) - } - } - - for _, upstream := range streamUpstreams { - if err := h.cfg.nginxRuntimeMgr.UpdateStreamServers(upstream.name, upstream.servers); err != nil { - updateErr = errors.Join(updateErr, fmt.Errorf( - "couldn't update stream upstream %q via the API: %w", upstream.name, err)) - } - } - - return updateErr -} - -// serversEqual accepts lists of either UpstreamServer/Peer or StreamUpstreamServer/StreamPeer and determines -// if the server names within these lists are equal. -func serversEqual[ - upstreamServer ngxclient.UpstreamServer | ngxclient.StreamUpstreamServer, - peer ngxclient.Peer | ngxclient.StreamPeer, -](newServers []upstreamServer, oldServers []peer) bool { - if len(newServers) != len(oldServers) { - return false - } - - getServerVal := func(T any) string { - var server string - switch t := T.(type) { - case ngxclient.UpstreamServer: - server = t.Server - case ngxclient.StreamUpstreamServer: - server = t.Server - case ngxclient.Peer: - server = t.Server - case ngxclient.StreamPeer: - server = t.Server - } - return server - } - - diff := make(map[string]struct{}, len(newServers)) - for _, s := range newServers { - diff[getServerVal(s)] = struct{}{} - } - - for _, s := range oldServers { - if _, ok := diff[getServerVal(s)]; !ok { - return false - } - } - - return true -} - // updateControlPlaneAndSetStatus updates the control plane configuration and then sets the status // based on the outcome. func (h *eventHandlerImpl) updateControlPlaneAndSetStatus( diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index 5774487650..43644981b4 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -5,7 +5,6 @@ import ( "errors" "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "go.uber.org/zap" @@ -20,15 +19,14 @@ import ( ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/status/statusfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing/licensingfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics/collectors" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/agentfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/configfakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime/runtimefakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -37,17 +35,16 @@ import ( var _ = Describe("eventHandler", func() { var ( - handler *eventHandlerImpl - fakeProcessor *statefakes.FakeChangeProcessor - fakeGenerator *configfakes.FakeGenerator - fakeNginxFileMgr *filefakes.FakeManager - fakeNginxRuntimeMgr *runtimefakes.FakeManager - fakeStatusUpdater *statusfakes.FakeGroupUpdater - fakeEventRecorder *record.FakeRecorder - fakeK8sClient client.WithWatch - namespace = "nginx-gateway" - configName = "nginx-gateway-config" - zapLogLevelSetter zapLogLevelSetter + handler *eventHandlerImpl + fakeProcessor *statefakes.FakeChangeProcessor + fakeGenerator *configfakes.FakeGenerator + fakeNginxUpdater *agentfakes.FakeNginxUpdater + fakeStatusUpdater *statusfakes.FakeGroupUpdater + fakeEventRecorder *record.FakeRecorder + fakeK8sClient client.WithWatch + namespace = "nginx-gateway" + configName = "nginx-gateway-config" + zapLogLevelSetter zapLogLevelSetter ) const nginxGatewayServiceName = "nginx-gateway" @@ -67,11 +64,9 @@ var _ = Describe("eventHandler", func() { Expect(fakeGenerator.GenerateCallCount()).Should(Equal(1)) Expect(fakeGenerator.GenerateArgsForCall(0)).Should(Equal(expectedConf)) - Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).Should(Equal(1)) - files := fakeNginxFileMgr.ReplaceFilesArgsForCall(0) - Expect(files).Should(Equal(expectedFiles)) - - Expect(fakeNginxRuntimeMgr.ReloadCallCount()).Should(Equal(1)) + Expect(fakeNginxUpdater.UpdateConfigCallCount()).Should(Equal(1)) + lenFiles := fakeNginxUpdater.UpdateConfigArgsForCall(0) + Expect(expectedFiles).To(HaveLen(lenFiles)) Expect(fakeStatusUpdater.UpdateGroupCallCount()).Should(Equal(2)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) @@ -87,8 +82,7 @@ var _ = Describe("eventHandler", func() { fakeProcessor = &statefakes.FakeChangeProcessor{} fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) fakeGenerator = &configfakes.FakeGenerator{} - fakeNginxFileMgr = &filefakes.FakeManager{} - fakeNginxRuntimeMgr = &runtimefakes.FakeManager{} + fakeNginxUpdater = &agentfakes.FakeNginxUpdater{} fakeStatusUpdater = &statusfakes.FakeGroupUpdater{} fakeEventRecorder = record.NewFakeRecorder(1) zapLogLevelSetter = 
newZapLogLevelSetter(zap.NewAtomicLevel()) @@ -98,17 +92,16 @@ var _ = Describe("eventHandler", func() { Expect(fakeK8sClient.Create(context.Background(), createService(nginxGatewayServiceName))).To(Succeed()) handler = newEventHandlerImpl(eventHandlerConfig{ - k8sClient: fakeK8sClient, - processor: fakeProcessor, - generator: fakeGenerator, - logLevelSetter: zapLogLevelSetter, - nginxFileMgr: fakeNginxFileMgr, - nginxRuntimeMgr: fakeNginxRuntimeMgr, - statusUpdater: fakeStatusUpdater, - eventRecorder: fakeEventRecorder, - deployCtxCollector: &licensingfakes.FakeCollector{}, - nginxConfiguredOnStartChecker: newNginxConfiguredOnStartChecker(), - controlConfigNSName: types.NamespacedName{Namespace: namespace, Name: configName}, + k8sClient: fakeK8sClient, + processor: fakeProcessor, + generator: fakeGenerator, + logLevelSetter: zapLogLevelSetter, + nginxUpdater: fakeNginxUpdater, + statusUpdater: fakeStatusUpdater, + eventRecorder: fakeEventRecorder, + deployCtxCollector: &licensingfakes.FakeCollector{}, + graphBuiltHealthChecker: newGraphBuiltHealthChecker(), + controlConfigNSName: types.NamespacedName{Namespace: namespace, Name: configName}, gatewayPodConfig: config.GatewayPodConfig{ ServiceName: "nginx-gateway", Namespace: "nginx-gateway", @@ -116,7 +109,7 @@ var _ = Describe("eventHandler", func() { metricsCollector: collectors.NewControllerNoopCollector(), updateGatewayClassStatus: true, }) - Expect(handler.cfg.nginxConfiguredOnStartChecker.ready).To(BeFalse()) + Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeFalse()) }) Describe("Process the Gateway API resources events", func() { @@ -146,7 +139,7 @@ var _ = Describe("eventHandler", func() { }) AfterEach(func() { - Expect(handler.cfg.nginxConfiguredOnStartChecker.ready).To(BeTrue()) + Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeTrue()) }) When("a batch has one event", func() { @@ -416,23 +409,6 @@ var _ = Describe("eventHandler", func() { BeforeEach(func() { fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{}) - upstreams := ngxclient.Upstreams{ - "one": ngxclient.Upstream{ - Peers: []ngxclient.Peer{ - {Server: "server1"}, - }, - }, - } - - streamUpstreams := ngxclient.StreamUpstreams{ - "two": ngxclient.StreamUpstream{ - Peers: []ngxclient.StreamPeer{ - {Server: "server2"}, - }, - }, - } - - fakeNginxRuntimeMgr.GetUpstreamsReturns(upstreams, streamUpstreams, nil) }) When("running NGINX Plus", func() { @@ -446,8 +422,7 @@ var _ = Describe("eventHandler", func() { Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) Expect(fakeGenerator.GenerateCallCount()).To(Equal(0)) - Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).To(Equal(0)) - Expect(fakeNginxRuntimeMgr.GetUpstreamsCallCount()).To(Equal(1)) + Expect(fakeNginxUpdater.UpdateUpstreamServersCallCount()).To(Equal(1)) }) }) @@ -459,90 +434,20 @@ var _ = Describe("eventHandler", func() { Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) - Expect(fakeNginxFileMgr.ReplaceFilesCallCount()).To(Equal(1)) - Expect(fakeNginxRuntimeMgr.GetUpstreamsCallCount()).To(Equal(0)) - Expect(fakeNginxRuntimeMgr.ReloadCallCount()).To(Equal(1)) + Expect(fakeNginxUpdater.UpdateConfigCallCount()).To(Equal(1)) + Expect(fakeNginxUpdater.UpdateUpstreamServersCallCount()).To(Equal(0)) }) }) }) - When("updating upstream servers", func() { - conf := dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "one", - }, - }, - StreamUpstreams: 
[]dataplane.Upstream{ - { - Name: "two", - }, - }, - } - - BeforeEach(func() { - upstreams := ngxclient.Upstreams{ - "one": ngxclient.Upstream{ - Peers: []ngxclient.Peer{ - {Server: "server1"}, - }, - }, - } - - streamUpstreams := ngxclient.StreamUpstreams{ - "two": ngxclient.StreamUpstream{ - Peers: []ngxclient.StreamPeer{ - {Server: "server2"}, - }, - }, - } - - fakeNginxRuntimeMgr.GetUpstreamsReturns(upstreams, streamUpstreams, nil) - }) - - When("running NGINX Plus", func() { - BeforeEach(func() { - handler.cfg.plus = true - }) - - It("should update servers using the NGINX Plus API", func() { - Expect(handler.updateUpstreamServers(conf)).To(Succeed()) - Expect(fakeNginxRuntimeMgr.UpdateHTTPServersCallCount()).To(Equal(1)) - }) - - It("should return error when GET API returns an error", func() { - fakeNginxRuntimeMgr.GetUpstreamsReturns(nil, nil, errors.New("error")) - Expect(handler.updateUpstreamServers(conf)).ToNot(Succeed()) - }) - - It("should return error when UpdateHTTPServers API returns an error", func() { - fakeNginxRuntimeMgr.UpdateHTTPServersReturns(errors.New("error")) - Expect(handler.updateUpstreamServers(conf)).ToNot(Succeed()) - }) - - It("should return error when UpdateStreamServers API returns an error", func() { - fakeNginxRuntimeMgr.UpdateStreamServersReturns(errors.New("error")) - Expect(handler.updateUpstreamServers(conf)).ToNot(Succeed()) - }) - }) - - When("not running NGINX Plus", func() { - It("should not do anything", func() { - Expect(handler.updateUpstreamServers(conf)).To(Succeed()) - - Expect(fakeNginxRuntimeMgr.UpdateHTTPServersCallCount()).To(Equal(0)) - }) - }) - }) - - It("should set the health checker status properly when there are changes", func() { + It("should set the health checker status properly", func() { e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} - readyChannel := handler.cfg.nginxConfiguredOnStartChecker.getReadyCh() + readyChannel := handler.cfg.graphBuiltHealthChecker.getReadyCh() fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) + Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).ToNot(Succeed()) handler.HandleEventBatch(context.Background(), logr.Discard(), batch) dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) @@ -550,55 +455,7 @@ var _ = Describe("eventHandler", func() { Expect(readyChannel).To(BeClosed()) - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).To(Succeed()) - }) - - It("should set the health checker status properly when there are no changes or errors", func() { - e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} - batch := []interface{}{e} - readyChannel := handler.cfg.nginxConfiguredOnStartChecker.getReadyCh() - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.GetLatestConfiguration()).To(BeNil()) - - Expect(readyChannel).To(BeClosed()) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).To(Succeed()) - }) - - It("should set the health checker status properly when there is an error", func() { - e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} - batch := []interface{}{e} - readyChannel := handler.cfg.nginxConfiguredOnStartChecker.getReadyCh() - - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) - fakeNginxRuntimeMgr.ReloadReturns(errors.New("reload 
error")) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) - - // now send an update with no changes; should still return an error - fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).ToNot(Succeed()) - - // error goes away - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) - fakeNginxRuntimeMgr.ReloadReturns(nil) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 2) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) - - Expect(readyChannel).To(BeClosed()) - - Expect(handler.cfg.nginxConfiguredOnStartChecker.readyCheck(nil)).To(Succeed()) + Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).To(Succeed()) }) It("should panic for an unknown event type", func() { @@ -615,83 +472,6 @@ var _ = Describe("eventHandler", func() { }) }) -var _ = Describe("serversEqual", func() { - DescribeTable("determines if HTTP server lists are equal", - func(newServers []ngxclient.UpstreamServer, oldServers []ngxclient.Peer, equal bool) { - Expect(serversEqual(newServers, oldServers)).To(Equal(equal)) - }, - Entry("different length", - []ngxclient.UpstreamServer{ - {Server: "server1"}, - }, - []ngxclient.Peer{ - {Server: "server1"}, - {Server: "server2"}, - }, - false, - ), - Entry("differing elements", - []ngxclient.UpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.Peer{ - {Server: "server1"}, - {Server: "server3"}, - }, - false, - ), - Entry("same elements", - []ngxclient.UpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.Peer{ - {Server: "server1"}, - {Server: "server2"}, - }, - true, - ), - ) - DescribeTable("determines if stream server lists are equal", - func(newServers []ngxclient.StreamUpstreamServer, oldServers []ngxclient.StreamPeer, equal bool) { - Expect(serversEqual(newServers, oldServers)).To(Equal(equal)) - }, - Entry("different length", - []ngxclient.StreamUpstreamServer{ - {Server: "server1"}, - }, - []ngxclient.StreamPeer{ - {Server: "server1"}, - {Server: "server2"}, - }, - false, - ), - Entry("differing elements", - []ngxclient.StreamUpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.StreamPeer{ - {Server: "server1"}, - {Server: "server3"}, - }, - false, - ), - Entry("same elements", - []ngxclient.StreamUpstreamServer{ - {Server: "server1"}, - {Server: "server2"}, - }, - []ngxclient.StreamPeer{ - {Server: "server1"}, - {Server: "server2"}, - }, - true, - ), - ) -}) - var _ = Describe("getGatewayAddresses", func() { It("gets gateway addresses from a Service", func() { fakeClient := fake.NewFakeClient() diff --git a/internal/mode/static/health.go b/internal/mode/static/health.go index 180c67d643..a0fe4e9b59 100644 --- a/internal/mode/static/health.go +++ b/internal/mode/static/health.go @@ -6,49 +6,44 @@ import ( "sync" ) -// newNginxConfiguredOnStartChecker creates a new nginxConfiguredOnStartChecker. -func newNginxConfiguredOnStartChecker() *nginxConfiguredOnStartChecker { - return &nginxConfiguredOnStartChecker{ +// newGraphBuiltHealthChecker creates a new graphBuiltHealthChecker. 
+func newGraphBuiltHealthChecker() *graphBuiltHealthChecker { + return &graphBuiltHealthChecker{ readyCh: make(chan struct{}), } } -// nginxConfiguredOnStartChecker is used to check if nginx is successfully configured and if the NGF Pod is ready. -type nginxConfiguredOnStartChecker struct { - // firstBatchError is set when the first batch fails to configure nginx - // and we don't want to set ourselves as ready on the next batch if nothing changes - firstBatchError error - // readyCh is a channel that is initialized in newNginxConfiguredOnStartChecker and represents if the NGF Pod is ready. +// graphBuiltHealthChecker is used to check if the initial graph is built and the NGF Pod is ready. +type graphBuiltHealthChecker struct { + // readyCh is a channel that is initialized in newGraphBuiltHealthChecker and represents if the NGF Pod is ready. readyCh chan struct{} lock sync.RWMutex ready bool } // readyCheck returns the ready-state of the Pod. It satisfies the controller-runtime Checker type. -// We are considered ready after the handler processed the first batch. In case there is NGINX configuration -// to write, it must be written and NGINX must be reloaded successfully. -func (h *nginxConfiguredOnStartChecker) readyCheck(_ *http.Request) error { +// We are considered ready after the first graph is built. +func (h *graphBuiltHealthChecker) readyCheck(_ *http.Request) error { h.lock.RLock() defer h.lock.RUnlock() if !h.ready { - return errors.New("nginx has not yet become ready to accept traffic") + return errors.New("control plane is not yet ready") } return nil } // setAsReady marks the health check as ready. -func (h *nginxConfiguredOnStartChecker) setAsReady() { +func (h *graphBuiltHealthChecker) setAsReady() { h.lock.Lock() defer h.lock.Unlock() h.ready = true - h.firstBatchError = nil close(h.readyCh) } // getReadyCh returns a read-only channel, which determines if the NGF Pod is ready. 
-func (h *nginxConfiguredOnStartChecker) getReadyCh() <-chan struct{} { +func (h *graphBuiltHealthChecker) getReadyCh() <-chan struct{} { return h.readyCh } diff --git a/internal/mode/static/health_test.go b/internal/mode/static/health_test.go index 5bfd7aab73..7246283ed9 100644 --- a/internal/mode/static/health_test.go +++ b/internal/mode/static/health_test.go @@ -9,9 +9,9 @@ import ( func TestReadyCheck(t *testing.T) { t.Parallel() g := NewWithT(t) - nginxChecker := newNginxConfiguredOnStartChecker() - g.Expect(nginxChecker.readyCheck(nil)).ToNot(Succeed()) + healthChecker := newGraphBuiltHealthChecker() + g.Expect(healthChecker.readyCheck(nil)).ToNot(Succeed()) - nginxChecker.ready = true - g.Expect(nginxChecker.readyCheck(nil)).To(Succeed()) + healthChecker.ready = true + g.Expect(healthChecker.readyCheck(nil)).To(Succeed()) } diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 6ab99b4516..ea5d61b16d 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -3,7 +3,6 @@ package static import ( "context" "fmt" - "os" "time" "github.com/go-logr/logr" @@ -50,14 +49,13 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics/collectors" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" ngxcfg "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/clientsettings" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/observability" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/upstreamsettings" ngxvalidation "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/validation" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - ngxruntime "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" @@ -90,10 +88,9 @@ func init() { utilruntime.Must(appsv1.AddToScheme(scheme)) } -//nolint:gocyclo func StartManager(cfg config.Config) error { - nginxChecker := newNginxConfiguredOnStartChecker() - mgr, err := createManager(cfg, nginxChecker) + healthChecker := newGraphBuiltHealthChecker() + mgr, err := createManager(cfg, healthChecker) if err != nil { return fmt.Errorf("cannot build runtime manager: %w", err) } @@ -150,64 +147,20 @@ func StartManager(cfg config.Config) error { PlusSecrets: plusSecrets, }) - // Clear the configuration folders to ensure that no files are left over in case the control plane was restarted - // (this assumes the folders are in a shared volume). 
- removedPaths, err := file.ClearFolders(file.NewStdLibOSFileManager(), ngxcfg.ConfigFolders) - for _, path := range removedPaths { - cfg.Logger.Info("removed configuration file", "path", path) - } - if err != nil { - return fmt.Errorf("cannot clear NGINX configuration folders: %w", err) - } - - processHandler := ngxruntime.NewProcessHandlerImpl(os.ReadFile, os.Stat) - - // Ensure NGINX is running before registering metrics & starting the manager. - p, err := processHandler.FindMainProcess(ctx, ngxruntime.PidFileTimeout) - if err != nil { - return fmt.Errorf("NGINX is not running: %w", err) - } - cfg.Logger.V(1).Info("NGINX is running with PID", "pid", p) - - var ( - ngxruntimeCollector ngxruntime.MetricsCollector = collectors.NewManagerNoopCollector() - handlerCollector handlerMetricsCollector = collectors.NewControllerNoopCollector() - ) - - var ngxPlusClient ngxruntime.NginxPlusClient - if cfg.Plus { - ngxPlusClient, err = ngxruntime.CreatePlusClient() - if err != nil { - return fmt.Errorf("error creating NGINX plus client: %w", err) - } - } + var handlerCollector handlerMetricsCollector = collectors.NewControllerNoopCollector() if cfg.MetricsConfig.Enabled { constLabels := map[string]string{"class": cfg.GatewayClassName} - var ngxCollector prometheus.Collector - if cfg.Plus { - ngxCollector, err = collectors.NewNginxPlusMetricsCollector(ngxPlusClient, constLabels, promLogger) - } else { - ngxCollector = collectors.NewNginxMetricsCollector(constLabels, promLogger) - } - if err != nil { - return fmt.Errorf("cannot create nginx metrics collector: %w", err) - } - ngxruntimeCollector = collectors.NewManagerMetricsCollector(constLabels) + ngxruntimeCollector := collectors.NewManagerMetricsCollector(constLabels) handlerCollector = collectors.NewControllerCollector(constLabels) - ngxruntimeCollector, ok := ngxruntimeCollector.(prometheus.Collector) - if !ok { - return fmt.Errorf("ngxruntimeCollector is not a prometheus.Collector: %w", status.ErrFailedAssert) - } handlerCollector, ok := handlerCollector.(prometheus.Collector) if !ok { return fmt.Errorf("handlerCollector is not a prometheus.Collector: %w", status.ErrFailedAssert) } metrics.Registry.MustRegister( - ngxCollector, ngxruntimeCollector, handlerCollector, ) @@ -226,37 +179,30 @@ func StartManager(cfg config.Config) error { }) eventHandler := newEventHandlerImpl(eventHandlerConfig{ - nginxFileMgr: file.NewManagerImpl( - cfg.Logger.WithName("nginxFileManager"), - file.NewStdLibOSFileManager(), - ), + nginxUpdater: &agent.NginxUpdaterImpl{ + Logger: cfg.Logger.WithName("nginxUpdater"), + Plus: cfg.Plus, + }, metricsCollector: handlerCollector, - nginxRuntimeMgr: ngxruntime.NewManagerImpl( - ngxPlusClient, - ngxruntimeCollector, - cfg.Logger.WithName("nginxRuntimeManager"), - processHandler, - ngxruntime.NewVerifyClient(ngxruntime.NginxReloadTimeout), - ), - statusUpdater: groupStatusUpdater, - processor: processor, - serviceResolver: resolver.NewServiceResolverImpl(mgr.GetClient()), + statusUpdater: groupStatusUpdater, + processor: processor, + serviceResolver: resolver.NewServiceResolverImpl(mgr.GetClient()), generator: ngxcfg.NewGeneratorImpl( cfg.Plus, &cfg.UsageReportConfig, cfg.Logger.WithName("generator"), ), - k8sClient: mgr.GetClient(), - k8sReader: mgr.GetAPIReader(), - logLevelSetter: logLevelSetter, - eventRecorder: recorder, - deployCtxCollector: deployCtxCollector, - nginxConfiguredOnStartChecker: nginxChecker, - gatewayPodConfig: cfg.GatewayPodConfig, - controlConfigNSName: controlConfigNSName, - gatewayCtlrName: 
cfg.GatewayCtlrName, - updateGatewayClassStatus: cfg.UpdateGatewayClassStatus, - plus: cfg.Plus, + k8sClient: mgr.GetClient(), + k8sReader: mgr.GetAPIReader(), + logLevelSetter: logLevelSetter, + eventRecorder: recorder, + deployCtxCollector: deployCtxCollector, + graphBuiltHealthChecker: healthChecker, + gatewayPodConfig: cfg.GatewayPodConfig, + controlConfigNSName: controlConfigNSName, + gatewayCtlrName: cfg.GatewayCtlrName, + updateGatewayClassStatus: cfg.UpdateGatewayClassStatus, + plus: cfg.Plus, }) objects, objectLists := prepareFirstEventBatchPreparerArgs(cfg) @@ -291,7 +237,7 @@ func StartManager(cfg config.Config) error { Flags: cfg.Flags, }) - job, err := createTelemetryJob(cfg, dataCollector, nginxChecker.getReadyCh()) + job, err := createTelemetryJob(cfg, dataCollector, healthChecker.getReadyCh()) if err != nil { return fmt.Errorf("cannot create telemetry job: %w", err) } @@ -332,7 +278,7 @@ func createPolicyManager( return policies.NewManager(mustExtractGVK, cfgs...) } -func createManager(cfg config.Config, nginxChecker *nginxConfiguredOnStartChecker) (manager.Manager, error) { +func createManager(cfg config.Config, healthChecker *graphBuiltHealthChecker) (manager.Manager, error) { options := manager.Options{ Scheme: scheme, Logger: cfg.Logger.V(1), @@ -367,7 +313,7 @@ func createManager(cfg config.Config, nginxChecker *nginxConfiguredOnStartChecke } if cfg.HealthConfig.Enabled { - if err := mgr.AddReadyzCheck("readyz", nginxChecker.readyCheck); err != nil { + if err := mgr.AddReadyzCheck("readyz", healthChecker.readyCheck); err != nil { return nil, fmt.Errorf("error adding ready check: %w", err) } } diff --git a/internal/mode/static/metrics/collectors/nginx.go b/internal/mode/static/metrics/collectors/nginx.go deleted file mode 100644 index 838dcf6429..0000000000 --- a/internal/mode/static/metrics/collectors/nginx.go +++ /dev/null @@ -1,48 +0,0 @@ -package collectors - -import ( - "fmt" - - "github.com/go-kit/log" - "github.com/nginxinc/nginx-plus-go-client/client" - prometheusClient "github.com/nginxinc/nginx-prometheus-exporter/client" - nginxCollector "github.com/nginxinc/nginx-prometheus-exporter/collector" - "github.com/prometheus/client_golang/prometheus" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -const ( - nginxStatusSock = "/var/run/nginx/nginx-status.sock" - nginxStatusURI = "http://config-status/stub_status" -) - -// NewNginxMetricsCollector creates an NginxCollector which fetches stats from NGINX over a unix socket. -func NewNginxMetricsCollector(constLabels map[string]string, logger log.Logger) prometheus.Collector { - httpClient := runtime.GetSocketClient(nginxStatusSock) - ngxClient := prometheusClient.NewNginxClient(&httpClient, nginxStatusURI) - - return nginxCollector.NewNginxCollector(ngxClient, metrics.Namespace, constLabels, logger) -} - -// NewNginxPlusMetricsCollector creates an NginxCollector which fetches stats from NGINX Plus API over a unix socket. 
-func NewNginxPlusMetricsCollector( - plusClient runtime.NginxPlusClient, - constLabels map[string]string, - logger log.Logger, -) (prometheus.Collector, error) { - nc, ok := plusClient.(*client.NginxClient) - if !ok { - panic(fmt.Sprintf("expected *client.NginxClient, got %T", plusClient)) - } - collector := nginxCollector.NewNginxPlusCollector( - nc, - metrics.Namespace, - nginxCollector.VariableLabelNames{}, - constLabels, - logger, - ) - - return collector, nil -} diff --git a/internal/mode/static/metrics/collectors/nginx_runtime.go b/internal/mode/static/metrics/collectors/nginx_runtime.go index c84e696a2f..c762171d76 100644 --- a/internal/mode/static/metrics/collectors/nginx_runtime.go +++ b/internal/mode/static/metrics/collectors/nginx_runtime.go @@ -8,6 +8,15 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics" ) +// MetricsCollector is an interface for the metrics of the NGINX runtime manager. +// +//counterfeiter:generate . MetricsCollector +type MetricsCollector interface { + IncReloadCount() + IncReloadErrors() + ObserveLastReloadTime(ms time.Duration) +} + // NginxRuntimeCollector implements runtime.Collector interface and prometheus.Collector interface. type NginxRuntimeCollector struct { // Metrics @@ -97,20 +106,3 @@ func (c *NginxRuntimeCollector) Collect(ch chan<- prometheus.Metric) { c.configStale.Collect(ch) c.reloadsDuration.Collect(ch) } - -// ManagerNoopCollector used to initialize the ManagerCollector when metrics are disabled to avoid nil pointer errors. -type ManagerNoopCollector struct{} - -// NewManagerNoopCollector creates a no-op collector that implements ManagerCollector interface. -func NewManagerNoopCollector() *ManagerNoopCollector { - return &ManagerNoopCollector{} -} - -// IncReloadCount implements a no-op IncReloadCount. -func (c *ManagerNoopCollector) IncReloadCount() {} - -// IncReloadErrors implements a no-op IncReloadErrors. -func (c *ManagerNoopCollector) IncReloadErrors() {} - -// ObserveLastReloadTime implements a no-op ObserveLastReloadTime. -func (c *ManagerNoopCollector) ObserveLastReloadTime(_ time.Duration) {} diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go new file mode 100644 index 0000000000..93777628f2 --- /dev/null +++ b/internal/mode/static/nginx/agent/agent.go @@ -0,0 +1,36 @@ +package agent + +import ( + "github.com/go-logr/logr" +) + +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate + +//counterfeiter:generate . NginxUpdater + +// NginxUpdater is an interface for updating NGINX using the NGINX agent. +type NginxUpdater interface { + UpdateConfig(int) + UpdateUpstreamServers() +} + +// NginxUpdaterImpl implements the NginxUpdater interface. +type NginxUpdaterImpl struct { + Logger logr.Logger + Plus bool +} + +// UpdateConfig sends the nginx configuration to the agent. +func (n *NginxUpdaterImpl) UpdateConfig(files int) { + n.Logger.Info("Sending nginx configuration to agent", "numFiles", files) +} + +// UpdateUpstreamServers sends an APIRequest to the agent to update upstream servers using the NGINX Plus API. +// Only applicable when using NGINX Plus. 
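
Before the UpdateUpstreamServers implementation below, a short illustration (not part of the diff) of how a caller drives the new NginxUpdater interface added in this file. The logger, Plus flag, and file count are placeholder values for the example; in the patch itself the updater is constructed in StartManager's eventHandlerConfig and invoked by the event handler.

package main

import (
	"github.com/go-logr/logr"

	"github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent"
)

func main() {
	// Constructed as in StartManager's eventHandlerConfig, but with a discard logger
	// and Plus disabled for the example.
	var updater agent.NginxUpdater = &agent.NginxUpdaterImpl{
		Logger: logr.Discard(),
		Plus:   false,
	}

	// After configuration is generated, report how many files the agent should apply.
	// The count is arbitrary here.
	updater.UpdateConfig(17)

	// No-op when Plus is false; with NGINX Plus this goes through the Plus API.
	updater.UpdateUpstreamServers()
}
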
+func (n *NginxUpdaterImpl) UpdateUpstreamServers() { + if !n.Plus { + return + } + + n.Logger.Info("Updating upstream servers using NGINX Plus API") +} diff --git a/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go new file mode 100644 index 0000000000..4d8858a173 --- /dev/null +++ b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go @@ -0,0 +1,106 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package agentfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" +) + +type FakeNginxUpdater struct { + UpdateConfigStub func(int) + updateConfigMutex sync.RWMutex + updateConfigArgsForCall []struct { + arg1 int + } + UpdateUpstreamServersStub func() + updateUpstreamServersMutex sync.RWMutex + updateUpstreamServersArgsForCall []struct { + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeNginxUpdater) UpdateConfig(arg1 int) { + fake.updateConfigMutex.Lock() + fake.updateConfigArgsForCall = append(fake.updateConfigArgsForCall, struct { + arg1 int + }{arg1}) + stub := fake.UpdateConfigStub + fake.recordInvocation("UpdateConfig", []interface{}{arg1}) + fake.updateConfigMutex.Unlock() + if stub != nil { + fake.UpdateConfigStub(arg1) + } +} + +func (fake *FakeNginxUpdater) UpdateConfigCallCount() int { + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + return len(fake.updateConfigArgsForCall) +} + +func (fake *FakeNginxUpdater) UpdateConfigCalls(stub func(int)) { + fake.updateConfigMutex.Lock() + defer fake.updateConfigMutex.Unlock() + fake.UpdateConfigStub = stub +} + +func (fake *FakeNginxUpdater) UpdateConfigArgsForCall(i int) int { + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + argsForCall := fake.updateConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServers() { + fake.updateUpstreamServersMutex.Lock() + fake.updateUpstreamServersArgsForCall = append(fake.updateUpstreamServersArgsForCall, struct { + }{}) + stub := fake.UpdateUpstreamServersStub + fake.recordInvocation("UpdateUpstreamServers", []interface{}{}) + fake.updateUpstreamServersMutex.Unlock() + if stub != nil { + fake.UpdateUpstreamServersStub() + } +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServersCallCount() int { + fake.updateUpstreamServersMutex.RLock() + defer fake.updateUpstreamServersMutex.RUnlock() + return len(fake.updateUpstreamServersArgsForCall) +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServersCalls(stub func()) { + fake.updateUpstreamServersMutex.Lock() + defer fake.updateUpstreamServersMutex.Unlock() + fake.UpdateUpstreamServersStub = stub +} + +func (fake *FakeNginxUpdater) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.updateConfigMutex.RLock() + defer fake.updateConfigMutex.RUnlock() + fake.updateUpstreamServersMutex.RLock() + defer fake.updateUpstreamServersMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeNginxUpdater) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] 
= [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ agent.NginxUpdater = new(FakeNginxUpdater) diff --git a/internal/mode/static/nginx/agent/doc.go b/internal/mode/static/nginx/agent/doc.go new file mode 100644 index 0000000000..8ffe4381f3 --- /dev/null +++ b/internal/mode/static/nginx/agent/doc.go @@ -0,0 +1,4 @@ +/* +Package agent contains the functions for sending nginx configuration to the agent. +*/ +package agent diff --git a/internal/mode/static/nginx/config/configfakes/fake_generator.go b/internal/mode/static/nginx/config/configfakes/fake_generator.go index d92b09e7e4..fab0755398 100644 --- a/internal/mode/static/nginx/config/configfakes/fake_generator.go +++ b/internal/mode/static/nginx/config/configfakes/fake_generator.go @@ -4,8 +4,8 @@ package configfakes import ( "sync" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) diff --git a/internal/mode/static/nginx/config/convert.go b/internal/mode/static/nginx/config/convert.go deleted file mode 100644 index ece4e1b5c2..0000000000 --- a/internal/mode/static/nginx/config/convert.go +++ /dev/null @@ -1,58 +0,0 @@ -package config - -import ( - "fmt" - - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" -) - -// ConvertEndpoints converts a list of Endpoints into a list of NGINX Plus SDK UpstreamServers. -func ConvertEndpoints(eps []resolver.Endpoint) []ngxclient.UpstreamServer { - servers := make([]ngxclient.UpstreamServer, 0, len(eps)) - - for _, ep := range eps { - port, format := getPortAndIPFormat(ep) - - server := ngxclient.UpstreamServer{ - Server: fmt.Sprintf(format, ep.Address, port), - } - - servers = append(servers, server) - } - - return servers -} - -// ConvertStreamEndpoints converts a list of Endpoints into a list of NGINX Plus SDK StreamUpstreamServers. -func ConvertStreamEndpoints(eps []resolver.Endpoint) []ngxclient.StreamUpstreamServer { - servers := make([]ngxclient.StreamUpstreamServer, 0, len(eps)) - - for _, ep := range eps { - port, format := getPortAndIPFormat(ep) - - server := ngxclient.StreamUpstreamServer{ - Server: fmt.Sprintf(format, ep.Address, port), - } - - servers = append(servers, server) - } - - return servers -} - -func getPortAndIPFormat(ep resolver.Endpoint) (string, string) { - var port string - - if ep.Port != 0 { - port = fmt.Sprintf(":%d", ep.Port) - } - - format := "%s%s" - if ep.IPv6 { - format = "[%s]%s" - } - - return port, format -} diff --git a/internal/mode/static/nginx/config/convert_test.go b/internal/mode/static/nginx/config/convert_test.go deleted file mode 100644 index 312b3d41c1..0000000000 --- a/internal/mode/static/nginx/config/convert_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package config - -import ( - "testing" - - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - . 
"github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" -) - -func TestConvertEndpoints(t *testing.T) { - t.Parallel() - endpoints := []resolver.Endpoint{ - { - Address: "1.2.3.4", - Port: 80, - }, - { - Address: "5.6.7.8", - Port: 0, - }, - { - Address: "2001:db8::1", - Port: 443, - IPv6: true, - }, - } - - expUpstreams := []ngxclient.UpstreamServer{ - { - Server: "1.2.3.4:80", - }, - { - Server: "5.6.7.8", - }, - { - Server: "[2001:db8::1]:443", - }, - } - - g := NewWithT(t) - g.Expect(ConvertEndpoints(endpoints)).To(Equal(expUpstreams)) -} - -func TestConvertStreamEndpoints(t *testing.T) { - t.Parallel() - endpoints := []resolver.Endpoint{ - { - Address: "1.2.3.4", - Port: 80, - }, - { - Address: "5.6.7.8", - Port: 0, - }, - { - Address: "2001:db8::1", - Port: 443, - IPv6: true, - }, - } - - expUpstreams := []ngxclient.StreamUpstreamServer{ - { - Server: "1.2.3.4:80", - }, - { - Server: "5.6.7.8", - }, - { - Server: "[2001:db8::1]:443", - }, - } - - g := NewWithT(t) - g.Expect(ConvertStreamEndpoints(endpoints)).To(Equal(expUpstreams)) -} diff --git a/internal/mode/static/nginx/config/generator.go b/internal/mode/static/nginx/config/generator.go index 49be7696d2..3fd5eac10a 100644 --- a/internal/mode/static/nginx/config/generator.go +++ b/internal/mode/static/nginx/config/generator.go @@ -7,13 +7,13 @@ import ( "github.com/go-logr/logr" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/http" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/clientsettings" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/observability" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/upstreamsettings" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) @@ -47,9 +47,6 @@ const ( // streamConfigFile is the path to the configuration file with Stream configuration. streamConfigFile = streamFolder + "/stream.conf" - // configVersionFile is the path to the config version configuration file. - configVersionFile = httpFolder + "/config-version.conf" - // httpMatchVarsFile is the path to the http_match pairs configuration file. httpMatchVarsFile = httpFolder + "/matches.json" @@ -63,10 +60,6 @@ const ( nginxPlusConfigFile = httpFolder + "/plus-api.conf" ) -// ConfigFolders is a list of folders where NGINX configuration files are stored. -// Volumes here also need to be added to our crossplane ephemeral test container. -var ConfigFolders = []string{httpFolder, secretsFolder, includesFolder, mainIncludesFolder, streamFolder} - // Generator generates NGINX configuration files. // This interface is used for testing purposes only. 
type Generator interface { @@ -201,7 +194,6 @@ func (g GeneratorImpl) getExecuteFuncs( g.executeStreamServers, g.executeStreamUpstreams, executeStreamMaps, - executeVersion, executePlusAPI, } } diff --git a/internal/mode/static/nginx/config/generator_test.go b/internal/mode/static/nginx/config/generator_test.go index 79bb35a90f..92fb02f403 100644 --- a/internal/mode/static/nginx/config/generator_test.go +++ b/internal/mode/static/nginx/config/generator_test.go @@ -1,7 +1,6 @@ package config_test import ( - "fmt" "sort" "testing" @@ -9,10 +8,10 @@ import ( . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" @@ -145,7 +144,7 @@ func TestGenerate(t *testing.T) { files := generator.Generate(conf) - g.Expect(files).To(HaveLen(18)) + g.Expect(files).To(HaveLen(17)) arrange := func(i, j int) bool { return files[i].Path < files[j].Path } @@ -153,7 +152,6 @@ func TestGenerate(t *testing.T) { /* Order of files: - /etc/nginx/conf.d/config-version.conf /etc/nginx/conf.d/http.conf /etc/nginx/conf.d/matches.json /etc/nginx/conf.d/plus-api.conf @@ -174,13 +172,8 @@ func TestGenerate(t *testing.T) { */ g.Expect(files[0].Type).To(Equal(file.TypeRegular)) - g.Expect(files[0].Path).To(Equal("/etc/nginx/conf.d/config-version.conf")) - configVersion := string(files[0].Content) - g.Expect(configVersion).To(ContainSubstring(fmt.Sprintf("return 200 %d", conf.Version))) - - g.Expect(files[1].Type).To(Equal(file.TypeRegular)) - g.Expect(files[1].Path).To(Equal("/etc/nginx/conf.d/http.conf")) - httpCfg := string(files[1].Content) // converting to string so that on failure gomega prints strings not byte arrays + g.Expect(files[0].Path).To(Equal("/etc/nginx/conf.d/http.conf")) + httpCfg := string(files[0].Content) // converting to string so that on failure gomega prints strings not byte arrays // Note: this only verifies that Generate() returns a byte array with upstream, server, and split_client blocks. // It does not test the correctness of those blocks. That functionality is covered by other tests in this package. 
g.Expect(httpCfg).To(ContainSubstring("listen 80")) @@ -197,14 +190,14 @@ func TestGenerate(t *testing.T) { g.Expect(httpCfg).To(ContainSubstring("include /etc/nginx/includes/http_snippet1.conf;")) g.Expect(httpCfg).To(ContainSubstring("include /etc/nginx/includes/http_snippet2.conf;")) - g.Expect(files[2].Path).To(Equal("/etc/nginx/conf.d/matches.json")) - g.Expect(files[2].Type).To(Equal(file.TypeRegular)) + g.Expect(files[1].Path).To(Equal("/etc/nginx/conf.d/matches.json")) + g.Expect(files[1].Type).To(Equal(file.TypeRegular)) expString := "{}" - g.Expect(string(files[2].Content)).To(Equal(expString)) + g.Expect(string(files[1].Content)).To(Equal(expString)) - g.Expect(files[3].Path).To(Equal("/etc/nginx/conf.d/plus-api.conf")) - g.Expect(files[3].Type).To(Equal(file.TypeRegular)) - httpCfg = string(files[3].Content) + g.Expect(files[2].Path).To(Equal("/etc/nginx/conf.d/plus-api.conf")) + g.Expect(files[2].Type).To(Equal(file.TypeRegular)) + httpCfg = string(files[2].Content) g.Expect(httpCfg).To(ContainSubstring("listen unix:/var/run/nginx/nginx-plus-api.sock;")) g.Expect(httpCfg).To(ContainSubstring("access_log off;")) g.Expect(httpCfg).To(ContainSubstring("listen 8765;")) @@ -217,26 +210,26 @@ func TestGenerate(t *testing.T) { // snippet include files // content is not checked in this test. - g.Expect(files[4].Path).To(Equal("/etc/nginx/includes/http_snippet1.conf")) - g.Expect(files[5].Path).To(Equal("/etc/nginx/includes/http_snippet2.conf")) - g.Expect(files[6].Path).To(Equal("/etc/nginx/includes/main_snippet1.conf")) - g.Expect(files[7].Path).To(Equal("/etc/nginx/includes/main_snippet2.conf")) + g.Expect(files[3].Path).To(Equal("/etc/nginx/includes/http_snippet1.conf")) + g.Expect(files[4].Path).To(Equal("/etc/nginx/includes/http_snippet2.conf")) + g.Expect(files[5].Path).To(Equal("/etc/nginx/includes/main_snippet1.conf")) + g.Expect(files[6].Path).To(Equal("/etc/nginx/includes/main_snippet2.conf")) - g.Expect(files[8].Path).To(Equal("/etc/nginx/main-includes/deployment_ctx.json")) - deploymentCtx := string(files[8].Content) + g.Expect(files[7].Path).To(Equal("/etc/nginx/main-includes/deployment_ctx.json")) + deploymentCtx := string(files[7].Content) g.Expect(deploymentCtx).To(ContainSubstring("\"integration\":\"ngf\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"cluster_id\":\"test-uid\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"installation_id\":\"test-uid-replicaSet\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"cluster_node_count\":1")) - g.Expect(files[9].Path).To(Equal("/etc/nginx/main-includes/main.conf")) - mainConfStr := string(files[9].Content) + g.Expect(files[8].Path).To(Equal("/etc/nginx/main-includes/main.conf")) + mainConfStr := string(files[8].Content) g.Expect(mainConfStr).To(ContainSubstring("load_module modules/ngx_otel_module.so;")) g.Expect(mainConfStr).To(ContainSubstring("include /etc/nginx/includes/main_snippet1.conf;")) g.Expect(mainConfStr).To(ContainSubstring("include /etc/nginx/includes/main_snippet2.conf;")) - g.Expect(files[10].Path).To(Equal("/etc/nginx/main-includes/mgmt.conf")) - mgmtConf := string(files[10].Content) + g.Expect(files[9].Path).To(Equal("/etc/nginx/main-includes/mgmt.conf")) + mgmtConf := string(files[9].Content) g.Expect(mgmtConf).To(ContainSubstring("usage_report endpoint=test-endpoint")) g.Expect(mgmtConf).To(ContainSubstring("license_token /etc/nginx/secrets/license.jwt")) g.Expect(mgmtConf).To(ContainSubstring("deployment_context /etc/nginx/main-includes/deployment_ctx.json")) @@ -244,31 +237,31 @@ func 
TestGenerate(t *testing.T) { g.Expect(mgmtConf).To(ContainSubstring("ssl_certificate /etc/nginx/secrets/mgmt-tls.crt")) g.Expect(mgmtConf).To(ContainSubstring("ssl_certificate_key /etc/nginx/secrets/mgmt-tls.key")) - g.Expect(files[11].Path).To(Equal("/etc/nginx/secrets/license.jwt")) - g.Expect(string(files[11].Content)).To(Equal("license")) + g.Expect(files[10].Path).To(Equal("/etc/nginx/secrets/license.jwt")) + g.Expect(string(files[10].Content)).To(Equal("license")) - g.Expect(files[12].Path).To(Equal("/etc/nginx/secrets/mgmt-ca.crt")) - g.Expect(string(files[12].Content)).To(Equal("ca")) + g.Expect(files[11].Path).To(Equal("/etc/nginx/secrets/mgmt-ca.crt")) + g.Expect(string(files[11].Content)).To(Equal("ca")) - g.Expect(files[13].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.crt")) - g.Expect(string(files[13].Content)).To(Equal("cert")) + g.Expect(files[12].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.crt")) + g.Expect(string(files[12].Content)).To(Equal("cert")) - g.Expect(files[14].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.key")) - g.Expect(string(files[14].Content)).To(Equal("key")) + g.Expect(files[13].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.key")) + g.Expect(string(files[13].Content)).To(Equal("key")) - g.Expect(files[15].Path).To(Equal("/etc/nginx/secrets/test-certbundle.crt")) - certBundle := string(files[15].Content) + g.Expect(files[14].Path).To(Equal("/etc/nginx/secrets/test-certbundle.crt")) + certBundle := string(files[14].Content) g.Expect(certBundle).To(Equal("test-cert")) - g.Expect(files[16]).To(Equal(file.File{ + g.Expect(files[15]).To(Equal(file.File{ Type: file.TypeSecret, Path: "/etc/nginx/secrets/test-keypair.pem", Content: []byte("test-cert\ntest-key"), })) - g.Expect(files[17].Path).To(Equal("/etc/nginx/stream-conf.d/stream.conf")) - g.Expect(files[17].Type).To(Equal(file.TypeRegular)) - streamCfg := string(files[17].Content) + g.Expect(files[16].Path).To(Equal("/etc/nginx/stream-conf.d/stream.conf")) + g.Expect(files[16].Type).To(Equal(file.TypeRegular)) + streamCfg := string(files[16].Content) g.Expect(streamCfg).To(ContainSubstring("listen unix:/var/run/nginx/app.example.com-443.sock")) g.Expect(streamCfg).To(ContainSubstring("listen 443")) g.Expect(streamCfg).To(ContainSubstring("app.example.com unix:/var/run/nginx/app.example.com-443.sock")) diff --git a/internal/mode/static/nginx/config/main_config.go b/internal/mode/static/nginx/config/main_config.go index 8355c5d0db..1b27a52e74 100644 --- a/internal/mode/static/nginx/config/main_config.go +++ b/internal/mode/static/nginx/config/main_config.go @@ -3,9 +3,9 @@ package config import ( gotemplate "text/template" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/shared" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" ) diff --git a/internal/mode/static/nginx/config/version.go b/internal/mode/static/nginx/config/version.go deleted file mode 100644 index 20e677e270..0000000000 --- a/internal/mode/static/nginx/config/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package config - -import ( - gotemplate "text/template" - - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" - 
"github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" -) - -var versionTemplate = gotemplate.Must(gotemplate.New("version").Parse(versionTemplateText)) - -func executeVersion(conf dataplane.Configuration) []executeResult { - result := executeResult{ - dest: configVersionFile, - data: helpers.MustExecuteTemplate(versionTemplate, conf.Version), - } - - return []executeResult{result} -} diff --git a/internal/mode/static/nginx/config/version_template.go b/internal/mode/static/nginx/config/version_template.go deleted file mode 100644 index ccf46e02cc..0000000000 --- a/internal/mode/static/nginx/config/version_template.go +++ /dev/null @@ -1,12 +0,0 @@ -package config - -const versionTemplateText = ` -server { - listen unix:/var/run/nginx/nginx-config-version.sock; - access_log off; - - location /version { - return 200 {{.}}; - } -} -` diff --git a/internal/mode/static/nginx/config/version_test.go b/internal/mode/static/nginx/config/version_test.go deleted file mode 100644 index ce5913ec95..0000000000 --- a/internal/mode/static/nginx/config/version_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package config - -import ( - "testing" - - . "github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" -) - -func TestExecuteVersion(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - conf := dataplane.Configuration{Version: 42} - res := executeVersion(conf) - g.Expect(res).To(HaveLen(1)) - g.Expect(res[0].dest).To(Equal(configVersionFile)) - g.Expect(string(res[0].data)).To(ContainSubstring("return 200 42;")) -} diff --git a/internal/mode/static/nginx/file/filefakes/fake_clear_folders_osfile_manager.go b/internal/mode/static/nginx/file/filefakes/fake_clear_folders_osfile_manager.go deleted file mode 100644 index 90e3fe03d2..0000000000 --- a/internal/mode/static/nginx/file/filefakes/fake_clear_folders_osfile_manager.go +++ /dev/null @@ -1,191 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package filefakes - -import ( - "os" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" -) - -type FakeClearFoldersOSFileManager struct { - ReadDirStub func(string) ([]os.DirEntry, error) - readDirMutex sync.RWMutex - readDirArgsForCall []struct { - arg1 string - } - readDirReturns struct { - result1 []os.DirEntry - result2 error - } - readDirReturnsOnCall map[int]struct { - result1 []os.DirEntry - result2 error - } - RemoveStub func(string) error - removeMutex sync.RWMutex - removeArgsForCall []struct { - arg1 string - } - removeReturns struct { - result1 error - } - removeReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeClearFoldersOSFileManager) ReadDir(arg1 string) ([]os.DirEntry, error) { - fake.readDirMutex.Lock() - ret, specificReturn := fake.readDirReturnsOnCall[len(fake.readDirArgsForCall)] - fake.readDirArgsForCall = append(fake.readDirArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.ReadDirStub - fakeReturns := fake.readDirReturns - fake.recordInvocation("ReadDir", []interface{}{arg1}) - fake.readDirMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirCallCount() int { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - return len(fake.readDirArgsForCall) -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirCalls(stub func(string) ([]os.DirEntry, error)) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = stub -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirArgsForCall(i int) string { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - argsForCall := fake.readDirArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirReturns(result1 []os.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - fake.readDirReturns = struct { - result1 []os.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeClearFoldersOSFileManager) ReadDirReturnsOnCall(i int, result1 []os.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - if fake.readDirReturnsOnCall == nil { - fake.readDirReturnsOnCall = make(map[int]struct { - result1 []os.DirEntry - result2 error - }) - } - fake.readDirReturnsOnCall[i] = struct { - result1 []os.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeClearFoldersOSFileManager) Remove(arg1 string) error { - fake.removeMutex.Lock() - ret, specificReturn := fake.removeReturnsOnCall[len(fake.removeArgsForCall)] - fake.removeArgsForCall = append(fake.removeArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.RemoveStub - fakeReturns := fake.removeReturns - fake.recordInvocation("Remove", []interface{}{arg1}) - fake.removeMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeClearFoldersOSFileManager) RemoveCallCount() int { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - return len(fake.removeArgsForCall) -} - -func (fake *FakeClearFoldersOSFileManager) RemoveCalls(stub func(string) error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = stub -} - 
-func (fake *FakeClearFoldersOSFileManager) RemoveArgsForCall(i int) string { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - argsForCall := fake.removeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeClearFoldersOSFileManager) RemoveReturns(result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - fake.removeReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeClearFoldersOSFileManager) RemoveReturnsOnCall(i int, result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - if fake.removeReturnsOnCall == nil { - fake.removeReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.removeReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeClearFoldersOSFileManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeClearFoldersOSFileManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ file.ClearFoldersOSFileManager = new(FakeClearFoldersOSFileManager) diff --git a/internal/mode/static/nginx/file/filefakes/fake_dir_entry.go b/internal/mode/static/nginx/file/filefakes/fake_dir_entry.go deleted file mode 100644 index b51ecd7579..0000000000 --- a/internal/mode/static/nginx/file/filefakes/fake_dir_entry.go +++ /dev/null @@ -1,301 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package filefakes - -import ( - "io/fs" - "sync" -) - -type FakeDirEntry struct { - InfoStub func() (fs.FileInfo, error) - infoMutex sync.RWMutex - infoArgsForCall []struct { - } - infoReturns struct { - result1 fs.FileInfo - result2 error - } - infoReturnsOnCall map[int]struct { - result1 fs.FileInfo - result2 error - } - IsDirStub func() bool - isDirMutex sync.RWMutex - isDirArgsForCall []struct { - } - isDirReturns struct { - result1 bool - } - isDirReturnsOnCall map[int]struct { - result1 bool - } - NameStub func() string - nameMutex sync.RWMutex - nameArgsForCall []struct { - } - nameReturns struct { - result1 string - } - nameReturnsOnCall map[int]struct { - result1 string - } - TypeStub func() fs.FileMode - typeMutex sync.RWMutex - typeArgsForCall []struct { - } - typeReturns struct { - result1 fs.FileMode - } - typeReturnsOnCall map[int]struct { - result1 fs.FileMode - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeDirEntry) Info() (fs.FileInfo, error) { - fake.infoMutex.Lock() - ret, specificReturn := fake.infoReturnsOnCall[len(fake.infoArgsForCall)] - fake.infoArgsForCall = append(fake.infoArgsForCall, struct { - }{}) - stub := fake.InfoStub - fakeReturns := fake.infoReturns - fake.recordInvocation("Info", []interface{}{}) - fake.infoMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeDirEntry) InfoCallCount() int { - fake.infoMutex.RLock() - defer fake.infoMutex.RUnlock() - return len(fake.infoArgsForCall) -} - -func (fake *FakeDirEntry) InfoCalls(stub func() (fs.FileInfo, error)) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = stub -} - -func (fake *FakeDirEntry) InfoReturns(result1 fs.FileInfo, result2 error) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = nil - fake.infoReturns = struct { - result1 fs.FileInfo - result2 error - }{result1, result2} -} - -func (fake *FakeDirEntry) InfoReturnsOnCall(i int, result1 fs.FileInfo, result2 error) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = nil - if fake.infoReturnsOnCall == nil { - fake.infoReturnsOnCall = make(map[int]struct { - result1 fs.FileInfo - result2 error - }) - } - fake.infoReturnsOnCall[i] = struct { - result1 fs.FileInfo - result2 error - }{result1, result2} -} - -func (fake *FakeDirEntry) IsDir() bool { - fake.isDirMutex.Lock() - ret, specificReturn := fake.isDirReturnsOnCall[len(fake.isDirArgsForCall)] - fake.isDirArgsForCall = append(fake.isDirArgsForCall, struct { - }{}) - stub := fake.IsDirStub - fakeReturns := fake.isDirReturns - fake.recordInvocation("IsDir", []interface{}{}) - fake.isDirMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) IsDirCallCount() int { - fake.isDirMutex.RLock() - defer fake.isDirMutex.RUnlock() - return len(fake.isDirArgsForCall) -} - -func (fake *FakeDirEntry) IsDirCalls(stub func() bool) { - fake.isDirMutex.Lock() - defer fake.isDirMutex.Unlock() - fake.IsDirStub = stub -} - -func (fake *FakeDirEntry) IsDirReturns(result1 bool) { - fake.isDirMutex.Lock() - defer fake.isDirMutex.Unlock() - fake.IsDirStub = nil - fake.isDirReturns = struct { - result1 bool - }{result1} -} - -func (fake *FakeDirEntry) IsDirReturnsOnCall(i int, result1 bool) { - fake.isDirMutex.Lock() - defer fake.isDirMutex.Unlock() - 
fake.IsDirStub = nil - if fake.isDirReturnsOnCall == nil { - fake.isDirReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.isDirReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *FakeDirEntry) Name() string { - fake.nameMutex.Lock() - ret, specificReturn := fake.nameReturnsOnCall[len(fake.nameArgsForCall)] - fake.nameArgsForCall = append(fake.nameArgsForCall, struct { - }{}) - stub := fake.NameStub - fakeReturns := fake.nameReturns - fake.recordInvocation("Name", []interface{}{}) - fake.nameMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) NameCallCount() int { - fake.nameMutex.RLock() - defer fake.nameMutex.RUnlock() - return len(fake.nameArgsForCall) -} - -func (fake *FakeDirEntry) NameCalls(stub func() string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = stub -} - -func (fake *FakeDirEntry) NameReturns(result1 string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = nil - fake.nameReturns = struct { - result1 string - }{result1} -} - -func (fake *FakeDirEntry) NameReturnsOnCall(i int, result1 string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = nil - if fake.nameReturnsOnCall == nil { - fake.nameReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.nameReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *FakeDirEntry) Type() fs.FileMode { - fake.typeMutex.Lock() - ret, specificReturn := fake.typeReturnsOnCall[len(fake.typeArgsForCall)] - fake.typeArgsForCall = append(fake.typeArgsForCall, struct { - }{}) - stub := fake.TypeStub - fakeReturns := fake.typeReturns - fake.recordInvocation("Type", []interface{}{}) - fake.typeMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) TypeCallCount() int { - fake.typeMutex.RLock() - defer fake.typeMutex.RUnlock() - return len(fake.typeArgsForCall) -} - -func (fake *FakeDirEntry) TypeCalls(stub func() fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = stub -} - -func (fake *FakeDirEntry) TypeReturns(result1 fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = nil - fake.typeReturns = struct { - result1 fs.FileMode - }{result1} -} - -func (fake *FakeDirEntry) TypeReturnsOnCall(i int, result1 fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = nil - if fake.typeReturnsOnCall == nil { - fake.typeReturnsOnCall = make(map[int]struct { - result1 fs.FileMode - }) - } - fake.typeReturnsOnCall[i] = struct { - result1 fs.FileMode - }{result1} -} - -func (fake *FakeDirEntry) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.infoMutex.RLock() - defer fake.infoMutex.RUnlock() - fake.isDirMutex.RLock() - defer fake.isDirMutex.RUnlock() - fake.nameMutex.RLock() - defer fake.nameMutex.RUnlock() - fake.typeMutex.RLock() - defer fake.typeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeDirEntry) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - 
fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ fs.DirEntry = new(FakeDirEntry) diff --git a/internal/mode/static/nginx/file/filefakes/fake_manager.go b/internal/mode/static/nginx/file/filefakes/fake_manager.go deleted file mode 100644 index 52b34e8e72..0000000000 --- a/internal/mode/static/nginx/file/filefakes/fake_manager.go +++ /dev/null @@ -1,116 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package filefakes - -import ( - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" -) - -type FakeManager struct { - ReplaceFilesStub func([]file.File) error - replaceFilesMutex sync.RWMutex - replaceFilesArgsForCall []struct { - arg1 []file.File - } - replaceFilesReturns struct { - result1 error - } - replaceFilesReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeManager) ReplaceFiles(arg1 []file.File) error { - var arg1Copy []file.File - if arg1 != nil { - arg1Copy = make([]file.File, len(arg1)) - copy(arg1Copy, arg1) - } - fake.replaceFilesMutex.Lock() - ret, specificReturn := fake.replaceFilesReturnsOnCall[len(fake.replaceFilesArgsForCall)] - fake.replaceFilesArgsForCall = append(fake.replaceFilesArgsForCall, struct { - arg1 []file.File - }{arg1Copy}) - stub := fake.ReplaceFilesStub - fakeReturns := fake.replaceFilesReturns - fake.recordInvocation("ReplaceFiles", []interface{}{arg1Copy}) - fake.replaceFilesMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) ReplaceFilesCallCount() int { - fake.replaceFilesMutex.RLock() - defer fake.replaceFilesMutex.RUnlock() - return len(fake.replaceFilesArgsForCall) -} - -func (fake *FakeManager) ReplaceFilesCalls(stub func([]file.File) error) { - fake.replaceFilesMutex.Lock() - defer fake.replaceFilesMutex.Unlock() - fake.ReplaceFilesStub = stub -} - -func (fake *FakeManager) ReplaceFilesArgsForCall(i int) []file.File { - fake.replaceFilesMutex.RLock() - defer fake.replaceFilesMutex.RUnlock() - argsForCall := fake.replaceFilesArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeManager) ReplaceFilesReturns(result1 error) { - fake.replaceFilesMutex.Lock() - defer fake.replaceFilesMutex.Unlock() - fake.ReplaceFilesStub = nil - fake.replaceFilesReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) ReplaceFilesReturnsOnCall(i int, result1 error) { - fake.replaceFilesMutex.Lock() - defer fake.replaceFilesMutex.Unlock() - fake.ReplaceFilesStub = nil - if fake.replaceFilesReturnsOnCall == nil { - fake.replaceFilesReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.replaceFilesReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.replaceFilesMutex.RLock() - defer fake.replaceFilesMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - 
fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ file.Manager = new(FakeManager) diff --git a/internal/mode/static/nginx/file/folders.go b/internal/mode/static/nginx/file/folders.go deleted file mode 100644 index 847ca6312a..0000000000 --- a/internal/mode/static/nginx/file/folders.go +++ /dev/null @@ -1,56 +0,0 @@ -package file - -import ( - "fmt" - "os" - "path/filepath" - "slices" -) - -//counterfeiter:generate io/fs.DirEntry - -//counterfeiter:generate . ClearFoldersOSFileManager - -// ClearFoldersOSFileManager is an interface that exposes File I/O operations for ClearFolders. -// Used for unit testing. -type ClearFoldersOSFileManager interface { - // ReadDir returns the directory entries for the directory. - ReadDir(dirname string) ([]os.DirEntry, error) - // Remove removes the file with given name. - Remove(name string) error -} - -// These files are needed on startup, so skip deleting them. -const ( - mainConf = "/etc/nginx/main-includes/main.conf" - mgmtConf = "/etc/nginx/main-includes/mgmt.conf" - deployCtx = "/etc/nginx/main-includes/deployment_ctx.json" -) - -var ignoreFilePaths = []string{mainConf, mgmtConf, deployCtx} - -// ClearFolders removes all files in the given folders and returns the removed files' full paths. -func ClearFolders(fileMgr ClearFoldersOSFileManager, paths []string) (removedFiles []string, e error) { - for _, path := range paths { - entries, err := fileMgr.ReadDir(path) - if err != nil { - return removedFiles, fmt.Errorf("failed to read directory %q: %w", path, err) - } - - for _, entry := range entries { - entryPath := filepath.Join(path, entry.Name()) - - if slices.Contains(ignoreFilePaths, entryPath) { - continue - } - - if err := fileMgr.Remove(entryPath); err != nil { - return removedFiles, fmt.Errorf("failed to remove %q: %w", entryPath, err) - } - - removedFiles = append(removedFiles, entryPath) - } - } - - return removedFiles, nil -} diff --git a/internal/mode/static/nginx/file/folders_test.go b/internal/mode/static/nginx/file/folders_test.go deleted file mode 100644 index 8ae8fe1808..0000000000 --- a/internal/mode/static/nginx/file/folders_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package file_test - -import ( - "errors" - "os" - "path/filepath" - "testing" - - . 
"github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" -) - -func writeFile(t *testing.T, name string, data []byte) { - t.Helper() - g := NewWithT(t) - - //nolint:gosec // the file permission is ok for unit testing - g.Expect(os.WriteFile(name, data, 0o644)).To(Succeed()) -} - -func TestClearFoldersRemoves(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - tempDir := t.TempDir() - - path1 := filepath.Join(tempDir, "path1") - writeFile(t, path1, []byte("test")) - path2 := filepath.Join(tempDir, "path2") - writeFile(t, path2, []byte("test")) - - removedFiles, err := file.ClearFolders(file.NewStdLibOSFileManager(), []string{tempDir}) - - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(removedFiles).To(ConsistOf(path1, path2)) - - entries, err := os.ReadDir(tempDir) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(entries).To(BeEmpty()) -} - -func TestClearFoldersIgnoresPaths(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - fakeFileMgr := &filefakes.FakeClearFoldersOSFileManager{ - ReadDirStub: func(_ string) ([]os.DirEntry, error) { - return []os.DirEntry{ - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "deployment_ctx.json" - }, - }, - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "mgmt.conf" - }, - }, - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "main.conf" - }, - }, - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "can-be-removed.conf" - }, - }, - }, nil - }, - } - - removed, err := file.ClearFolders(fakeFileMgr, []string{"/etc/nginx/main-includes"}) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(removed).To(HaveLen(1)) - g.Expect(removed[0]).To(Equal("/etc/nginx/main-includes/can-be-removed.conf")) -} - -func TestClearFoldersFails(t *testing.T) { - t.Parallel() - files := []string{"file"} - - testErr := errors.New("test error") - - tests := []struct { - fileMgr *filefakes.FakeClearFoldersOSFileManager - name string - }{ - { - fileMgr: &filefakes.FakeClearFoldersOSFileManager{ - ReadDirStub: func(_ string) ([]os.DirEntry, error) { - return nil, testErr - }, - }, - name: "ReadDir fails", - }, - { - fileMgr: &filefakes.FakeClearFoldersOSFileManager{ - ReadDirStub: func(_ string) ([]os.DirEntry, error) { - return []os.DirEntry{ - &filefakes.FakeDirEntry{ - NameStub: func() string { - return "file" - }, - }, - }, nil - }, - RemoveStub: func(_ string) error { - return testErr - }, - }, - name: "Remove fails", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - removedFiles, err := file.ClearFolders(test.fileMgr, files) - - g.Expect(err).To(MatchError(testErr)) - g.Expect(removedFiles).To(BeNil()) - }) - } -} diff --git a/internal/mode/static/nginx/file/manager_test.go b/internal/mode/static/nginx/file/manager_test.go deleted file mode 100644 index 114b81c3dc..0000000000 --- a/internal/mode/static/nginx/file/manager_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package file_test - -import ( - "errors" - "os" - "path/filepath" - - "github.com/go-logr/logr" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/file/filefakes" -) - -var _ = Describe("EventHandler", func() { - Describe("Replace files", Ordered, func() { - var ( - mgr *file.ManagerImpl - tmpDir string - regular1, regular2, regular3, secret file.File - ) - - ensureFiles := func(files []file.File) { - entries, err := os.ReadDir(tmpDir) - Expect(err).ToNot(HaveOccurred()) - Expect(entries).Should(HaveLen(len(files))) - - entriesMap := make(map[string]os.DirEntry) - for _, entry := range entries { - entriesMap[entry.Name()] = entry - } - - for _, f := range files { - _, ok := entriesMap[filepath.Base(f.Path)] - Expect(ok).Should(BeTrue()) - - info, err := os.Stat(f.Path) - Expect(err).ToNot(HaveOccurred()) - - Expect(info.IsDir()).To(BeFalse()) - - if f.Type == file.TypeRegular { - Expect(info.Mode()).To(Equal(os.FileMode(0o644))) - } else { - Expect(info.Mode()).To(Equal(os.FileMode(0o640))) - } - - bytes, err := os.ReadFile(f.Path) - Expect(err).ToNot(HaveOccurred()) - Expect(bytes).To(Equal(f.Content)) - } - } - - ensureNotExist := func(files ...file.File) { - for _, f := range files { - _, err := os.Stat(f.Path) - Expect(os.IsNotExist(err)).To(BeTrue()) - } - } - - BeforeAll(func() { - mgr = file.NewManagerImpl(logr.Discard(), file.NewStdLibOSFileManager()) - tmpDir = GinkgoT().TempDir() - - regular1 = file.File{ - Type: file.TypeRegular, - Path: filepath.Join(tmpDir, "regular-1.conf"), - Content: []byte("regular-1"), - } - regular2 = file.File{ - Type: file.TypeRegular, - Path: filepath.Join(tmpDir, "regular-2.conf"), - Content: []byte("regular-2"), - } - regular3 = file.File{ - Type: file.TypeRegular, - Path: filepath.Join(tmpDir, "regular-3.conf"), - Content: []byte("regular-3"), - } - secret = file.File{ - Type: file.TypeSecret, - Path: filepath.Join(tmpDir, "secret.conf"), - Content: []byte("secret"), - } - }) - - It("should write initial config", func() { - files := []file.File{regular1, regular2, secret} - - err := mgr.ReplaceFiles(files) - Expect(err).ToNot(HaveOccurred()) - - ensureFiles(files) - }) - - It("should write subsequent config", func() { - files := []file.File{ - regular2, // overwriting - regular3, // adding - secret, // overwriting - } - - err := mgr.ReplaceFiles(files) - Expect(err).ToNot(HaveOccurred()) - - ensureFiles(files) - ensureNotExist(regular1) - }) - - It("should remove all files", func() { - err := mgr.ReplaceFiles(nil) - Expect(err).ToNot(HaveOccurred()) - - ensureNotExist(regular2, regular3, secret) - }) - }) - - When("file does not exist", func() { - It("should not error", func() { - fakeOSMgr := &filefakes.FakeOSFileManager{} - mgr := file.NewManagerImpl(logr.Discard(), fakeOSMgr) - - files := []file.File{ - { - Type: file.TypeRegular, - Path: "regular-1.conf", - Content: []byte("regular-1"), - }, - } - - Expect(mgr.ReplaceFiles(files)).ToNot(HaveOccurred()) - - fakeOSMgr.RemoveReturns(os.ErrNotExist) - Expect(mgr.ReplaceFiles(files)).ToNot(HaveOccurred()) - }) - }) - - When("file type is not supported", func() { - It("should panic", func() { - mgr := file.NewManagerImpl(logr.Discard(), nil) - - files := []file.File{ - { - Type: 123, - Path: "unsupported.conf", - }, - } - - replace := func() { - _ = mgr.ReplaceFiles(files) - } - - Expect(replace).Should(Panic()) - }) - }) - - Describe("Edge cases with IO errors", func() { - var ( - files = []file.File{ - { - Type: file.TypeRegular, - Path: "regular.conf", - 
Content: []byte("regular"), - }, - { - Type: file.TypeSecret, - Path: "secret.conf", - Content: []byte("secret"), - }, - } - errTest = errors.New("test error") - ) - - DescribeTable( - "should return error on file IO error", - func(fakeOSMgr *filefakes.FakeOSFileManager) { - mgr := file.NewManagerImpl(logr.Discard(), fakeOSMgr) - - // special case for Remove - // to kick off removing, we need to successfully write files beforehand - if fakeOSMgr.RemoveStub != nil { - err := mgr.ReplaceFiles(files) - Expect(err).ToNot(HaveOccurred()) - } - - err := mgr.ReplaceFiles(files) - Expect(err).Should(HaveOccurred()) - Expect(err).To(MatchError(errTest)) - }, - Entry( - "Remove", - &filefakes.FakeOSFileManager{ - RemoveStub: func(_ string) error { - return errTest - }, - }, - ), - Entry( - "Create", - &filefakes.FakeOSFileManager{ - CreateStub: func(_ string) (*os.File, error) { - return nil, errTest - }, - }, - ), - Entry( - "Chmod", - &filefakes.FakeOSFileManager{ - ChmodStub: func(_ *os.File, _ os.FileMode) error { - return errTest - }, - }, - ), - Entry( - "Write", - &filefakes.FakeOSFileManager{ - WriteStub: func(_ *os.File, _ []byte) error { - return errTest - }, - }, - ), - ) - }) -}) diff --git a/internal/mode/static/nginx/runtime/clients.go b/internal/mode/static/nginx/runtime/clients.go deleted file mode 100644 index a01a8ef09f..0000000000 --- a/internal/mode/static/nginx/runtime/clients.go +++ /dev/null @@ -1,39 +0,0 @@ -package runtime - -import ( - "context" - "fmt" - "net" - "net/http" - - "github.com/nginxinc/nginx-plus-go-client/client" -) - -const ( - nginxPlusAPISock = "/var/run/nginx/nginx-plus-api.sock" - nginxPlusAPIURI = "http://nginx-plus-api/api" -) - -// CreatePlusClient returns a client for communicating with the NGINX Plus API. -func CreatePlusClient() (*client.NginxClient, error) { - var plusClient *client.NginxClient - var err error - - httpClient := GetSocketClient(nginxPlusAPISock) - plusClient, err = client.NewNginxClient(nginxPlusAPIURI, client.WithHTTPClient(&httpClient)) - if err != nil { - return nil, fmt.Errorf("failed to create NginxClient for Plus: %w", err) - } - return plusClient, nil -} - -// GetSocketClient gets an http.Client with a unix socket transport. -func GetSocketClient(sockPath string) http.Client { - return http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", sockPath) - }, - }, - } -} diff --git a/internal/mode/static/nginx/runtime/manager.go b/internal/mode/static/nginx/runtime/manager.go deleted file mode 100644 index 8c378f1e99..0000000000 --- a/internal/mode/static/nginx/runtime/manager.go +++ /dev/null @@ -1,284 +0,0 @@ -package runtime - -import ( - "context" - "errors" - "fmt" - "io/fs" - "os" - "strconv" - "strings" - "syscall" - "time" - - "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - "k8s.io/apimachinery/pkg/util/wait" -) - -//go:generate go tool counterfeiter -generate - -const ( - // PidFile specifies the location of the PID file for the Nginx process. - PidFile = "/var/run/nginx/nginx.pid" - // PidFileTimeout defines the timeout duration for accessing the PID file. - PidFileTimeout = 10000 * time.Millisecond - // NginxReloadTimeout sets the timeout duration for reloading the Nginx configuration. 
- NginxReloadTimeout = 60000 * time.Millisecond -) - -type ( - ReadFileFunc func(string) ([]byte, error) - CheckFileFunc func(string) (fs.FileInfo, error) -) - -var childProcPathFmt = "/proc/%[1]v/task/%[1]v/children" - -//counterfeiter:generate . NginxPlusClient - -type NginxPlusClient interface { - UpdateHTTPServers( - upstream string, - servers []ngxclient.UpstreamServer, - ) ( - added []ngxclient.UpstreamServer, - deleted []ngxclient.UpstreamServer, - updated []ngxclient.UpstreamServer, - err error, - ) - GetUpstreams() (*ngxclient.Upstreams, error) - UpdateStreamServers( - upstream string, - servers []ngxclient.StreamUpstreamServer, - ) ( - added []ngxclient.StreamUpstreamServer, - deleted []ngxclient.StreamUpstreamServer, - updated []ngxclient.StreamUpstreamServer, - err error, - ) - GetStreamUpstreams() (*ngxclient.StreamUpstreams, error) -} - -//counterfeiter:generate . Manager - -// Manager manages the runtime of NGINX. -type Manager interface { - // Reload reloads NGINX configuration. It is a blocking operation. - Reload(ctx context.Context, configVersion int) error - // IsPlus returns whether or not we are running NGINX plus. - IsPlus() bool - // GetUpstreams uses the NGINX Plus API to get the upstreams. - // Only usable if running NGINX Plus. - GetUpstreams() (ngxclient.Upstreams, ngxclient.StreamUpstreams, error) - // UpdateHTTPServers uses the NGINX Plus API to update HTTP upstream servers. - // Only usable if running NGINX Plus. - UpdateHTTPServers(string, []ngxclient.UpstreamServer) error - // UpdateStreamServers uses the NGINX Plus API to update stream upstream servers. - // Only usable if running NGINX Plus. - UpdateStreamServers(string, []ngxclient.StreamUpstreamServer) error -} - -// MetricsCollector is an interface for the metrics of the NGINX runtime manager. -// -//counterfeiter:generate . MetricsCollector -type MetricsCollector interface { - IncReloadCount() - IncReloadErrors() - ObserveLastReloadTime(ms time.Duration) -} - -// ManagerImpl implements Manager. -type ManagerImpl struct { - processHandler ProcessHandler - metricsCollector MetricsCollector - verifyClient nginxConfigVerifier - ngxPlusClient NginxPlusClient - logger logr.Logger -} - -// NewManagerImpl creates a new ManagerImpl. -func NewManagerImpl( - ngxPlusClient NginxPlusClient, - collector MetricsCollector, - logger logr.Logger, - processHandler ProcessHandler, - verifyClient nginxConfigVerifier, -) *ManagerImpl { - return &ManagerImpl{ - processHandler: processHandler, - metricsCollector: collector, - verifyClient: verifyClient, - ngxPlusClient: ngxPlusClient, - logger: logger, - } -} - -// IsPlus returns whether or not we are running NGINX plus. -func (m *ManagerImpl) IsPlus() bool { - return m.ngxPlusClient != nil -} - -func (m *ManagerImpl) Reload(ctx context.Context, configVersion int) error { - start := time.Now() - // We find the main NGINX PID on every reload because it will change if the NGINX container is restarted. 
- pid, err := m.processHandler.FindMainProcess(ctx, PidFileTimeout) - if err != nil { - return fmt.Errorf("failed to find NGINX main process: %w", err) - } - - childProcFile := fmt.Sprintf(childProcPathFmt, pid) - previousChildProcesses, err := m.processHandler.ReadFile(childProcFile) - if err != nil { - return err - } - - // send HUP signal to the NGINX main process reload configuration - // See https://nginx.org/en/docs/control.html - if errP := m.processHandler.Kill(pid); errP != nil { - m.metricsCollector.IncReloadErrors() - return fmt.Errorf("failed to send the HUP signal to NGINX main: %w", errP) - } - - if err = m.verifyClient.WaitForCorrectVersion( - ctx, - configVersion, - childProcFile, - previousChildProcesses, - os.ReadFile, - ); err != nil { - m.metricsCollector.IncReloadErrors() - return err - } - m.metricsCollector.IncReloadCount() - - finish := time.Now() - m.metricsCollector.ObserveLastReloadTime(finish.Sub(start)) - return nil -} - -// GetUpstreams uses the NGINX Plus API to get the upstreams. -// Only usable if running NGINX Plus. -func (m *ManagerImpl) GetUpstreams() (ngxclient.Upstreams, ngxclient.StreamUpstreams, error) { - if !m.IsPlus() { - panic("cannot get upstream servers: NGINX Plus not enabled") - } - - upstreams, err := m.ngxPlusClient.GetUpstreams() - if err != nil { - return nil, nil, err - } - - if upstreams == nil { - return nil, nil, errors.New("GET upstreams returned nil value") - } - - streamUpstreams, err := m.ngxPlusClient.GetStreamUpstreams() - if err != nil { - return nil, nil, err - } - - if streamUpstreams == nil { - return nil, nil, errors.New("GET stream upstreams returned nil value") - } - - return *upstreams, *streamUpstreams, nil -} - -// UpdateHTTPServers uses the NGINX Plus API to update HTTP upstream servers. -// Only usable if running NGINX Plus. -func (m *ManagerImpl) UpdateHTTPServers(upstream string, servers []ngxclient.UpstreamServer) error { - if !m.IsPlus() { - panic("cannot update HTTP upstream servers: NGINX Plus not enabled") - } - - added, deleted, updated, err := m.ngxPlusClient.UpdateHTTPServers(upstream, servers) - m.logger.V(1).Info("Added upstream servers", "count", len(added)) - m.logger.V(1).Info("Deleted upstream servers", "count", len(deleted)) - m.logger.V(1).Info("Updated upstream servers", "count", len(updated)) - - return err -} - -// UpdateStreamServers uses the NGINX Plus API to update stream upstream servers. -// Only usable if running NGINX Plus. -func (m *ManagerImpl) UpdateStreamServers(upstream string, servers []ngxclient.StreamUpstreamServer) error { - if !m.IsPlus() { - panic("cannot update stream upstream servers: NGINX Plus not enabled") - } - - added, deleted, updated, err := m.ngxPlusClient.UpdateStreamServers(upstream, servers) - m.logger.V(1).Info("Added stream upstream servers", "count", len(added)) - m.logger.V(1).Info("Deleted stream upstream servers", "count", len(deleted)) - m.logger.V(1).Info("Updated stream upstream servers", "count", len(updated)) - - return err -} - -//counterfeiter:generate . 
ProcessHandler - -type ProcessHandler interface { - FindMainProcess( - ctx context.Context, - timeout time.Duration, - ) (int, error) - ReadFile(file string) ([]byte, error) - Kill(pid int) error -} - -type ProcessHandlerImpl struct { - readFile ReadFileFunc - checkFile CheckFileFunc -} - -func NewProcessHandlerImpl(readFile ReadFileFunc, checkFile CheckFileFunc) *ProcessHandlerImpl { - return &ProcessHandlerImpl{ - readFile: readFile, - checkFile: checkFile, - } -} - -func (p *ProcessHandlerImpl) FindMainProcess( - ctx context.Context, - timeout time.Duration, -) (int, error) { - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - err := wait.PollUntilContextCancel( - ctx, - 500*time.Millisecond, - true, /* poll immediately */ - func(_ context.Context) (bool, error) { - _, err := p.checkFile(PidFile) - if err == nil { - return true, nil - } - if !errors.Is(err, fs.ErrNotExist) { - return false, err - } - return false, nil - }) - if err != nil { - return 0, err - } - - content, err := p.readFile(PidFile) - if err != nil { - return 0, err - } - - pid, err := strconv.Atoi(strings.TrimSpace(string(content))) - if err != nil { - return 0, fmt.Errorf("invalid pid file content %q: %w", content, err) - } - - return pid, nil -} - -func (p *ProcessHandlerImpl) ReadFile(file string) ([]byte, error) { - return p.readFile(file) -} - -func (p *ProcessHandlerImpl) Kill(pid int) error { - return syscall.Kill(pid, syscall.SIGHUP) -} diff --git a/internal/mode/static/nginx/runtime/manager_test.go b/internal/mode/static/nginx/runtime/manager_test.go deleted file mode 100644 index 1c40c03513..0000000000 --- a/internal/mode/static/nginx/runtime/manager_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package runtime_test - -import ( - "context" - "errors" - "fmt" - "io/fs" - "testing" - "time" - - "github.com/go-logr/logr" - ngxclient "github.com/nginxinc/nginx-plus-go-client/client" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime/runtimefakes" -) - -var _ = Describe("NGINX Runtime Manager", func() { - It("returns whether or not we're using NGINX Plus", func() { - mgr := runtime.NewManagerImpl(nil, nil, logr.Discard(), nil, nil) - Expect(mgr.IsPlus()).To(BeFalse()) - - mgr = runtime.NewManagerImpl(&ngxclient.NginxClient{}, nil, logr.Discard(), nil, nil) - Expect(mgr.IsPlus()).To(BeTrue()) - }) - - var ( - err error - manager runtime.Manager - upstreamServers []ngxclient.UpstreamServer - streamUpstreamServers []ngxclient.StreamUpstreamServer - ngxPlusClient *runtimefakes.FakeNginxPlusClient - process *runtimefakes.FakeProcessHandler - - metrics *runtimefakes.FakeMetricsCollector - verifyClient *runtimefakes.FakeVerifyClient - ) - - BeforeEach(func() { - upstreamServers = []ngxclient.UpstreamServer{ - {}, - } - streamUpstreamServers = []ngxclient.StreamUpstreamServer{ - {}, - } - }) - - Context("Reload", func() { - BeforeEach(func() { - ngxPlusClient = &runtimefakes.FakeNginxPlusClient{} - process = &runtimefakes.FakeProcessHandler{} - metrics = &runtimefakes.FakeMetricsCollector{} - verifyClient = &runtimefakes.FakeVerifyClient{} - manager = runtime.NewManagerImpl(ngxPlusClient, metrics, logr.Discard(), process, verifyClient) - }) - - It("Is successful", func() { - Expect(manager.Reload(context.Background(), 1)).To(Succeed()) - - Expect(process.FindMainProcessCallCount()).To(Equal(1)) - Expect(process.ReadFileCallCount()).To(Equal(1)) - Expect(process.KillCallCount()).To(Equal(1)) - Expect(metrics.IncReloadCountCallCount()).To(Equal(1)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(1)) - Expect(metrics.ObserveLastReloadTimeCallCount()).To(Equal(1)) - Expect(metrics.IncReloadErrorsCallCount()).To(Equal(0)) - }) - - It("Fails to find the main process", func() { - process.FindMainProcessReturns(0, fmt.Errorf("failed to find process")) - - err := manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("failed to find NGINX main process: failed to find process")) - Expect(process.ReadFileCallCount()).To(Equal(0)) - Expect(process.KillCallCount()).To(Equal(0)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(0)) - }) - - It("Fails to read file", func() { - process.FindMainProcessReturns(1234, nil) - process.ReadFileReturns(nil, fmt.Errorf("failed to read file")) - - err := manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("failed to read file")) - Expect(process.KillCallCount()).To(Equal(0)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(0)) - }) - - It("Fails to send kill signal", func() { - process.FindMainProcessReturns(1234, nil) - process.ReadFileReturns([]byte("child1\nchild2"), nil) - process.KillReturns(fmt.Errorf("failed to send kill signal")) - - err := manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("failed to send the HUP signal to NGINX main: failed to send kill signal")) - Expect(metrics.IncReloadErrorsCallCount()).To(Equal(1)) - Expect(verifyClient.WaitForCorrectVersionCallCount()).To(Equal(0)) - }) - - It("times out waiting for correct version", func() { - process.FindMainProcessReturns(1234, nil) - process.ReadFileReturns([]byte("child1\nchild2"), nil) - process.KillReturns(nil) - verifyClient.WaitForCorrectVersionReturns(fmt.Errorf("timeout waiting for correct version")) - - err := 
manager.Reload(context.Background(), 1) - - Expect(err).To(MatchError("timeout waiting for correct version")) - Expect(metrics.IncReloadErrorsCallCount()).To(Equal(1)) - }) - - When("MetricsCollector is nil", func() { - It("panics", func() { - metrics = nil - manager = runtime.NewManagerImpl(ngxPlusClient, metrics, logr.Discard(), process, verifyClient) - - reload := func() { - err = manager.Reload(context.Background(), 0) - } - - Expect(reload).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - When("VerifyClient is nil", func() { - It("panics", func() { - metrics = &runtimefakes.FakeMetricsCollector{} - verifyClient = nil - manager = runtime.NewManagerImpl(ngxPlusClient, metrics, logr.Discard(), process, verifyClient) - - reload := func() { - err = manager.Reload(context.Background(), 0) - } - - Expect(reload).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - }) - }) - - When("running NGINX plus", func() { - BeforeEach(func() { - ngxPlusClient = &runtimefakes.FakeNginxPlusClient{} - manager = runtime.NewManagerImpl(ngxPlusClient, nil, logr.Discard(), nil, nil) - }) - - It("successfully updates HTTP server upstream", func() { - Expect(manager.UpdateHTTPServers("test", upstreamServers)).To(Succeed()) - }) - - It("successfully updates stream server upstream", func() { - Expect(manager.UpdateStreamServers("test", streamUpstreamServers)).To(Succeed()) - }) - - It("returns no upstreams from NGINX Plus API when upstreams are nil", func() { - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(upstreams).To(BeEmpty()) - Expect(streamUpstreams).To(BeEmpty()) - }) - - It("successfully returns server upstreams", func() { - expUpstreams := ngxclient.Upstreams{ - "upstream1": { - Zone: "zone1", - Peers: []ngxclient.Peer{ - {ID: 1, Name: "peer1-name"}, - }, - Queue: ngxclient.Queue{Size: 10}, - Zombies: 2, - }, - "upstream2": { - Zone: "zone2", - Peers: []ngxclient.Peer{ - {ID: 2, Name: "peer2-name"}, - }, - Queue: ngxclient.Queue{Size: 20}, - Zombies: 1, - }, - } - - expStreamUpstreams := ngxclient.StreamUpstreams{ - "upstream1": { - Zone: "zone1", - Peers: []ngxclient.StreamPeer{ - {ID: 1, Name: "peer1-name"}, - }, - Zombies: 2, - }, - "upstream2": { - Zone: "zone2", - Peers: []ngxclient.StreamPeer{ - {ID: 2, Name: "peer2-name"}, - }, - Zombies: 1, - }, - } - - ngxPlusClient.GetUpstreamsReturns(&expUpstreams, nil) - ngxPlusClient.GetStreamUpstreamsReturns(&expStreamUpstreams, nil) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).NotTo(HaveOccurred()) - Expect(expUpstreams).To(Equal(upstreams)) - Expect(expStreamUpstreams).To(Equal(streamUpstreams)) - }) - - It("returns an error when GetUpstreams fails", func() { - ngxPlusClient.GetUpstreamsReturns(nil, errors.New("failed to get upstreams")) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("failed to get upstreams")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - - It("returns an error when GetUpstreams returns nil", func() { - ngxPlusClient.GetUpstreamsReturns(nil, nil) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("GET upstreams returned nil value")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - - It("returns an error when GetStreamUpstreams fails", func() { - ngxPlusClient.GetUpstreamsReturns(&ngxclient.Upstreams{}, nil) - 
ngxPlusClient.GetStreamUpstreamsReturns(nil, errors.New("failed to get upstreams")) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("failed to get upstreams")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - - It("returns an error when GetStreamUpstreams returns nil", func() { - ngxPlusClient.GetUpstreamsReturns(&ngxclient.Upstreams{}, nil) - ngxPlusClient.GetStreamUpstreamsReturns(nil, nil) - - upstreams, streamUpstreams, err := manager.GetUpstreams() - - Expect(err).To(HaveOccurred()) - Expect(err).To(MatchError("GET stream upstreams returned nil value")) - Expect(upstreams).To(BeNil()) - Expect(streamUpstreams).To(BeNil()) - }) - }) - - When("not running NGINX plus", func() { - BeforeEach(func() { - ngxPlusClient = nil - manager = runtime.NewManagerImpl(ngxPlusClient, nil, logr.Discard(), nil, nil) - }) - - It("should panic when fetching upstream servers", func() { - upstreams := func() { - _, _, err = manager.GetUpstreams() - } - - Expect(upstreams).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should panic when updating HTTP upstream servers", func() { - updateServers := func() { - err = manager.UpdateHTTPServers("test", upstreamServers) - } - - Expect(updateServers).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should panic when updating stream upstream servers", func() { - updateServers := func() { - err = manager.UpdateStreamServers("test", streamUpstreamServers) - } - - Expect(updateServers).To(Panic()) - Expect(err).ToNot(HaveOccurred()) - }) - }) -}) - -func TestFindMainProcess(t *testing.T) { - t.Parallel() - readFileFuncGen := func(content []byte) runtime.ReadFileFunc { - return func(name string) ([]byte, error) { - if name != runtime.PidFile { - return nil, errors.New("error") - } - return content, nil - } - } - readFileError := func(string) ([]byte, error) { - return nil, errors.New("error") - } - - checkFileFuncGen := func(content fs.FileInfo) runtime.CheckFileFunc { - return func(name string) (fs.FileInfo, error) { - if name != runtime.PidFile { - return nil, errors.New("error") - } - return content, nil - } - } - checkFileError := func(string) (fs.FileInfo, error) { - return nil, errors.New("error") - } - var testFileInfo fs.FileInfo - ctx := context.Background() - cancellingCtx, cancel := context.WithCancel(ctx) - time.AfterFunc(1*time.Millisecond, cancel) - - tests := []struct { - ctx context.Context - readFile runtime.ReadFileFunc - checkFile runtime.CheckFileFunc - name string - expected int - expectError bool - }{ - { - ctx: ctx, - readFile: readFileFuncGen([]byte("1\n")), - checkFile: checkFileFuncGen(testFileInfo), - expected: 1, - expectError: false, - name: "normal case", - }, - { - ctx: ctx, - readFile: readFileFuncGen([]byte("")), - checkFile: checkFileFuncGen(testFileInfo), - expected: 0, - expectError: true, - name: "empty file content", - }, - { - ctx: ctx, - readFile: readFileFuncGen([]byte("not a number")), - checkFile: checkFileFuncGen(testFileInfo), - expected: 0, - expectError: true, - name: "bad file content", - }, - { - ctx: ctx, - readFile: readFileError, - checkFile: checkFileFuncGen(testFileInfo), - expected: 0, - expectError: true, - name: "cannot read file", - }, - { - ctx: ctx, - readFile: readFileFuncGen([]byte("1\n")), - checkFile: checkFileError, - expected: 0, - expectError: true, - name: "cannot find pid file", - }, - { - ctx: cancellingCtx, - readFile: readFileFuncGen([]byte("1\n")), - checkFile: 
checkFileError, - expected: 0, - expectError: true, - name: "context canceled", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - p := runtime.NewProcessHandlerImpl( - test.readFile, - test.checkFile) - result, err := p.FindMainProcess(test.ctx, 2*time.Millisecond) - - if test.expectError { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(test.expected)) - } - }) - } -} diff --git a/internal/mode/static/nginx/runtime/runtime_suite_test.go b/internal/mode/static/nginx/runtime/runtime_suite_test.go deleted file mode 100644 index 8916c4bf14..0000000000 --- a/internal/mode/static/nginx/runtime/runtime_suite_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package runtime_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestRuntime(t *testing.T) { - t.Parallel() - RegisterFailHandler(Fail) - RunSpecs(t, "Runtime Suite") -} diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go deleted file mode 100644 index ef8bd1668f..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_manager.go +++ /dev/null @@ -1,417 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" - "github.com/nginxinc/nginx-plus-go-client/client" -) - -type FakeManager struct { - GetUpstreamsStub func() (client.Upstreams, client.StreamUpstreams, error) - getUpstreamsMutex sync.RWMutex - getUpstreamsArgsForCall []struct { - } - getUpstreamsReturns struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - } - getUpstreamsReturnsOnCall map[int]struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - } - IsPlusStub func() bool - isPlusMutex sync.RWMutex - isPlusArgsForCall []struct { - } - isPlusReturns struct { - result1 bool - } - isPlusReturnsOnCall map[int]struct { - result1 bool - } - ReloadStub func(context.Context, int) error - reloadMutex sync.RWMutex - reloadArgsForCall []struct { - arg1 context.Context - arg2 int - } - reloadReturns struct { - result1 error - } - reloadReturnsOnCall map[int]struct { - result1 error - } - UpdateHTTPServersStub func(string, []client.UpstreamServer) error - updateHTTPServersMutex sync.RWMutex - updateHTTPServersArgsForCall []struct { - arg1 string - arg2 []client.UpstreamServer - } - updateHTTPServersReturns struct { - result1 error - } - updateHTTPServersReturnsOnCall map[int]struct { - result1 error - } - UpdateStreamServersStub func(string, []client.StreamUpstreamServer) error - updateStreamServersMutex sync.RWMutex - updateStreamServersArgsForCall []struct { - arg1 string - arg2 []client.StreamUpstreamServer - } - updateStreamServersReturns struct { - result1 error - } - updateStreamServersReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeManager) GetUpstreams() (client.Upstreams, client.StreamUpstreams, error) { - fake.getUpstreamsMutex.Lock() - ret, specificReturn := fake.getUpstreamsReturnsOnCall[len(fake.getUpstreamsArgsForCall)] - fake.getUpstreamsArgsForCall = append(fake.getUpstreamsArgsForCall, struct { - }{}) - stub := fake.GetUpstreamsStub - fakeReturns := fake.getUpstreamsReturns - 
fake.recordInvocation("GetUpstreams", []interface{}{}) - fake.getUpstreamsMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2, ret.result3 - } - return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 -} - -func (fake *FakeManager) GetUpstreamsCallCount() int { - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - return len(fake.getUpstreamsArgsForCall) -} - -func (fake *FakeManager) GetUpstreamsCalls(stub func() (client.Upstreams, client.StreamUpstreams, error)) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = stub -} - -func (fake *FakeManager) GetUpstreamsReturns(result1 client.Upstreams, result2 client.StreamUpstreams, result3 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = nil - fake.getUpstreamsReturns = struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - }{result1, result2, result3} -} - -func (fake *FakeManager) GetUpstreamsReturnsOnCall(i int, result1 client.Upstreams, result2 client.StreamUpstreams, result3 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = nil - if fake.getUpstreamsReturnsOnCall == nil { - fake.getUpstreamsReturnsOnCall = make(map[int]struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - }) - } - fake.getUpstreamsReturnsOnCall[i] = struct { - result1 client.Upstreams - result2 client.StreamUpstreams - result3 error - }{result1, result2, result3} -} - -func (fake *FakeManager) IsPlus() bool { - fake.isPlusMutex.Lock() - ret, specificReturn := fake.isPlusReturnsOnCall[len(fake.isPlusArgsForCall)] - fake.isPlusArgsForCall = append(fake.isPlusArgsForCall, struct { - }{}) - stub := fake.IsPlusStub - fakeReturns := fake.isPlusReturns - fake.recordInvocation("IsPlus", []interface{}{}) - fake.isPlusMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) IsPlusCallCount() int { - fake.isPlusMutex.RLock() - defer fake.isPlusMutex.RUnlock() - return len(fake.isPlusArgsForCall) -} - -func (fake *FakeManager) IsPlusCalls(stub func() bool) { - fake.isPlusMutex.Lock() - defer fake.isPlusMutex.Unlock() - fake.IsPlusStub = stub -} - -func (fake *FakeManager) IsPlusReturns(result1 bool) { - fake.isPlusMutex.Lock() - defer fake.isPlusMutex.Unlock() - fake.IsPlusStub = nil - fake.isPlusReturns = struct { - result1 bool - }{result1} -} - -func (fake *FakeManager) IsPlusReturnsOnCall(i int, result1 bool) { - fake.isPlusMutex.Lock() - defer fake.isPlusMutex.Unlock() - fake.IsPlusStub = nil - if fake.isPlusReturnsOnCall == nil { - fake.isPlusReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.isPlusReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *FakeManager) Reload(arg1 context.Context, arg2 int) error { - fake.reloadMutex.Lock() - ret, specificReturn := fake.reloadReturnsOnCall[len(fake.reloadArgsForCall)] - fake.reloadArgsForCall = append(fake.reloadArgsForCall, struct { - arg1 context.Context - arg2 int - }{arg1, arg2}) - stub := fake.ReloadStub - fakeReturns := fake.reloadReturns - fake.recordInvocation("Reload", []interface{}{arg1, arg2}) - fake.reloadMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake 
*FakeManager) ReloadCallCount() int { - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - return len(fake.reloadArgsForCall) -} - -func (fake *FakeManager) ReloadCalls(stub func(context.Context, int) error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = stub -} - -func (fake *FakeManager) ReloadArgsForCall(i int) (context.Context, int) { - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - argsForCall := fake.reloadArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeManager) ReloadReturns(result1 error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = nil - fake.reloadReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) ReloadReturnsOnCall(i int, result1 error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = nil - if fake.reloadReturnsOnCall == nil { - fake.reloadReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.reloadReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateHTTPServers(arg1 string, arg2 []client.UpstreamServer) error { - var arg2Copy []client.UpstreamServer - if arg2 != nil { - arg2Copy = make([]client.UpstreamServer, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateHTTPServersMutex.Lock() - ret, specificReturn := fake.updateHTTPServersReturnsOnCall[len(fake.updateHTTPServersArgsForCall)] - fake.updateHTTPServersArgsForCall = append(fake.updateHTTPServersArgsForCall, struct { - arg1 string - arg2 []client.UpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateHTTPServersStub - fakeReturns := fake.updateHTTPServersReturns - fake.recordInvocation("UpdateHTTPServers", []interface{}{arg1, arg2Copy}) - fake.updateHTTPServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) UpdateHTTPServersCallCount() int { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - return len(fake.updateHTTPServersArgsForCall) -} - -func (fake *FakeManager) UpdateHTTPServersCalls(stub func(string, []client.UpstreamServer) error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = stub -} - -func (fake *FakeManager) UpdateHTTPServersArgsForCall(i int) (string, []client.UpstreamServer) { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - argsForCall := fake.updateHTTPServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeManager) UpdateHTTPServersReturns(result1 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - fake.updateHTTPServersReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateHTTPServersReturnsOnCall(i int, result1 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - if fake.updateHTTPServersReturnsOnCall == nil { - fake.updateHTTPServersReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.updateHTTPServersReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateStreamServers(arg1 string, arg2 []client.StreamUpstreamServer) error { - var arg2Copy []client.StreamUpstreamServer - if arg2 != nil { - arg2Copy = make([]client.StreamUpstreamServer, 
len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateStreamServersMutex.Lock() - ret, specificReturn := fake.updateStreamServersReturnsOnCall[len(fake.updateStreamServersArgsForCall)] - fake.updateStreamServersArgsForCall = append(fake.updateStreamServersArgsForCall, struct { - arg1 string - arg2 []client.StreamUpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateStreamServersStub - fakeReturns := fake.updateStreamServersReturns - fake.recordInvocation("UpdateStreamServers", []interface{}{arg1, arg2Copy}) - fake.updateStreamServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) UpdateStreamServersCallCount() int { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - return len(fake.updateStreamServersArgsForCall) -} - -func (fake *FakeManager) UpdateStreamServersCalls(stub func(string, []client.StreamUpstreamServer) error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = stub -} - -func (fake *FakeManager) UpdateStreamServersArgsForCall(i int) (string, []client.StreamUpstreamServer) { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - argsForCall := fake.updateStreamServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeManager) UpdateStreamServersReturns(result1 error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - fake.updateStreamServersReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) UpdateStreamServersReturnsOnCall(i int, result1 error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - if fake.updateStreamServersReturnsOnCall == nil { - fake.updateStreamServersReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.updateStreamServersReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - fake.isPlusMutex.RLock() - defer fake.isPlusMutex.RUnlock() - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.Manager = new(FakeManager) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_metrics_collector.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_metrics_collector.go deleted file mode 100644 index 10215a6758..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_metrics_collector.go +++ /dev/null @@ 
-1,137 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "sync" - "time" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeMetricsCollector struct { - IncReloadCountStub func() - incReloadCountMutex sync.RWMutex - incReloadCountArgsForCall []struct { - } - IncReloadErrorsStub func() - incReloadErrorsMutex sync.RWMutex - incReloadErrorsArgsForCall []struct { - } - ObserveLastReloadTimeStub func(time.Duration) - observeLastReloadTimeMutex sync.RWMutex - observeLastReloadTimeArgsForCall []struct { - arg1 time.Duration - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeMetricsCollector) IncReloadCount() { - fake.incReloadCountMutex.Lock() - fake.incReloadCountArgsForCall = append(fake.incReloadCountArgsForCall, struct { - }{}) - stub := fake.IncReloadCountStub - fake.recordInvocation("IncReloadCount", []interface{}{}) - fake.incReloadCountMutex.Unlock() - if stub != nil { - fake.IncReloadCountStub() - } -} - -func (fake *FakeMetricsCollector) IncReloadCountCallCount() int { - fake.incReloadCountMutex.RLock() - defer fake.incReloadCountMutex.RUnlock() - return len(fake.incReloadCountArgsForCall) -} - -func (fake *FakeMetricsCollector) IncReloadCountCalls(stub func()) { - fake.incReloadCountMutex.Lock() - defer fake.incReloadCountMutex.Unlock() - fake.IncReloadCountStub = stub -} - -func (fake *FakeMetricsCollector) IncReloadErrors() { - fake.incReloadErrorsMutex.Lock() - fake.incReloadErrorsArgsForCall = append(fake.incReloadErrorsArgsForCall, struct { - }{}) - stub := fake.IncReloadErrorsStub - fake.recordInvocation("IncReloadErrors", []interface{}{}) - fake.incReloadErrorsMutex.Unlock() - if stub != nil { - fake.IncReloadErrorsStub() - } -} - -func (fake *FakeMetricsCollector) IncReloadErrorsCallCount() int { - fake.incReloadErrorsMutex.RLock() - defer fake.incReloadErrorsMutex.RUnlock() - return len(fake.incReloadErrorsArgsForCall) -} - -func (fake *FakeMetricsCollector) IncReloadErrorsCalls(stub func()) { - fake.incReloadErrorsMutex.Lock() - defer fake.incReloadErrorsMutex.Unlock() - fake.IncReloadErrorsStub = stub -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTime(arg1 time.Duration) { - fake.observeLastReloadTimeMutex.Lock() - fake.observeLastReloadTimeArgsForCall = append(fake.observeLastReloadTimeArgsForCall, struct { - arg1 time.Duration - }{arg1}) - stub := fake.ObserveLastReloadTimeStub - fake.recordInvocation("ObserveLastReloadTime", []interface{}{arg1}) - fake.observeLastReloadTimeMutex.Unlock() - if stub != nil { - fake.ObserveLastReloadTimeStub(arg1) - } -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTimeCallCount() int { - fake.observeLastReloadTimeMutex.RLock() - defer fake.observeLastReloadTimeMutex.RUnlock() - return len(fake.observeLastReloadTimeArgsForCall) -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTimeCalls(stub func(time.Duration)) { - fake.observeLastReloadTimeMutex.Lock() - defer fake.observeLastReloadTimeMutex.Unlock() - fake.ObserveLastReloadTimeStub = stub -} - -func (fake *FakeMetricsCollector) ObserveLastReloadTimeArgsForCall(i int) time.Duration { - fake.observeLastReloadTimeMutex.RLock() - defer fake.observeLastReloadTimeMutex.RUnlock() - argsForCall := fake.observeLastReloadTimeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeMetricsCollector) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - 
fake.incReloadCountMutex.RLock() - defer fake.incReloadCountMutex.RUnlock() - fake.incReloadErrorsMutex.RLock() - defer fake.incReloadErrorsMutex.RUnlock() - fake.observeLastReloadTimeMutex.RLock() - defer fake.observeLastReloadTimeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeMetricsCollector) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.MetricsCollector = new(FakeMetricsCollector) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_config_verifier.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_config_verifier.go deleted file mode 100644 index 3b1e522c72..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_config_verifier.go +++ /dev/null @@ -1,269 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeNginxConfigVerifier struct { - EnsureConfigVersionStub func(context.Context, int) error - ensureConfigVersionMutex sync.RWMutex - ensureConfigVersionArgsForCall []struct { - arg1 context.Context - arg2 int - } - ensureConfigVersionReturns struct { - result1 error - } - ensureConfigVersionReturnsOnCall map[int]struct { - result1 error - } - GetConfigVersionStub func() (int, error) - getConfigVersionMutex sync.RWMutex - getConfigVersionArgsForCall []struct { - } - getConfigVersionReturns struct { - result1 int - result2 error - } - getConfigVersionReturnsOnCall map[int]struct { - result1 int - result2 error - } - WaitForCorrectVersionStub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error - waitForCorrectVersionMutex sync.RWMutex - waitForCorrectVersionArgsForCall []struct { - arg1 context.Context - arg2 int - arg3 string - arg4 []byte - arg5 runtime.ReadFileFunc - } - waitForCorrectVersionReturns struct { - result1 error - } - waitForCorrectVersionReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersion(arg1 context.Context, arg2 int) error { - fake.ensureConfigVersionMutex.Lock() - ret, specificReturn := fake.ensureConfigVersionReturnsOnCall[len(fake.ensureConfigVersionArgsForCall)] - fake.ensureConfigVersionArgsForCall = append(fake.ensureConfigVersionArgsForCall, struct { - arg1 context.Context - arg2 int - }{arg1, arg2}) - stub := fake.EnsureConfigVersionStub - fakeReturns := fake.ensureConfigVersionReturns - fake.recordInvocation("EnsureConfigVersion", []interface{}{arg1, arg2}) - fake.ensureConfigVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionCallCount() int { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - return len(fake.ensureConfigVersionArgsForCall) -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionCalls(stub func(context.Context, int) 
error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = stub -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionArgsForCall(i int) (context.Context, int) { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - argsForCall := fake.ensureConfigVersionArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionReturns(result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - fake.ensureConfigVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) EnsureConfigVersionReturnsOnCall(i int, result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - if fake.ensureConfigVersionReturnsOnCall == nil { - fake.ensureConfigVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.ensureConfigVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersion() (int, error) { - fake.getConfigVersionMutex.Lock() - ret, specificReturn := fake.getConfigVersionReturnsOnCall[len(fake.getConfigVersionArgsForCall)] - fake.getConfigVersionArgsForCall = append(fake.getConfigVersionArgsForCall, struct { - }{}) - stub := fake.GetConfigVersionStub - fakeReturns := fake.getConfigVersionReturns - fake.recordInvocation("GetConfigVersion", []interface{}{}) - fake.getConfigVersionMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionCallCount() int { - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - return len(fake.getConfigVersionArgsForCall) -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionCalls(stub func() (int, error)) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = stub -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionReturns(result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - fake.getConfigVersionReturns = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeNginxConfigVerifier) GetConfigVersionReturnsOnCall(i int, result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - if fake.getConfigVersionReturnsOnCall == nil { - fake.getConfigVersionReturnsOnCall = make(map[int]struct { - result1 int - result2 error - }) - } - fake.getConfigVersionReturnsOnCall[i] = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersion(arg1 context.Context, arg2 int, arg3 string, arg4 []byte, arg5 runtime.ReadFileFunc) error { - var arg4Copy []byte - if arg4 != nil { - arg4Copy = make([]byte, len(arg4)) - copy(arg4Copy, arg4) - } - fake.waitForCorrectVersionMutex.Lock() - ret, specificReturn := fake.waitForCorrectVersionReturnsOnCall[len(fake.waitForCorrectVersionArgsForCall)] - fake.waitForCorrectVersionArgsForCall = append(fake.waitForCorrectVersionArgsForCall, struct { - arg1 context.Context - arg2 int - arg3 string - arg4 
[]byte - arg5 runtime.ReadFileFunc - }{arg1, arg2, arg3, arg4Copy, arg5}) - stub := fake.WaitForCorrectVersionStub - fakeReturns := fake.waitForCorrectVersionReturns - fake.recordInvocation("WaitForCorrectVersion", []interface{}{arg1, arg2, arg3, arg4Copy, arg5}) - fake.waitForCorrectVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2, arg3, arg4, arg5) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionCallCount() int { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - return len(fake.waitForCorrectVersionArgsForCall) -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionCalls(stub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = stub -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionArgsForCall(i int) (context.Context, int, string, []byte, runtime.ReadFileFunc) { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - argsForCall := fake.waitForCorrectVersionArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionReturns(result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - fake.waitForCorrectVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) WaitForCorrectVersionReturnsOnCall(i int, result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - if fake.waitForCorrectVersionReturnsOnCall == nil { - fake.waitForCorrectVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.waitForCorrectVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeNginxConfigVerifier) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeNginxConfigVerifier) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_plus_client.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_plus_client.go deleted file mode 100644 index 0a5065e74b..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_nginx_plus_client.go +++ /dev/null @@ -1,370 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package runtimefakes - -import ( - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" - "github.com/nginxinc/nginx-plus-go-client/client" -) - -type FakeNginxPlusClient struct { - GetStreamUpstreamsStub func() (*client.StreamUpstreams, error) - getStreamUpstreamsMutex sync.RWMutex - getStreamUpstreamsArgsForCall []struct { - } - getStreamUpstreamsReturns struct { - result1 *client.StreamUpstreams - result2 error - } - getStreamUpstreamsReturnsOnCall map[int]struct { - result1 *client.StreamUpstreams - result2 error - } - GetUpstreamsStub func() (*client.Upstreams, error) - getUpstreamsMutex sync.RWMutex - getUpstreamsArgsForCall []struct { - } - getUpstreamsReturns struct { - result1 *client.Upstreams - result2 error - } - getUpstreamsReturnsOnCall map[int]struct { - result1 *client.Upstreams - result2 error - } - UpdateHTTPServersStub func(string, []client.UpstreamServer) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error) - updateHTTPServersMutex sync.RWMutex - updateHTTPServersArgsForCall []struct { - arg1 string - arg2 []client.UpstreamServer - } - updateHTTPServersReturns struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - } - updateHTTPServersReturnsOnCall map[int]struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - } - UpdateStreamServersStub func(string, []client.StreamUpstreamServer) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error) - updateStreamServersMutex sync.RWMutex - updateStreamServersArgsForCall []struct { - arg1 string - arg2 []client.StreamUpstreamServer - } - updateStreamServersReturns struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - } - updateStreamServersReturnsOnCall map[int]struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreams() (*client.StreamUpstreams, error) { - fake.getStreamUpstreamsMutex.Lock() - ret, specificReturn := fake.getStreamUpstreamsReturnsOnCall[len(fake.getStreamUpstreamsArgsForCall)] - fake.getStreamUpstreamsArgsForCall = append(fake.getStreamUpstreamsArgsForCall, struct { - }{}) - stub := fake.GetStreamUpstreamsStub - fakeReturns := fake.getStreamUpstreamsReturns - fake.recordInvocation("GetStreamUpstreams", []interface{}{}) - fake.getStreamUpstreamsMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsCallCount() int { - fake.getStreamUpstreamsMutex.RLock() - defer fake.getStreamUpstreamsMutex.RUnlock() - return len(fake.getStreamUpstreamsArgsForCall) -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsCalls(stub func() (*client.StreamUpstreams, error)) { - fake.getStreamUpstreamsMutex.Lock() - defer fake.getStreamUpstreamsMutex.Unlock() - fake.GetStreamUpstreamsStub = stub -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsReturns(result1 *client.StreamUpstreams, result2 error) { - fake.getStreamUpstreamsMutex.Lock() - defer fake.getStreamUpstreamsMutex.Unlock() - 
fake.GetStreamUpstreamsStub = nil - fake.getStreamUpstreamsReturns = struct { - result1 *client.StreamUpstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) GetStreamUpstreamsReturnsOnCall(i int, result1 *client.StreamUpstreams, result2 error) { - fake.getStreamUpstreamsMutex.Lock() - defer fake.getStreamUpstreamsMutex.Unlock() - fake.GetStreamUpstreamsStub = nil - if fake.getStreamUpstreamsReturnsOnCall == nil { - fake.getStreamUpstreamsReturnsOnCall = make(map[int]struct { - result1 *client.StreamUpstreams - result2 error - }) - } - fake.getStreamUpstreamsReturnsOnCall[i] = struct { - result1 *client.StreamUpstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) GetUpstreams() (*client.Upstreams, error) { - fake.getUpstreamsMutex.Lock() - ret, specificReturn := fake.getUpstreamsReturnsOnCall[len(fake.getUpstreamsArgsForCall)] - fake.getUpstreamsArgsForCall = append(fake.getUpstreamsArgsForCall, struct { - }{}) - stub := fake.GetUpstreamsStub - fakeReturns := fake.getUpstreamsReturns - fake.recordInvocation("GetUpstreams", []interface{}{}) - fake.getUpstreamsMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeNginxPlusClient) GetUpstreamsCallCount() int { - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - return len(fake.getUpstreamsArgsForCall) -} - -func (fake *FakeNginxPlusClient) GetUpstreamsCalls(stub func() (*client.Upstreams, error)) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = stub -} - -func (fake *FakeNginxPlusClient) GetUpstreamsReturns(result1 *client.Upstreams, result2 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = nil - fake.getUpstreamsReturns = struct { - result1 *client.Upstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) GetUpstreamsReturnsOnCall(i int, result1 *client.Upstreams, result2 error) { - fake.getUpstreamsMutex.Lock() - defer fake.getUpstreamsMutex.Unlock() - fake.GetUpstreamsStub = nil - if fake.getUpstreamsReturnsOnCall == nil { - fake.getUpstreamsReturnsOnCall = make(map[int]struct { - result1 *client.Upstreams - result2 error - }) - } - fake.getUpstreamsReturnsOnCall[i] = struct { - result1 *client.Upstreams - result2 error - }{result1, result2} -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServers(arg1 string, arg2 []client.UpstreamServer) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error) { - var arg2Copy []client.UpstreamServer - if arg2 != nil { - arg2Copy = make([]client.UpstreamServer, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateHTTPServersMutex.Lock() - ret, specificReturn := fake.updateHTTPServersReturnsOnCall[len(fake.updateHTTPServersArgsForCall)] - fake.updateHTTPServersArgsForCall = append(fake.updateHTTPServersArgsForCall, struct { - arg1 string - arg2 []client.UpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateHTTPServersStub - fakeReturns := fake.updateHTTPServersReturns - fake.recordInvocation("UpdateHTTPServers", []interface{}{arg1, arg2Copy}) - fake.updateHTTPServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2, ret.result3, ret.result4 - } - return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3, fakeReturns.result4 -} - -func (fake 
*FakeNginxPlusClient) UpdateHTTPServersCallCount() int { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - return len(fake.updateHTTPServersArgsForCall) -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersCalls(stub func(string, []client.UpstreamServer) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error)) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = stub -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersArgsForCall(i int) (string, []client.UpstreamServer) { - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - argsForCall := fake.updateHTTPServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersReturns(result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - fake.updateHTTPServersReturns = struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) UpdateHTTPServersReturnsOnCall(i int, result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { - fake.updateHTTPServersMutex.Lock() - defer fake.updateHTTPServersMutex.Unlock() - fake.UpdateHTTPServersStub = nil - if fake.updateHTTPServersReturnsOnCall == nil { - fake.updateHTTPServersReturnsOnCall = make(map[int]struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - }) - } - fake.updateHTTPServersReturnsOnCall[i] = struct { - result1 []client.UpstreamServer - result2 []client.UpstreamServer - result3 []client.UpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) UpdateStreamServers(arg1 string, arg2 []client.StreamUpstreamServer) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error) { - var arg2Copy []client.StreamUpstreamServer - if arg2 != nil { - arg2Copy = make([]client.StreamUpstreamServer, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateStreamServersMutex.Lock() - ret, specificReturn := fake.updateStreamServersReturnsOnCall[len(fake.updateStreamServersArgsForCall)] - fake.updateStreamServersArgsForCall = append(fake.updateStreamServersArgsForCall, struct { - arg1 string - arg2 []client.StreamUpstreamServer - }{arg1, arg2Copy}) - stub := fake.UpdateStreamServersStub - fakeReturns := fake.updateStreamServersReturns - fake.recordInvocation("UpdateStreamServers", []interface{}{arg1, arg2Copy}) - fake.updateStreamServersMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2, ret.result3, ret.result4 - } - return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3, fakeReturns.result4 -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersCallCount() int { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - return len(fake.updateStreamServersArgsForCall) -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersCalls(stub func(string, []client.StreamUpstreamServer) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, 
[]client.StreamUpstreamServer, error)) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = stub -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersArgsForCall(i int) (string, []client.StreamUpstreamServer) { - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - argsForCall := fake.updateStreamServersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersReturns(result1 []client.StreamUpstreamServer, result2 []client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - fake.updateStreamServersReturns = struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) UpdateStreamServersReturnsOnCall(i int, result1 []client.StreamUpstreamServer, result2 []client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { - fake.updateStreamServersMutex.Lock() - defer fake.updateStreamServersMutex.Unlock() - fake.UpdateStreamServersStub = nil - if fake.updateStreamServersReturnsOnCall == nil { - fake.updateStreamServersReturnsOnCall = make(map[int]struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - }) - } - fake.updateStreamServersReturnsOnCall[i] = struct { - result1 []client.StreamUpstreamServer - result2 []client.StreamUpstreamServer - result3 []client.StreamUpstreamServer - result4 error - }{result1, result2, result3, result4} -} - -func (fake *FakeNginxPlusClient) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.getStreamUpstreamsMutex.RLock() - defer fake.getStreamUpstreamsMutex.RUnlock() - fake.getUpstreamsMutex.RLock() - defer fake.getUpstreamsMutex.RUnlock() - fake.updateHTTPServersMutex.RLock() - defer fake.updateHTTPServersMutex.RUnlock() - fake.updateStreamServersMutex.RLock() - defer fake.updateStreamServersMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeNginxPlusClient) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.NginxPlusClient = new(FakeNginxPlusClient) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_process_handler.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_process_handler.go deleted file mode 100644 index 481a354f25..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_process_handler.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package runtimefakes - -import ( - "context" - "sync" - "time" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeProcessHandler struct { - FindMainProcessStub func(context.Context, time.Duration) (int, error) - findMainProcessMutex sync.RWMutex - findMainProcessArgsForCall []struct { - arg1 context.Context - arg2 time.Duration - } - findMainProcessReturns struct { - result1 int - result2 error - } - findMainProcessReturnsOnCall map[int]struct { - result1 int - result2 error - } - KillStub func(int) error - killMutex sync.RWMutex - killArgsForCall []struct { - arg1 int - } - killReturns struct { - result1 error - } - killReturnsOnCall map[int]struct { - result1 error - } - ReadFileStub func(string) ([]byte, error) - readFileMutex sync.RWMutex - readFileArgsForCall []struct { - arg1 string - } - readFileReturns struct { - result1 []byte - result2 error - } - readFileReturnsOnCall map[int]struct { - result1 []byte - result2 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeProcessHandler) FindMainProcess(arg1 context.Context, arg2 time.Duration) (int, error) { - fake.findMainProcessMutex.Lock() - ret, specificReturn := fake.findMainProcessReturnsOnCall[len(fake.findMainProcessArgsForCall)] - fake.findMainProcessArgsForCall = append(fake.findMainProcessArgsForCall, struct { - arg1 context.Context - arg2 time.Duration - }{arg1, arg2}) - stub := fake.FindMainProcessStub - fakeReturns := fake.findMainProcessReturns - fake.recordInvocation("FindMainProcess", []interface{}{arg1, arg2}) - fake.findMainProcessMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeProcessHandler) FindMainProcessCallCount() int { - fake.findMainProcessMutex.RLock() - defer fake.findMainProcessMutex.RUnlock() - return len(fake.findMainProcessArgsForCall) -} - -func (fake *FakeProcessHandler) FindMainProcessCalls(stub func(context.Context, time.Duration) (int, error)) { - fake.findMainProcessMutex.Lock() - defer fake.findMainProcessMutex.Unlock() - fake.FindMainProcessStub = stub -} - -func (fake *FakeProcessHandler) FindMainProcessArgsForCall(i int) (context.Context, time.Duration) { - fake.findMainProcessMutex.RLock() - defer fake.findMainProcessMutex.RUnlock() - argsForCall := fake.findMainProcessArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeProcessHandler) FindMainProcessReturns(result1 int, result2 error) { - fake.findMainProcessMutex.Lock() - defer fake.findMainProcessMutex.Unlock() - fake.FindMainProcessStub = nil - fake.findMainProcessReturns = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) FindMainProcessReturnsOnCall(i int, result1 int, result2 error) { - fake.findMainProcessMutex.Lock() - defer fake.findMainProcessMutex.Unlock() - fake.FindMainProcessStub = nil - if fake.findMainProcessReturnsOnCall == nil { - fake.findMainProcessReturnsOnCall = make(map[int]struct { - result1 int - result2 error - }) - } - fake.findMainProcessReturnsOnCall[i] = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) Kill(arg1 int) error { - fake.killMutex.Lock() - ret, specificReturn := fake.killReturnsOnCall[len(fake.killArgsForCall)] - fake.killArgsForCall = append(fake.killArgsForCall, struct { - arg1 int - }{arg1}) - stub := fake.KillStub - fakeReturns 
:= fake.killReturns - fake.recordInvocation("Kill", []interface{}{arg1}) - fake.killMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeProcessHandler) KillCallCount() int { - fake.killMutex.RLock() - defer fake.killMutex.RUnlock() - return len(fake.killArgsForCall) -} - -func (fake *FakeProcessHandler) KillCalls(stub func(int) error) { - fake.killMutex.Lock() - defer fake.killMutex.Unlock() - fake.KillStub = stub -} - -func (fake *FakeProcessHandler) KillArgsForCall(i int) int { - fake.killMutex.RLock() - defer fake.killMutex.RUnlock() - argsForCall := fake.killArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeProcessHandler) KillReturns(result1 error) { - fake.killMutex.Lock() - defer fake.killMutex.Unlock() - fake.KillStub = nil - fake.killReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeProcessHandler) KillReturnsOnCall(i int, result1 error) { - fake.killMutex.Lock() - defer fake.killMutex.Unlock() - fake.KillStub = nil - if fake.killReturnsOnCall == nil { - fake.killReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.killReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeProcessHandler) ReadFile(arg1 string) ([]byte, error) { - fake.readFileMutex.Lock() - ret, specificReturn := fake.readFileReturnsOnCall[len(fake.readFileArgsForCall)] - fake.readFileArgsForCall = append(fake.readFileArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.ReadFileStub - fakeReturns := fake.readFileReturns - fake.recordInvocation("ReadFile", []interface{}{arg1}) - fake.readFileMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeProcessHandler) ReadFileCallCount() int { - fake.readFileMutex.RLock() - defer fake.readFileMutex.RUnlock() - return len(fake.readFileArgsForCall) -} - -func (fake *FakeProcessHandler) ReadFileCalls(stub func(string) ([]byte, error)) { - fake.readFileMutex.Lock() - defer fake.readFileMutex.Unlock() - fake.ReadFileStub = stub -} - -func (fake *FakeProcessHandler) ReadFileArgsForCall(i int) string { - fake.readFileMutex.RLock() - defer fake.readFileMutex.RUnlock() - argsForCall := fake.readFileArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeProcessHandler) ReadFileReturns(result1 []byte, result2 error) { - fake.readFileMutex.Lock() - defer fake.readFileMutex.Unlock() - fake.ReadFileStub = nil - fake.readFileReturns = struct { - result1 []byte - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) ReadFileReturnsOnCall(i int, result1 []byte, result2 error) { - fake.readFileMutex.Lock() - defer fake.readFileMutex.Unlock() - fake.ReadFileStub = nil - if fake.readFileReturnsOnCall == nil { - fake.readFileReturnsOnCall = make(map[int]struct { - result1 []byte - result2 error - }) - } - fake.readFileReturnsOnCall[i] = struct { - result1 []byte - result2 error - }{result1, result2} -} - -func (fake *FakeProcessHandler) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.findMainProcessMutex.RLock() - defer fake.findMainProcessMutex.RUnlock() - fake.killMutex.RLock() - defer fake.killMutex.RUnlock() - fake.readFileMutex.RLock() - defer fake.readFileMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - 
copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeProcessHandler) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.ProcessHandler = new(FakeProcessHandler) diff --git a/internal/mode/static/nginx/runtime/runtimefakes/fake_verify_client.go b/internal/mode/static/nginx/runtime/runtimefakes/fake_verify_client.go deleted file mode 100644 index 8c6aa7c426..0000000000 --- a/internal/mode/static/nginx/runtime/runtimefakes/fake_verify_client.go +++ /dev/null @@ -1,269 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/runtime" -) - -type FakeVerifyClient struct { - EnsureConfigVersionStub func(context.Context, int) error - ensureConfigVersionMutex sync.RWMutex - ensureConfigVersionArgsForCall []struct { - arg1 context.Context - arg2 int - } - ensureConfigVersionReturns struct { - result1 error - } - ensureConfigVersionReturnsOnCall map[int]struct { - result1 error - } - GetConfigVersionStub func() (int, error) - getConfigVersionMutex sync.RWMutex - getConfigVersionArgsForCall []struct { - } - getConfigVersionReturns struct { - result1 int - result2 error - } - getConfigVersionReturnsOnCall map[int]struct { - result1 int - result2 error - } - WaitForCorrectVersionStub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error - waitForCorrectVersionMutex sync.RWMutex - waitForCorrectVersionArgsForCall []struct { - arg1 context.Context - arg2 int - arg3 string - arg4 []byte - arg5 runtime.ReadFileFunc - } - waitForCorrectVersionReturns struct { - result1 error - } - waitForCorrectVersionReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeVerifyClient) EnsureConfigVersion(arg1 context.Context, arg2 int) error { - fake.ensureConfigVersionMutex.Lock() - ret, specificReturn := fake.ensureConfigVersionReturnsOnCall[len(fake.ensureConfigVersionArgsForCall)] - fake.ensureConfigVersionArgsForCall = append(fake.ensureConfigVersionArgsForCall, struct { - arg1 context.Context - arg2 int - }{arg1, arg2}) - stub := fake.EnsureConfigVersionStub - fakeReturns := fake.ensureConfigVersionReturns - fake.recordInvocation("EnsureConfigVersion", []interface{}{arg1, arg2}) - fake.ensureConfigVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeVerifyClient) EnsureConfigVersionCallCount() int { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - return len(fake.ensureConfigVersionArgsForCall) -} - -func (fake *FakeVerifyClient) EnsureConfigVersionCalls(stub func(context.Context, int) error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = stub -} - -func (fake *FakeVerifyClient) EnsureConfigVersionArgsForCall(i int) (context.Context, int) { - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - argsForCall := fake.ensureConfigVersionArgsForCall[i] - return argsForCall.arg1, 
argsForCall.arg2 -} - -func (fake *FakeVerifyClient) EnsureConfigVersionReturns(result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - fake.ensureConfigVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) EnsureConfigVersionReturnsOnCall(i int, result1 error) { - fake.ensureConfigVersionMutex.Lock() - defer fake.ensureConfigVersionMutex.Unlock() - fake.EnsureConfigVersionStub = nil - if fake.ensureConfigVersionReturnsOnCall == nil { - fake.ensureConfigVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.ensureConfigVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) GetConfigVersion() (int, error) { - fake.getConfigVersionMutex.Lock() - ret, specificReturn := fake.getConfigVersionReturnsOnCall[len(fake.getConfigVersionArgsForCall)] - fake.getConfigVersionArgsForCall = append(fake.getConfigVersionArgsForCall, struct { - }{}) - stub := fake.GetConfigVersionStub - fakeReturns := fake.getConfigVersionReturns - fake.recordInvocation("GetConfigVersion", []interface{}{}) - fake.getConfigVersionMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeVerifyClient) GetConfigVersionCallCount() int { - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - return len(fake.getConfigVersionArgsForCall) -} - -func (fake *FakeVerifyClient) GetConfigVersionCalls(stub func() (int, error)) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = stub -} - -func (fake *FakeVerifyClient) GetConfigVersionReturns(result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - fake.getConfigVersionReturns = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeVerifyClient) GetConfigVersionReturnsOnCall(i int, result1 int, result2 error) { - fake.getConfigVersionMutex.Lock() - defer fake.getConfigVersionMutex.Unlock() - fake.GetConfigVersionStub = nil - if fake.getConfigVersionReturnsOnCall == nil { - fake.getConfigVersionReturnsOnCall = make(map[int]struct { - result1 int - result2 error - }) - } - fake.getConfigVersionReturnsOnCall[i] = struct { - result1 int - result2 error - }{result1, result2} -} - -func (fake *FakeVerifyClient) WaitForCorrectVersion(arg1 context.Context, arg2 int, arg3 string, arg4 []byte, arg5 runtime.ReadFileFunc) error { - var arg4Copy []byte - if arg4 != nil { - arg4Copy = make([]byte, len(arg4)) - copy(arg4Copy, arg4) - } - fake.waitForCorrectVersionMutex.Lock() - ret, specificReturn := fake.waitForCorrectVersionReturnsOnCall[len(fake.waitForCorrectVersionArgsForCall)] - fake.waitForCorrectVersionArgsForCall = append(fake.waitForCorrectVersionArgsForCall, struct { - arg1 context.Context - arg2 int - arg3 string - arg4 []byte - arg5 runtime.ReadFileFunc - }{arg1, arg2, arg3, arg4Copy, arg5}) - stub := fake.WaitForCorrectVersionStub - fakeReturns := fake.waitForCorrectVersionReturns - fake.recordInvocation("WaitForCorrectVersion", []interface{}{arg1, arg2, arg3, arg4Copy, arg5}) - fake.waitForCorrectVersionMutex.Unlock() - if stub != nil { - return stub(arg1, arg2, arg3, arg4, arg5) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake 
*FakeVerifyClient) WaitForCorrectVersionCallCount() int { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - return len(fake.waitForCorrectVersionArgsForCall) -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionCalls(stub func(context.Context, int, string, []byte, runtime.ReadFileFunc) error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = stub -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionArgsForCall(i int) (context.Context, int, string, []byte, runtime.ReadFileFunc) { - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - argsForCall := fake.waitForCorrectVersionArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionReturns(result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - fake.waitForCorrectVersionReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) WaitForCorrectVersionReturnsOnCall(i int, result1 error) { - fake.waitForCorrectVersionMutex.Lock() - defer fake.waitForCorrectVersionMutex.Unlock() - fake.WaitForCorrectVersionStub = nil - if fake.waitForCorrectVersionReturnsOnCall == nil { - fake.waitForCorrectVersionReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.waitForCorrectVersionReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeVerifyClient) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.ensureConfigVersionMutex.RLock() - defer fake.ensureConfigVersionMutex.RUnlock() - fake.getConfigVersionMutex.RLock() - defer fake.getConfigVersionMutex.RUnlock() - fake.waitForCorrectVersionMutex.RLock() - defer fake.waitForCorrectVersionMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeVerifyClient) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} diff --git a/internal/mode/static/nginx/runtime/verify.go b/internal/mode/static/nginx/runtime/verify.go deleted file mode 100644 index e5d7e64b33..0000000000 --- a/internal/mode/static/nginx/runtime/verify.go +++ /dev/null @@ -1,155 +0,0 @@ -package runtime - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/util/wait" -) - -const configVersionURI = "/var/run/nginx/nginx-config-version.sock" - -var noNewWorkersErrFmt = "reload unsuccessful: no new NGINX worker processes started for config version %d." + - " Please check the NGINX container logs for possible configuration issues: %w" - -//go:generate go tool counterfeiter . 
nginxConfigVerifier - -type nginxConfigVerifier interface { - GetConfigVersion() (int, error) - WaitForCorrectVersion( - ctx context.Context, - expectedVersion int, - childProcFile string, - previousChildProcesses []byte, - readFile ReadFileFunc, - ) error - EnsureConfigVersion(ctx context.Context, expectedVersion int) error -} - -// VerifyClient is a client for verifying the config version. -type VerifyClient struct { - client *http.Client - timeout time.Duration -} - -// NewVerifyClient returns a new client pointed at the config version socket. -func NewVerifyClient(timeout time.Duration) *VerifyClient { - return &VerifyClient{ - client: &http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", configVersionURI) - }, - }, - }, - timeout: timeout, - } -} - -// GetConfigVersion gets the version number that we put in the nginx config to verify that we're using -// the correct config. -func (c *VerifyClient) GetConfigVersion() (int, error) { - ctx, cancel := context.WithTimeout(context.Background(), c.timeout) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://config-version/version", nil) - if err != nil { - return 0, fmt.Errorf("error creating request: %w", err) - } - - resp, err := c.client.Do(req) - if err != nil { - return 0, fmt.Errorf("error getting client: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return 0, fmt.Errorf("non-200 response: %v", resp.StatusCode) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return 0, fmt.Errorf("failed to read the response body: %w", err) - } - v, err := strconv.Atoi(string(body)) - if err != nil { - return 0, fmt.Errorf("error converting string to int: %w", err) - } - return v, nil -} - -// WaitForCorrectVersion first ensures any new worker processes have been started, and then calls the config version -// endpoint until it gets the expectedVersion, which ensures that a new worker process has been started for that config -// version. 
-func (c *VerifyClient) WaitForCorrectVersion( - ctx context.Context, - expectedVersion int, - childProcFile string, - previousChildProcesses []byte, - readFile ReadFileFunc, -) error { - ctx, cancel := context.WithTimeout(ctx, c.timeout) - defer cancel() - - if err := ensureNewNginxWorkers( - ctx, - childProcFile, - previousChildProcesses, - readFile, - ); err != nil { - return fmt.Errorf(noNewWorkersErrFmt, expectedVersion, err) - } - - if err := c.EnsureConfigVersion(ctx, expectedVersion); err != nil { - if errors.Is(err, context.DeadlineExceeded) { - err = fmt.Errorf( - "config version check didn't return expected version %d within the deadline", - expectedVersion, - ) - } - return fmt.Errorf("could not get expected config version %d: %w", expectedVersion, err) - } - return nil -} - -func (c *VerifyClient) EnsureConfigVersion(ctx context.Context, expectedVersion int) error { - return wait.PollUntilContextCancel( - ctx, - 25*time.Millisecond, - true, /* poll immediately */ - func(_ context.Context) (bool, error) { - version, err := c.GetConfigVersion() - return version == expectedVersion, err - }, - ) -} - -func ensureNewNginxWorkers( - ctx context.Context, - childProcFile string, - previousContents []byte, - readFile ReadFileFunc, -) error { - return wait.PollUntilContextCancel( - ctx, - 25*time.Millisecond, - true, /* poll immediately */ - func(_ context.Context) (bool, error) { - content, err := readFile(childProcFile) - if err != nil { - return false, err - } - if !bytes.Equal(previousContents, content) { - return true, nil - } - return false, nil - }, - ) -} diff --git a/internal/mode/static/nginx/runtime/verify_test.go b/internal/mode/static/nginx/runtime/verify_test.go deleted file mode 100644 index d20844a410..0000000000 --- a/internal/mode/static/nginx/runtime/verify_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package runtime - -import ( - "bytes" - "context" - "errors" - "io" - "net/http" - "testing" - "time" - - . 
"github.com/onsi/gomega" -) - -type transport struct{} - -func (c transport) RoundTrip(_ *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(bytes.NewBufferString("42")), - Header: make(http.Header), - }, nil -} - -func getTestHTTPClient() *http.Client { - ts := transport{} - return &http.Client{ - Transport: ts, - } -} - -func TestVerifyClient(t *testing.T) { - t.Parallel() - c := VerifyClient{ - client: getTestHTTPClient(), - timeout: 25 * time.Millisecond, - } - - ctx := context.Background() - cancellingCtx, cancel := context.WithCancel(ctx) - time.AfterFunc(1*time.Millisecond, cancel) - - newContents := []byte("4 5 6") - - readFileNew := func(string) ([]byte, error) { - return newContents, nil - } - readFileError := func(string) ([]byte, error) { - return nil, errors.New("error") - } - - tests := []struct { - ctx context.Context - readFile ReadFileFunc - name string - expectedVersion int - expectError bool - }{ - { - ctx: ctx, - expectedVersion: 42, - readFile: readFileNew, - expectError: false, - name: "normal case", - }, - { - ctx: ctx, - expectedVersion: 43, - readFile: readFileNew, - expectError: true, - name: "wrong version", - }, - { - ctx: ctx, - expectedVersion: 0, - readFile: readFileError, - expectError: true, - name: "no new workers", - }, - { - ctx: cancellingCtx, - expectedVersion: 0, - readFile: readFileNew, - expectError: true, - name: "context canceled", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - err := c.WaitForCorrectVersion(test.ctx, test.expectedVersion, "/childfile", []byte("1 2 3"), test.readFile) - - if test.expectError { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - }) - } -} - -func TestEnsureNewNginxWorkers(t *testing.T) { - t.Parallel() - previousContents := []byte("1 2 3") - newContents := []byte("4 5 6") - - readFileError := func(string) ([]byte, error) { - return nil, errors.New("error") - } - - readFilePrevious := func(string) ([]byte, error) { - return previousContents, nil - } - - readFileNew := func(string) ([]byte, error) { - return newContents, nil - } - - ctx := context.Background() - - cancellingCtx, cancel := context.WithCancel(ctx) - time.AfterFunc(100*time.Millisecond, cancel) - - cancellingCtx2, cancel2 := context.WithCancel(ctx) - time.AfterFunc(1*time.Millisecond, cancel2) - - tests := []struct { - ctx context.Context - readFile ReadFileFunc - name string - previousContents []byte - expectError bool - }{ - { - ctx: ctx, - readFile: readFileNew, - previousContents: previousContents, - expectError: false, - name: "normal case", - }, - { - ctx: ctx, - readFile: readFileError, - previousContents: previousContents, - expectError: true, - name: "cannot read file", - }, - { - ctx: cancellingCtx, - readFile: readFilePrevious, - previousContents: previousContents, - expectError: true, - name: "timed out waiting for new workers", - }, - { - ctx: cancellingCtx2, - readFile: readFilePrevious, - previousContents: previousContents, - expectError: true, - name: "context canceled", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - err := ensureNewNginxWorkers( - test.ctx, - "/childfile", - test.previousContents, - test.readFile, - ) - - if test.expectError { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - }) - } -} From 814c5508e528c3b83024542ef5e41e67372454fa 
Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Thu, 2 Jan 2025 10:33:03 -0700 Subject: [PATCH 02/32] CP/DP Split: Add agent/nginx container and deployment (#2958) Updating the nginx docker containers to build and include agent. Once agent is officially released, we can use the published binary instead of building. Added a temporary nginx deployment to the helm chart to deploy a standalone nginx pod. Added the basic gRPC server and agent API implementation to allow for the agent pod to connect to the control plane without errors. --- .yamllint.yaml | 3 +- build/Dockerfile.nginx | 31 ++- build/Dockerfile.nginxplus | 16 +- build/entrypoint.sh | 53 +++++ .../templates/deployment.yaml | 2 + .../templates/service.yaml | 4 +- .../templates/tmp-nginx-agent-conf.yaml | 19 ++ .../templates/tmp-nginx-deployment.yaml | 186 ++++++++++++++++++ .../templates/tmp-nginx-service.yaml | 36 ++++ .../tmp/tmp-nginx-deployment.yaml | 169 ---------------- .../tmp/tmp-nginx-service.yaml | 35 ---- config/tests/static-deployment.yaml | 2 + deploy/aws-nlb/deploy.yaml | 172 +++++++++++++++- deploy/azure/deploy.yaml | 171 +++++++++++++++- deploy/default/deploy.yaml | 169 +++++++++++++++- deploy/experimental-nginx-plus/deploy.yaml | 182 ++++++++++++++++- deploy/experimental/deploy.yaml | 169 +++++++++++++++- deploy/nginx-plus/deploy.yaml | 182 ++++++++++++++++- deploy/nodeport/deploy.yaml | 169 +++++++++++++++- deploy/openshift/deploy.yaml | 169 +++++++++++++++- .../snippets-filters-nginx-plus/deploy.yaml | 182 ++++++++++++++++- deploy/snippets-filters/deploy.yaml | 169 +++++++++++++++- go.mod | 10 +- go.sum | 20 +- internal/mode/static/manager.go | 23 ++- internal/mode/static/nginx/agent/agent.go | 15 +- internal/mode/static/nginx/agent/command.go | 89 +++++++++ internal/mode/static/nginx/agent/file.go | 62 ++++++ internal/mode/static/nginx/agent/grpc.go | 59 ++++++ tests/go.mod | 4 +- tests/go.sum | 8 +- 31 files changed, 2324 insertions(+), 256 deletions(-) create mode 100755 build/entrypoint.sh create mode 100644 charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml create mode 100644 charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml create mode 100644 charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml delete mode 100644 charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml delete mode 100644 charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml create mode 100644 internal/mode/static/nginx/agent/command.go create mode 100644 internal/mode/static/nginx/agent/file.go create mode 100644 internal/mode/static/nginx/agent/grpc.go diff --git a/.yamllint.yaml b/.yamllint.yaml index b2d07c848f..83713689aa 100644 --- a/.yamllint.yaml +++ b/.yamllint.yaml @@ -2,8 +2,7 @@ ignore: - charts/nginx-gateway-fabric/templates - config/crd/bases/ - - deploy/crds.yaml - - deploy/*nginx-plus + - deploy - site/static rules: diff --git a/build/Dockerfile.nginx b/build/Dockerfile.nginx index 182c172245..b5033574da 100644 --- a/build/Dockerfile.nginx +++ b/build/Dockerfile.nginx @@ -1,18 +1,41 @@ # syntax=docker/dockerfile:1.15 +# TODO(sberman): the commented out lines are for when we use the published agent release +# FROM scratch AS nginx-files + +# # the following links can be replaced with local files if needed, i.e. 
ADD --chown=101:1001 +# ADD --link --chown=101:1001 https://cs.nginx.com/static/keys/nginx_signing.rsa.pub nginx_signing.rsa.pub + +FROM golang:alpine AS builder + +WORKDIR /tmp + +RUN apk add --no-cache git make \ + && git clone https://github.com/nginx/agent.git \ + && cd agent \ + && git checkout v3 \ + && make build + FROM nginx:1.28.0-alpine-otel ARG NJS_DIR ARG NGINX_CONF_DIR ARG BUILD_AGENT -RUN apk add --no-cache libcap \ +# RUN --mount=type=bind,from=nginx-files,src=nginx_signing.rsa.pub,target=/etc/apk/keys/nginx_signing.rsa.pub \ +# printf "%s\n" "http://packages.nginx.org/nginx-agent/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" >> /etc/apk/repositories \ +# && apk add --no-cache nginx-agent + +RUN apk add --no-cache libcap bash \ && mkdir -p /usr/lib/nginx/modules \ - && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx \ - && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx \ + && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx \ + && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx \ && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx-debug \ && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx-debug \ && apk del libcap +COPY --from=builder /tmp/agent/build/nginx-agent /usr/bin/nginx-agent + +COPY build/entrypoint.sh /agent/entrypoint.sh COPY ${NJS_DIR}/httpmatches.js /usr/lib/nginx/modules/njs/httpmatches.js COPY ${NGINX_CONF_DIR}/nginx.conf /etc/nginx/nginx.conf COPY ${NGINX_CONF_DIR}/grpc-error-locations.conf /etc/nginx/grpc-error-locations.conf @@ -24,4 +47,4 @@ LABEL org.nginx.ngf.image.build.agent="${BUILD_AGENT}" USER 101:1001 -CMD ["sh", "-c", "rm -rf /var/run/nginx/*.sock && nginx -g 'daemon off;'"] +ENTRYPOINT ["/agent/entrypoint.sh"] diff --git a/build/Dockerfile.nginxplus b/build/Dockerfile.nginxplus index caf7d8297f..42d0228e62 100644 --- a/build/Dockerfile.nginxplus +++ b/build/Dockerfile.nginxplus @@ -4,6 +4,15 @@ FROM scratch AS nginx-files # the following links can be replaced with local files if needed, i.e. 
ADD --chown=101:1001 ADD --link --chown=101:1001 https://cs.nginx.com/static/keys/nginx_signing.rsa.pub nginx_signing.rsa.pub +FROM golang:alpine AS builder + +WORKDIR /tmp + +RUN apk add --no-cache git make \ + && git clone https://github.com/nginx/agent.git \ + && cd agent \ + && git checkout v3 \ + && make build FROM alpine:3.21 @@ -18,7 +27,7 @@ RUN --mount=type=secret,id=nginx-repo.crt,dst=/etc/apk/cert.pem,mode=0644 \ addgroup -g 1001 -S nginx \ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \ && printf "%s\n" "https://pkgs.nginx.com/plus/${NGINX_PLUS_VERSION}/alpine/v$(grep -E -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" >> /etc/apk/repositories \ - && apk add --no-cache nginx-plus nginx-plus-module-njs nginx-plus-module-otel libcap \ + && apk add --no-cache nginx-plus nginx-plus-module-njs nginx-plus-module-otel libcap bash \ && mkdir -p /usr/lib/nginx/modules \ && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx \ && setcap -v 'cap_net_bind_service=+ep' /usr/sbin/nginx \ @@ -29,6 +38,9 @@ RUN --mount=type=secret,id=nginx-repo.crt,dst=/etc/apk/cert.pem,mode=0644 \ && ln -sf /dev/stdout /var/log/nginx/access.log \ && ln -sf /dev/stderr /var/log/nginx/error.log +COPY --from=builder /tmp/agent/build/nginx-agent /usr/bin/nginx-agent + +COPY build/entrypoint.sh /agent/entrypoint.sh COPY ${NJS_DIR}/httpmatches.js /usr/lib/nginx/modules/njs/httpmatches.js COPY ${NGINX_CONF_DIR}/nginx-plus.conf /etc/nginx/nginx.conf COPY ${NGINX_CONF_DIR}/grpc-error-locations.conf /etc/nginx/grpc-error-locations.conf @@ -40,4 +52,4 @@ USER 101:1001 LABEL org.nginx.ngf.image.build.agent="${BUILD_AGENT}" -CMD ["sh", "-c", "rm -rf /var/run/nginx/*.sock && nginx -g 'daemon off;'"] +ENTRYPOINT ["/agent/entrypoint.sh"] diff --git a/build/entrypoint.sh b/build/entrypoint.sh new file mode 100755 index 0000000000..1095831c57 --- /dev/null +++ b/build/entrypoint.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +set -euxo pipefail + +handle_term() { + echo "received TERM signal" + echo "stopping nginx-agent ..." + kill -TERM "${agent_pid}" 2>/dev/null + echo "stopping nginx ..." + kill -TERM "${nginx_pid}" 2>/dev/null +} + +trap 'handle_term' TERM + +rm -rf /var/run/nginx/*.sock + +# Launch nginx +echo "starting nginx ..." +/usr/sbin/nginx -g "daemon off;" & + +nginx_pid=$! + +SECONDS=0 + +while ! ps -ef | grep "nginx: master process" | grep -v grep; do + if ((SECONDS > 5)); then + echo "couldn't find nginx master process" + exit 1 + fi +done + +# start nginx-agent, pass args +echo "starting nginx-agent ..." +nginx-agent "$@" & + +agent_pid=$! + +if [ $? != 0 ]; then + echo "couldn't start the agent, please check the log file" + exit 1 +fi + +wait_term() { + wait ${agent_pid} + trap - TERM + kill -QUIT "${nginx_pid}" 2>/dev/null + echo "waiting for nginx to stop..." + wait ${nginx_pid} +} + +wait_term + +echo "nginx-agent process has stopped, exiting." 
diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index 204e4b5c3b..025da1ff39 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -123,6 +123,8 @@ spec: {{- toYaml .Values.nginxGateway.resources | nindent 10 }} {{- end }} ports: + - name: agent-grpc + containerPort: 8443 {{- if .Values.metrics.enable }} - name: metrics containerPort: {{ .Values.metrics.port }} diff --git a/charts/nginx-gateway-fabric/templates/service.yaml b/charts/nginx-gateway-fabric/templates/service.yaml index 7324f04723..6a0ed7cfef 100644 --- a/charts/nginx-gateway-fabric/templates/service.yaml +++ b/charts/nginx-gateway-fabric/templates/service.yaml @@ -14,7 +14,7 @@ spec: selector: {{- include "nginx-gateway.selectorLabels" . | nindent 4 }} ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml new file mode 100644 index 0000000000..80aba1c868 --- /dev/null +++ b/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: {{ .Release.Namespace }} +data: + nginx-agent.conf: |- + command: + server: + host: {{ include "nginx-gateway.fullname" . }}.{{ .Release.Namespace }}.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml new file mode 100644 index 0000000000..55c9ee5970 --- /dev/null +++ b/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml @@ -0,0 +1,186 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: tmp-nginx-deployment + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: tmp-nginx-deployment + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + initContainers: + - name: sleep # wait for a bit for control plane to be ready + image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} + imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} + command: + - /usr/bin/gateway + - sleep + - --duration=15s + - name: init + image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} + imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} + command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + {{- if .Values.nginx.plus }} + - --source + - /includes/mgmt.conf + - --nginx-plus + {{- end }} + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + securityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsUser: 102 + runAsGroup: 1001 + volumeMounts: + - name: nginx-includes-bootstrap + mountPath: /includes + - name: nginx-main-includes + mountPath: /etc/nginx/main-includes + containers: + - image: {{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag | default 
.Chart.AppVersion }} + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + name: nginx + {{- if .Values.nginx.lifecycle }} + lifecycle: + {{- toYaml .Values.nginx.lifecycle | nindent 10 }} + {{- end }} + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + seccompProfile: + type: RuntimeDefault + allowPrivilegeEscalation: {{ .Values.nginx.securityContext.allowPrivilegeEscalation }} + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsUser: 101 + runAsGroup: 1001 + volumeMounts: + - name: nginx-agent + mountPath: /etc/nginx-agent + - name: nginx-conf + mountPath: /etc/nginx/conf.d + - name: nginx-stream-conf + mountPath: /etc/nginx/stream-conf.d + - name: nginx-main-includes + mountPath: /etc/nginx/main-includes + - name: nginx-secrets + mountPath: /etc/nginx/secrets + - name: nginx-run + mountPath: /var/run/nginx + - name: nginx-cache + mountPath: /var/cache/nginx + - name: nginx-includes + mountPath: /etc/nginx/includes + {{- if .Values.nginx.plus }} + - name: nginx-lib + mountPath: /var/lib/nginx/state + {{- if .Values.nginx.usage.secretName }} + - name: nginx-plus-license + mountPath: /etc/nginx/license.jwt + subPath: license.jwt + {{- end }} + {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} + - name: nginx-plus-usage-certs + mountPath: /etc/nginx/certs-bootstrap/ + {{- end }} + {{- end }} + {{- with .Values.nginx.extraVolumeMounts -}} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.nginx.debug }} + command: + - "/bin/sh" + args: + - "-c" + - "rm -rf /var/run/nginx/*.sock && nginx-debug -g 'daemon off;'" + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if .Values.affinity }} + affinity: + {{- toYaml .Values.affinity | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "nginx-gateway.serviceAccountName" . }} + securityContext: + fsGroup: 1001 + runAsNonRoot: true + {{- if .Values.tolerations }} + tolerations: + {{- toYaml .Values.tolerations | nindent 6 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: + {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + volumes: + - name: nginx-agent + configMap: + name: nginx-agent-config + - name: nginx-conf + emptyDir: {} + - name: nginx-stream-conf + emptyDir: {} + - name: nginx-main-includes + emptyDir: {} + - name: nginx-secrets + emptyDir: {} + - name: nginx-run + emptyDir: {} + - name: nginx-cache + emptyDir: {} + - name: nginx-includes + emptyDir: {} + - name: nginx-includes-bootstrap + configMap: + name: nginx-includes-bootstrap + {{- if .Values.nginx.plus }} + - name: nginx-lib + emptyDir: {} + {{- if .Values.nginx.usage.secretName }} + - name: nginx-plus-license + secret: + secretName: {{ .Values.nginx.usage.secretName }} + {{- end }} + {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} + - name: nginx-plus-usage-certs + projected: + sources: + {{- if .Values.nginx.usage.caSecretName }} + - secret: + name: {{ .Values.nginx.usage.caSecretName }} + {{- end }} + {{- if .Values.nginx.usage.clientSSLSecretName }} + - secret: + name: {{ .Values.nginx.usage.clientSSLSecretName }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes -}} + {{ toYaml . 
| nindent 6 }} + {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml new file mode 100644 index 0000000000..6b82fd1e78 --- /dev/null +++ b/charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml @@ -0,0 +1,36 @@ +{{- if .Values.service.create }} +apiVersion: v1 +kind: Service +metadata: + name: tmp-nginx-deployment + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + {{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} +{{- end }} + type: {{ .Values.service.type }} +{{- if eq .Values.service.type "LoadBalancer" }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 2 }} + {{- end }} +{{- end}} + selector: + app.kubernetes.io/name: tmp-nginx-deployment + app.kubernetes.io/instance: {{ .Release.Name }} + ports: # Update the following ports to match your Gateway Listener ports +{{- if .Values.service.ports }} +{{ toYaml .Values.service.ports | indent 2 }} +{{ end }} +{{- end }} diff --git a/charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml b/charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml deleted file mode 100644 index 9ddaea89f1..0000000000 --- a/charts/nginx-gateway-fabric/tmp/tmp-nginx-deployment.yaml +++ /dev/null @@ -1,169 +0,0 @@ -# apiVersion: apps/v1 -# kind: Deployment -# metadata: -# name: tmp-nginx-deployment -# namespace: {{ .Release.Namespace }} -# spec: -# template: -# spec: -# initContainers: -# - name: init -# image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} -# imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} -# command: -# - /usr/bin/gateway -# - initialize -# - --source -# - /includes/main.conf -# {{- if .Values.nginx.plus }} -# - --source -# - /includes/mgmt.conf -# - --nginx-plus -# {{- end }} -# - --destination -# - /etc/nginx/main-includes -# env: -# - name: POD_UID -# valueFrom: -# fieldRef: -# fieldPath: metadata.uid -# securityContext: -# seccompProfile: -# type: RuntimeDefault -# capabilities: -# add: -# - KILL # Set because the binary has CAP_KILL for the main controller process. Not used by init. 
-# drop: -# - ALL -# readOnlyRootFilesystem: true -# runAsUser: 102 -# runAsGroup: 1001 -# volumeMounts: -# - name: nginx-includes-bootstrap -# mountPath: /includes -# - name: nginx-main-includes -# mountPath: /etc/nginx/main-includes -# containers: -# - image: {{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag | default .Chart.AppVersion }} -# imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} -# name: nginx -# {{- if .Values.nginx.lifecycle }} -# lifecycle: -# {{- toYaml .Values.nginx.lifecycle | nindent 10 }} -# {{- end }} -# ports: -# - containerPort: 80 -# name: http -# - containerPort: 443 -# name: https -# securityContext: -# seccompProfile: -# type: RuntimeDefault -# allowPrivilegeEscalation: {{ .Values.nginx.securityContext.allowPrivilegeEscalation }} -# capabilities: -# add: -# - NET_BIND_SERVICE -# drop: -# - ALL -# readOnlyRootFilesystem: true -# runAsUser: 101 -# runAsGroup: 1001 -# volumeMounts: -# - name: nginx-conf -# mountPath: /etc/nginx/conf.d -# - name: nginx-stream-conf -# mountPath: /etc/nginx/stream-conf.d -# - name: nginx-main-includes -# mountPath: /etc/nginx/main-includes -# - name: nginx-secrets -# mountPath: /etc/nginx/secrets -# - name: nginx-run -# mountPath: /var/run/nginx -# - name: nginx-cache -# mountPath: /var/cache/nginx -# - name: nginx-includes -# mountPath: /etc/nginx/includes -# {{- if .Values.nginx.plus }} -# - name: nginx-lib -# mountPath: /var/lib/nginx/state -# {{- if .Values.nginx.usage.secretName }} -# - name: nginx-plus-license -# mountPath: /etc/nginx/license.jwt -# subPath: license.jwt -# {{- end }} -# {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} -# - name: nginx-plus-usage-certs -# mountPath: /etc/nginx/certs-bootstrap/ -# {{- end }} -# {{- end }} -# {{- with .Values.nginx.extraVolumeMounts -}} -# {{ toYaml . | nindent 8 }} -# {{- end }} -# {{- if .Values.nginx.debug }} -# command: -# - "/bin/sh" -# args: -# - "-c" -# - "rm -rf /var/run/nginx/*.sock && nginx-debug -g 'daemon off;'" -# {{- end }} -# terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} -# {{- if .Values.affinity }} -# affinity: -# {{- toYaml .Values.affinity | nindent 8 }} -# {{- end }} -# serviceAccountName: {{ include "nginx-gateway.serviceAccountName" . 
}} -# shareProcessNamespace: true -# securityContext: -# fsGroup: 1001 -# runAsNonRoot: true -# {{- if .Values.tolerations }} -# tolerations: -# {{- toYaml .Values.tolerations | nindent 6 }} -# {{- end }} -# {{- if .Values.nodeSelector }} -# nodeSelector: -# {{- toYaml .Values.nodeSelector | nindent 8 }} -# {{- end }} -# volumes: -# - name: nginx-conf -# emptyDir: {} -# - name: nginx-stream-conf -# emptyDir: {} -# - name: nginx-main-includes -# emptyDir: {} -# - name: nginx-secrets -# emptyDir: {} -# - name: nginx-run -# emptyDir: {} -# - name: nginx-cache -# emptyDir: {} -# - name: nginx-includes -# emptyDir: {} -# - name: nginx-includes-bootstrap -# configMap: -# name: nginx-includes-bootstrap -# {{- if .Values.nginx.plus }} -# - name: nginx-lib -# emptyDir: {} -# {{- if .Values.nginx.usage.secretName }} -# - name: nginx-plus-license -# secret: -# secretName: {{ .Values.nginx.usage.secretName }} -# {{- end }} -# {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} -# - name: nginx-plus-usage-certs -# projected: -# sources: -# {{- if .Values.nginx.usage.caSecretName }} -# - secret: -# name: {{ .Values.nginx.usage.caSecretName }} -# {{- end }} -# {{- if .Values.nginx.usage.clientSSLSecretName }} -# - secret: -# name: {{ .Values.nginx.usage.clientSSLSecretName }} -# {{- end }} -# {{- end }} -# {{- end }} -# {{- with .Values.extraVolumes -}} -# {{ toYaml . | nindent 6 }} -# {{- end }} diff --git a/charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml b/charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml deleted file mode 100644 index 30901bfb6a..0000000000 --- a/charts/nginx-gateway-fabric/tmp/tmp-nginx-service.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# {{- if .Values.service.create }} -# apiVersion: v1 -# kind: Service -# metadata: -# name: {{ include "nginx-gateway.fullname" . }} -# namespace: {{ .Release.Namespace }} -# labels: -# {{- include "nginx-gateway.labels" . | nindent 4 }} -# {{- if .Values.service.annotations }} -# annotations: -# {{ toYaml .Values.service.annotations | indent 4 }} -# {{- end }} -# spec: -# {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} -# {{- if .Values.service.externalTrafficPolicy }} -# externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} -# {{- end }} -# {{- end }} -# type: {{ .Values.service.type }} -# {{- if eq .Values.service.type "LoadBalancer" }} -# {{- if .Values.service.loadBalancerIP }} -# loadBalancerIP: {{ .Values.service.loadBalancerIP }} -# {{- end }} -# {{- if .Values.service.loadBalancerSourceRanges }} -# loadBalancerSourceRanges: -# {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 2 }} -# {{- end }} -# {{- end}} -# selector: -# {{- include "nginx-gateway.selectorLabels" . 
| nindent 4 }} -# ports: # Update the following ports to match your Gateway Listener ports -# {{- if .Values.service.ports }} -# {{ toYaml .Values.service.ports | indent 2 }} -# {{ end }} -# {{- end }} diff --git a/config/tests/static-deployment.yaml b/config/tests/static-deployment.yaml index d6eaf45103..5009a827ed 100644 --- a/config/tests/static-deployment.yaml +++ b/config/tests/static-deployment.yaml @@ -53,6 +53,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - name: agent-grpc + containerPort: 8443 - name: health containerPort: 8081 readinessProbe: diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml index 534abf6dfa..6bc4dff222 100644 --- a/deploy/aws-nlb/deploy.yaml +++ b/deploy/aws-nlb/deploy.yaml @@ -145,6 +145,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -168,15 +188,43 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-type: external + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -232,6 +280,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -257,6 +307,124 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: 
nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index ebe6879868..99ee0c2be2 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -145,6 +145,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -168,15 +188,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -232,6 +277,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -259,6 +306,126 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + 
template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index 534abf6dfa..d9b557e4d7 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -145,6 +145,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -168,15 +188,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: 
tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -232,6 +277,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -257,6 +304,124 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index 3cae2cf0f2..c23de692ed 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ 
b/deploy/experimental-nginx-plus/deploy.yaml @@ -158,6 +158,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -186,15 +206,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -253,6 +298,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -278,6 +325,137 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + - mountPath: /var/lib/nginx/state + name: nginx-lib + - mountPath: /etc/nginx/license.jwt + name: nginx-plus-license + subPath: license.jwt + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --source + - /includes/mgmt.conf + - --nginx-plus + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + 
securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap + - emptyDir: {} + name: nginx-lib + - name: nginx-plus-license + secret: + secretName: nplus-license +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index a7bec5e823..e6cd3b16b4 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -150,6 +150,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -173,15 +193,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -238,6 +283,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -263,6 +310,124 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + 
seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index dbdb743df2..479e20805c 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -153,6 +153,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -181,15 +201,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -247,6 +292,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - 
containerPort: 8081 @@ -272,6 +319,137 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + - mountPath: /var/lib/nginx/state + name: nginx-lib + - mountPath: /etc/nginx/license.jwt + name: nginx-plus-license + subPath: license.jwt + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --source + - /includes/mgmt.conf + - --nginx-plus + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap + - emptyDir: {} + name: nginx-lib + - name: nginx-plus-license + secret: + secretName: nplus-license +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index 534abf6dfa..c39299e838 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -145,6 +145,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + 
level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -168,15 +188,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: NodePort +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -232,6 +277,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -257,6 +304,124 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} 
+ name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index 940f15457d..ea1e5f43f9 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -153,6 +153,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -176,15 +196,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -240,6 +285,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -265,6 +312,124 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + 
- command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index c05cf2f26a..435319441e 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -155,6 +155,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -183,15 +203,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -250,6 +295,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -275,6 +322,137 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge + imagePullPolicy: Always + name: nginx + 
ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + - mountPath: /var/lib/nginx/state + name: nginx-lib + - mountPath: /etc/nginx/license.jwt + name: nginx-plus-license + subPath: license.jwt + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --source + - /includes/mgmt.conf + - --nginx-plus + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap + - emptyDir: {} + name: nginx-lib + - name: nginx-plus-license + secret: + secretName: nplus-license +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index 63a114ab6d..e97a00d98f 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -147,6 +147,26 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 +data: + nginx-agent.conf: |- + command: + server: + host: nginx-gateway.nginx-gateway.svc + port: 443 + allowed_directories: + - /etc/nginx + - /usr/share/nginx + - /var/run/nginx + features: + - connection + log: + level: debug +kind: ConfigMap +metadata: + name: nginx-agent-config + namespace: nginx-gateway +--- +apiVersion: v1 data: main.conf: | error_log stderr info; @@ -170,15 +190,40 @@ metadata: namespace: nginx-gateway spec: ports: - - name: grpc + - name: agent-grpc port: 443 protocol: TCP - targetPort: 443 + targetPort: 8443 selector: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: nginx-gateway type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + 
app.kubernetes.io/version: edge + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + type: LoadBalancer +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -235,6 +280,8 @@ spec: imagePullPolicy: Always name: nginx-gateway ports: + - containerPort: 8443 + name: agent-grpc - containerPort: 9113 name: metrics - containerPort: 8081 @@ -260,6 +307,124 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tmp-nginx-deployment + namespace: nginx-gateway +spec: + selector: + matchLabels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + template: + metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: tmp-nginx-deployment + spec: + containers: + - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nginx-agent + name: nginx-agent + - mountPath: /etc/nginx/conf.d + name: nginx-conf + - mountPath: /etc/nginx/stream-conf.d + name: nginx-stream-conf + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + - mountPath: /etc/nginx/secrets + name: nginx-secrets + - mountPath: /var/run/nginx + name: nginx-run + - mountPath: /var/cache/nginx + name: nginx-cache + - mountPath: /etc/nginx/includes + name: nginx-includes + initContainers: + - command: + - /usr/bin/gateway + - sleep + - --duration=15s + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: sleep + - command: + - /usr/bin/gateway + - initialize + - --source + - /includes/main.conf + - --destination + - /etc/nginx/main-includes + env: + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: init + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 102 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /includes + name: nginx-includes-bootstrap + - mountPath: /etc/nginx/main-includes + name: nginx-main-includes + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + name: nginx-agent-config + name: nginx-agent + - emptyDir: {} + name: nginx-conf + - emptyDir: {} + name: nginx-stream-conf + - emptyDir: {} + name: nginx-main-includes + - emptyDir: {} + name: nginx-secrets + - emptyDir: {} + name: nginx-run + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-includes + - configMap: + name: nginx-includes-bootstrap + name: nginx-includes-bootstrap +--- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: diff --git a/go.mod b/go.mod index e19a5cfde8..400114b860 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/go-logr/logr v1.4.2 
github.com/google/go-cmp v0.7.0 + github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4 github.com/nginx/telemetry-exporter v0.1.4 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 @@ -16,6 +17,7 @@ require ( go.opentelemetry.io/otel v1.35.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 go.uber.org/zap v1.27.0 + google.golang.org/grpc v1.72.0 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 k8s.io/apimachinery v0.32.3 @@ -26,15 +28,16 @@ require ( ) require ( + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -52,7 +55,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -82,7 +85,6 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect - google.golang.org/grpc v1.72.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index e451191866..5c0c2f008c 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1 h1:2IGhRovxlsOIQgx2ekZWo4wTPAYpck41+18ICxs37is= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1/go.mod h1:Tgn5bgL220vkFOI0KPStlcClPeOJzAv4uT+V8JXGUnw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -15,14 +17,14 @@ github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -40,12 +42,14 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= @@ -66,8 +70,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty 
v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -85,6 +89,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4 h1:Tn0SOlxq9uaJuqc6DUGZGYszrtHAHOaLnhbBWMzK1Bs= +github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4/go.mod h1:HDi/Je5AKCe5by/hWs2jbzUqi3BN4K32hMD2/hWN5G8= github.com/nginx/telemetry-exporter v0.1.4 h1:3ikgKlyz/O57oaBLkxCInMjr74AhGTKr9rHdRAkkl/w= github.com/nginx/telemetry-exporter v0.1.4/go.mod h1:bl6qmsxgk4a9D0X8R5E3sUNXN2iECPEK1JNbRLhN5C4= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= @@ -200,6 +206,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index ea5d61b16d..4493f23fe9 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -9,6 +9,7 @@ import ( tel "github.com/nginx/telemetry-exporter/pkg/telemetry" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "google.golang.org/grpc" appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" discoveryV1 "k8s.io/api/discovery/v1" @@ -71,6 +72,7 @@ const ( plusCAField = "ca.crt" plusClientCertField = "tls.crt" plusClientKeyField = "tls.key" + grpcServerPort = 8443 ) var scheme = runtime.NewScheme() @@ -178,11 +180,24 @@ func StartManager(cfg config.Config) error { Logger: cfg.Logger.WithName("deployCtxCollector"), }) - eventHandler := newEventHandlerImpl(eventHandlerConfig{ - nginxUpdater: &agent.NginxUpdaterImpl{ - Logger: cfg.Logger.WithName("nginxUpdater"), - Plus: cfg.Plus, + nginxUpdater := agent.NewNginxUpdater(cfg.Logger.WithName("nginxUpdater"), cfg.Plus) + + grpcServer := &agent.GRPCServer{ + Logger: cfg.Logger.WithName("agentGRPCServer"), + RegisterServices: []func(*grpc.Server){ + nginxUpdater.CommandService.Register, + nginxUpdater.FileService.Register, }, + Port: grpcServerPort, + } + + if err = mgr.Add(&runnables.LeaderOrNonLeader{Runnable: grpcServer}); err != nil { + return fmt.Errorf("cannot register grpc server: %w", err) + } + + // TODO(sberman): event handler loop should wait on a channel until the grpc server has started + eventHandler := 
newEventHandlerImpl(eventHandlerConfig{ + nginxUpdater: nginxUpdater, metricsCollector: handlerCollector, statusUpdater: groupStatusUpdater, processor: processor, diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index 93777628f2..c6955040cb 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -16,8 +16,19 @@ type NginxUpdater interface { // NginxUpdaterImpl implements the NginxUpdater interface. type NginxUpdaterImpl struct { - Logger logr.Logger - Plus bool + CommandService *commandService + FileService *fileService + Logger logr.Logger + Plus bool +} + +func NewNginxUpdater(logger logr.Logger, plus bool) *NginxUpdaterImpl { + return &NginxUpdaterImpl{ + Logger: logger, + Plus: plus, + CommandService: newCommandService(), + FileService: newFileService(), + } } // UpdateConfig sends the nginx configuration to the agent. diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go new file mode 100644 index 0000000000..3cdf6ce101 --- /dev/null +++ b/internal/mode/static/nginx/agent/command.go @@ -0,0 +1,89 @@ +package agent + +import ( + "context" + "errors" + "fmt" + "time" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/grpc" +) + +// commandService handles the connection and subscription to the agent. +type commandService struct { + pb.CommandServiceServer +} + +func newCommandService() *commandService { + return &commandService{} +} + +func (cs *commandService) Register(server *grpc.Server) { + pb.RegisterCommandServiceServer(server, cs) +} + +func (cs *commandService) CreateConnection( + _ context.Context, + req *pb.CreateConnectionRequest, +) (*pb.CreateConnectionResponse, error) { + if req == nil { + return nil, errors.New("empty connection request") + } + + fmt.Printf("Creating connection for nginx pod: %s\n", req.GetResource().GetContainerInfo().GetHostname()) + + return &pb.CreateConnectionResponse{ + Response: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + }, nil +} + +func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error { + fmt.Println("Received subscribe request") + + ctx := in.Context() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(1 * time.Minute): + dummyRequest := &pb.ManagementPlaneRequest{ + Request: &pb.ManagementPlaneRequest_StatusRequest{ + StatusRequest: &pb.StatusRequest{}, + }, + } + if err := in.Send(dummyRequest); err != nil { // will likely need retry logic + fmt.Printf("ERROR: %v\n", err) + } + } + } +} + +func (cs *commandService) UpdateDataPlaneStatus( + _ context.Context, + req *pb.UpdateDataPlaneStatusRequest, +) (*pb.UpdateDataPlaneStatusResponse, error) { + fmt.Println("Updating data plane status") + + if req == nil { + return nil, errors.New("empty update data plane status request") + } + + return &pb.UpdateDataPlaneStatusResponse{}, nil +} + +func (cs *commandService) UpdateDataPlaneHealth( + _ context.Context, + req *pb.UpdateDataPlaneHealthRequest, +) (*pb.UpdateDataPlaneHealthResponse, error) { + fmt.Println("Updating data plane health") + + if req == nil { + return nil, errors.New("empty update dataplane health request") + } + + return &pb.UpdateDataPlaneHealthResponse{}, nil +} diff --git a/internal/mode/static/nginx/agent/file.go b/internal/mode/static/nginx/agent/file.go new file mode 100644 index 0000000000..9a3df38c4e --- /dev/null +++ b/internal/mode/static/nginx/agent/file.go @@ -0,0 
+1,62 @@ +package agent + +import ( + "context" + "fmt" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/grpc" +) + +// fileService handles file management between the control plane and the agent. +type fileService struct { + pb.FileServiceServer +} + +func newFileService() *fileService { + return &fileService{} +} + +func (fs *fileService) Register(server *grpc.Server) { + pb.RegisterFileServiceServer(server, fs) +} + +func (fs *fileService) GetOverview( + _ context.Context, + _ *pb.GetOverviewRequest, +) (*pb.GetOverviewResponse, error) { + fmt.Println("Get overview request") + + return &pb.GetOverviewResponse{ + Overview: &pb.FileOverview{}, + }, nil +} + +func (fs *fileService) UpdateOverview( + _ context.Context, + _ *pb.UpdateOverviewRequest, +) (*pb.UpdateOverviewResponse, error) { + fmt.Println("Update overview request") + + return &pb.UpdateOverviewResponse{}, nil +} + +func (fs *fileService) GetFile( + _ context.Context, + req *pb.GetFileRequest, +) (*pb.GetFileResponse, error) { + filename := req.GetFileMeta().GetName() + hash := req.GetFileMeta().GetHash() + fmt.Printf("Getting file: %s, %s\n", filename, hash) + + return &pb.GetFileResponse{}, nil +} + +func (fs *fileService) UpdateFile( + _ context.Context, + req *pb.UpdateFileRequest, +) (*pb.UpdateFileResponse, error) { + fmt.Println("Update file request for: ", req.GetFile().GetFileMeta().GetName()) + + return &pb.UpdateFileResponse{}, nil +} diff --git a/internal/mode/static/nginx/agent/grpc.go b/internal/mode/static/nginx/agent/grpc.go new file mode 100644 index 0000000000..6c558da2f3 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc.go @@ -0,0 +1,59 @@ +package agent + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/go-logr/logr" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + keepAliveTime = 1 * time.Minute + keepAliveTimeout = 15 * time.Second +) + +// GRPCServer is a gRPC server for communicating with the nginx agent. +type GRPCServer struct { + Logger logr.Logger + // RegisterServices is a list of functions to register gRPC services to the gRPC server. + RegisterServices []func(*grpc.Server) + // Port is the port that the server is listening on. + // Must be exposed in the control plane deployment/service. + Port int +} + +// Start is a runnable that starts the gRPC server for communicating with the nginx agent. 
+func (g *GRPCServer) Start(ctx context.Context) error { + listener, err := net.Listen("tcp", fmt.Sprintf(":%d", g.Port)) + if err != nil { + return err + } + + server := grpc.NewServer( + grpc.KeepaliveParams( + keepalive.ServerParameters{ + Time: keepAliveTime, + Timeout: keepAliveTimeout, + }, + ), + ) + + for _, registerSvc := range g.RegisterServices { + registerSvc(server) + } + + go func() { + <-ctx.Done() + g.Logger.Info("Shutting down GRPC Server") + server.GracefulStop() + }() + + return server.Serve(listener) +} + +var _ manager.Runnable = &GRPCServer{} diff --git a/tests/go.mod b/tests/go.mod index 3ce2b439a9..cf265e2fae 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -26,7 +26,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -45,7 +45,7 @@ require ( github.com/influxdata/tdigest v0.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/miekg/dns v1.1.65 // indirect github.com/moby/spdystream v0.5.0 // indirect diff --git a/tests/go.sum b/tests/go.sum index c69d6c4a8d..f32304f508 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -18,8 +18,8 @@ github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -67,8 +67,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod 
h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= From b6a6dd6a505661e799af807a08675531a659b844 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 6 Jan 2025 13:37:11 -0700 Subject: [PATCH 03/32] CP/DP Split: track agent connections (#2970) Added the following: - middleware to extract the agent's IP address and store it in the gRPC context - link the agent's hostname to its IP address when connecting and track it - use this linkage to pause the Subscription until the agent registers itself, then proceed This logic is subject to change as we enhance it (for example, tracking an auth token instead of the IP address). --- internal/mode/static/manager.go | 11 +- internal/mode/static/nginx/agent/agent.go | 19 +-- internal/mode/static/nginx/agent/command.go | 120 ++++++++++++++---- internal/mode/static/nginx/agent/file.go | 38 +++--- internal/mode/static/nginx/agent/grpc.go | 59 --------- .../static/nginx/agent/grpc/connections.go | 50 ++++++++ .../nginx/agent/grpc/context/context.go | 24 ++++ .../static/nginx/agent/grpc/context/doc.go | 4 + internal/mode/static/nginx/agent/grpc/doc.go | 4 + internal/mode/static/nginx/agent/grpc/grpc.go | 91 +++++++++++++ .../nginx/agent/grpc/interceptor/doc.go | 4 + .../agent/grpc/interceptor/interceptor.go | 83 ++++++++++++ 12 files changed, 392 insertions(+), 115 deletions(-) delete mode 100644 internal/mode/static/nginx/agent/grpc.go create mode 100644 internal/mode/static/nginx/agent/grpc/connections.go create mode 100644 internal/mode/static/nginx/agent/grpc/context/context.go create mode 100644 internal/mode/static/nginx/agent/grpc/context/doc.go create mode 100644 internal/mode/static/nginx/agent/grpc/doc.go create mode 100644 internal/mode/static/nginx/agent/grpc/grpc.go create mode 100644 internal/mode/static/nginx/agent/grpc/interceptor/doc.go create mode 100644 internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 4493f23fe9..459a79b5e4 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -51,6 +51,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics/collectors" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" ngxcfg "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/clientsettings" @@ -182,14 +183,14 @@ func StartManager(cfg config.Config) error { nginxUpdater := agent.NewNginxUpdater(cfg.Logger.WithName("nginxUpdater"), cfg.Plus) - grpcServer := &agent.GRPCServer{ - Logger: cfg.Logger.WithName("agentGRPCServer"), - RegisterServices: []func(*grpc.Server){ + grpcServer := agentgrpc.NewServer( + cfg.Logger.WithName("agentGRPCServer"), + grpcServerPort, + []func(*grpc.Server){ nginxUpdater.CommandService.Register, nginxUpdater.FileService.Register, }, - Port: grpcServerPort, - } + ) if err = mgr.Add(&runnables.LeaderOrNonLeader{Runnable: grpcServer}); err != nil { 
return fmt.Errorf("cannot register grpc server: %w", err) diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index c6955040cb..1ce5d21b0b 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -18,30 +18,31 @@ type NginxUpdater interface { type NginxUpdaterImpl struct { CommandService *commandService FileService *fileService - Logger logr.Logger - Plus bool + logger logr.Logger + plus bool } +// NewNginxUpdater returns a new NginxUpdaterImpl instance. func NewNginxUpdater(logger logr.Logger, plus bool) *NginxUpdaterImpl { return &NginxUpdaterImpl{ - Logger: logger, - Plus: plus, - CommandService: newCommandService(), - FileService: newFileService(), + logger: logger, + plus: plus, + CommandService: newCommandService(logger.WithName("commandService")), + FileService: newFileService(logger.WithName("fileService")), } } // UpdateConfig sends the nginx configuration to the agent. func (n *NginxUpdaterImpl) UpdateConfig(files int) { - n.Logger.Info("Sending nginx configuration to agent", "numFiles", files) + n.logger.Info("Sending nginx configuration to agent", "numFiles", files) } // UpdateUpstreamServers sends an APIRequest to the agent to update upstream servers using the NGINX Plus API. // Only applicable when using NGINX Plus. func (n *NginxUpdaterImpl) UpdateUpstreamServers() { - if !n.Plus { + if !n.plus { return } - n.Logger.Info("Updating upstream servers using NGINX Plus API") + n.logger.Info("Updating upstream servers using NGINX Plus API") } diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go index 3cdf6ce101..79c863d129 100644 --- a/internal/mode/static/nginx/agent/command.go +++ b/internal/mode/static/nginx/agent/command.go @@ -6,32 +6,51 @@ import ( "fmt" "time" + "github.com/go-logr/logr" pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" "google.golang.org/grpc" + + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" ) -// commandService handles the connection and subscription to the agent. +// commandService handles the connection and subscription to the data plane agent. type commandService struct { pb.CommandServiceServer + connTracker *agentgrpc.ConnectionsTracker + // TODO(sberman): all logs are at Info level right now. Adjust appropriately. + logger logr.Logger } -func newCommandService() *commandService { - return &commandService{} +func newCommandService(logger logr.Logger) *commandService { + return &commandService{ + logger: logger, + connTracker: agentgrpc.NewConnectionsTracker(), + } } func (cs *commandService) Register(server *grpc.Server) { pb.RegisterCommandServiceServer(server, cs) } +// CreateConnection registers a data plane agent with the control plane. 
func (cs *commandService) CreateConnection( - _ context.Context, + ctx context.Context, req *pb.CreateConnectionRequest, ) (*pb.CreateConnectionResponse, error) { if req == nil { return nil, errors.New("empty connection request") } - fmt.Printf("Creating connection for nginx pod: %s\n", req.GetResource().GetContainerInfo().GetHostname()) + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return nil, agentgrpc.ErrStatusInvalidConnection + } + + podName := req.GetResource().GetContainerInfo().GetHostname() + + cs.logger.Info(fmt.Sprintf("Creating connection for nginx pod: %s", podName)) + cs.connTracker.Track(gi.IPAddress, podName) return &pb.CreateConnectionResponse{ Response: &pb.CommandResponse{ @@ -40,50 +59,99 @@ func (cs *commandService) CreateConnection( }, nil } +// Subscribe is a decoupled communication mechanism between the data plane agent and control plane. func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error { - fmt.Println("Received subscribe request") - ctx := in.Context() + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return agentgrpc.ErrStatusInvalidConnection + } + + cs.logger.Info(fmt.Sprintf("Received subscribe request from %q", gi.IPAddress)) + + go cs.listenForDataPlaneResponse(ctx, in) + + // wait for the agent to report itself + podName, err := cs.waitForConnection(ctx, gi) + if err != nil { + cs.logger.Error(err, "error waiting for connection") + return err + } + + cs.logger.Info(fmt.Sprintf("Handling subscription for %s/%s", podName, gi.IPAddress)) for { select { case <-ctx.Done(): return ctx.Err() case <-time.After(1 * time.Minute): dummyRequest := &pb.ManagementPlaneRequest{ - Request: &pb.ManagementPlaneRequest_StatusRequest{ - StatusRequest: &pb.StatusRequest{}, + Request: &pb.ManagementPlaneRequest_HealthRequest{ + HealthRequest: &pb.HealthRequest{}, }, } - if err := in.Send(dummyRequest); err != nil { // will likely need retry logic - fmt.Printf("ERROR: %v\n", err) + if err := in.Send(dummyRequest); err != nil { // TODO(sberman): will likely need retry logic + cs.logger.Error(err, "error sending request to agent") } } } } -func (cs *commandService) UpdateDataPlaneStatus( - _ context.Context, - req *pb.UpdateDataPlaneStatusRequest, -) (*pb.UpdateDataPlaneStatusResponse, error) { - fmt.Println("Updating data plane status") +// TODO(sberman): current issue: when control plane restarts, agent doesn't re-establish a CreateConnection call, +// so this fails. 
+func (cs *commandService) waitForConnection(ctx context.Context, gi grpcContext.GrpcInfo) (string, error) { + var podName string + ticker := time.NewTicker(time.Second) + defer ticker.Stop() - if req == nil { - return nil, errors.New("empty update data plane status request") + timer := time.NewTimer(30 * time.Second) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-timer.C: + return "", errors.New("timed out waiting for agent connection") + case <-ticker.C: + if podName = cs.connTracker.GetConnection(gi.IPAddress); podName != "" { + return podName, nil + } + } } +} - return &pb.UpdateDataPlaneStatusResponse{}, nil +func (cs *commandService) listenForDataPlaneResponse(ctx context.Context, in pb.CommandService_SubscribeServer) { + for { + select { + case <-ctx.Done(): + return + default: + dataPlaneResponse, err := in.Recv() + cs.logger.Info(fmt.Sprintf("Received data plane response: %v", dataPlaneResponse)) + if err != nil { + cs.logger.Error(err, "failed to receive data plane response") + return + } + } + } } +// UpdateDataPlaneHealth includes full health information about the data plane as reported by the agent. +// TODO(sberman): Is health monitoring the data planes something useful for us to do? func (cs *commandService) UpdateDataPlaneHealth( _ context.Context, - req *pb.UpdateDataPlaneHealthRequest, + _ *pb.UpdateDataPlaneHealthRequest, ) (*pb.UpdateDataPlaneHealthResponse, error) { - fmt.Println("Updating data plane health") - - if req == nil { - return nil, errors.New("empty update dataplane health request") - } - return &pb.UpdateDataPlaneHealthResponse{}, nil } + +// UpdateDataPlaneStatus is called by agent on startup and upon any change in agent metadata, +// instance metadata, or configurations. Since directly changing nginx configuration on the instance +// is not supported, this is a no-op for NGF. +func (cs *commandService) UpdateDataPlaneStatus( + _ context.Context, + _ *pb.UpdateDataPlaneStatusRequest, +) (*pb.UpdateDataPlaneStatusResponse, error) { + return &pb.UpdateDataPlaneStatusResponse{}, nil +} diff --git a/internal/mode/static/nginx/agent/file.go b/internal/mode/static/nginx/agent/file.go index 9a3df38c4e..296e1705ee 100644 --- a/internal/mode/static/nginx/agent/file.go +++ b/internal/mode/static/nginx/agent/file.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/go-logr/logr" pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" "google.golang.org/grpc" ) @@ -11,52 +12,57 @@ import ( // fileService handles file management between the control plane and the agent. type fileService struct { pb.FileServiceServer + // TODO(sberman): all logs are at Info level right now. Adjust appropriately. + logger logr.Logger } -func newFileService() *fileService { - return &fileService{} +func newFileService(logger logr.Logger) *fileService { + return &fileService{logger: logger} } func (fs *fileService) Register(server *grpc.Server) { pb.RegisterFileServiceServer(server, fs) } +// GetOverview gets the overview of files for a particular configuration version of an instance. +// Agent calls this if it's missing an overview when a ConfigApplyRequest is called by the control plane. 
func (fs *fileService) GetOverview( _ context.Context, _ *pb.GetOverviewRequest, ) (*pb.GetOverviewResponse, error) { - fmt.Println("Get overview request") + fs.logger.Info("Get overview request") return &pb.GetOverviewResponse{ Overview: &pb.FileOverview{}, }, nil } -func (fs *fileService) UpdateOverview( - _ context.Context, - _ *pb.UpdateOverviewRequest, -) (*pb.UpdateOverviewResponse, error) { - fmt.Println("Update overview request") - - return &pb.UpdateOverviewResponse{}, nil -} - +// GetFile is called by the agent when it needs to download a file for a ConfigApplyRequest. func (fs *fileService) GetFile( _ context.Context, req *pb.GetFileRequest, ) (*pb.GetFileResponse, error) { filename := req.GetFileMeta().GetName() hash := req.GetFileMeta().GetHash() - fmt.Printf("Getting file: %s, %s\n", filename, hash) + fs.logger.Info(fmt.Sprintf("Getting file: %s, %s", filename, hash)) return &pb.GetFileResponse{}, nil } +// UpdateOverview is called by agent on startup and whenever any files change on the instance. +// Since directly changing nginx configuration on the instance is not supported, this is a no-op for NGF. +func (fs *fileService) UpdateOverview( + _ context.Context, + _ *pb.UpdateOverviewRequest, +) (*pb.UpdateOverviewResponse, error) { + return &pb.UpdateOverviewResponse{}, nil +} + +// UpdateFile is called by agent whenever any files change on the instance. +// Since directly changing nginx configuration on the instance is not supported, this is a no-op for NGF. func (fs *fileService) UpdateFile( _ context.Context, - req *pb.UpdateFileRequest, + _ *pb.UpdateFileRequest, ) (*pb.UpdateFileResponse, error) { - fmt.Println("Update file request for: ", req.GetFile().GetFileMeta().GetName()) - return &pb.UpdateFileResponse{}, nil } diff --git a/internal/mode/static/nginx/agent/grpc.go b/internal/mode/static/nginx/agent/grpc.go deleted file mode 100644 index 6c558da2f3..0000000000 --- a/internal/mode/static/nginx/agent/grpc.go +++ /dev/null @@ -1,59 +0,0 @@ -package agent - -import ( - "context" - "fmt" - "net" - "time" - - "github.com/go-logr/logr" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -const ( - keepAliveTime = 1 * time.Minute - keepAliveTimeout = 15 * time.Second -) - -// GRPCServer is a gRPC server for communicating with the nginx agent. -type GRPCServer struct { - Logger logr.Logger - // RegisterServices is a list of functions to register gRPC services to the gRPC server. - RegisterServices []func(*grpc.Server) - // Port is the port that the server is listening on. - // Must be exposed in the control plane deployment/service. - Port int -} - -// Start is a runnable that starts the gRPC server for communicating with the nginx agent. 
-func (g *GRPCServer) Start(ctx context.Context) error { - listener, err := net.Listen("tcp", fmt.Sprintf(":%d", g.Port)) - if err != nil { - return err - } - - server := grpc.NewServer( - grpc.KeepaliveParams( - keepalive.ServerParameters{ - Time: keepAliveTime, - Timeout: keepAliveTimeout, - }, - ), - ) - - for _, registerSvc := range g.RegisterServices { - registerSvc(server) - } - - go func() { - <-ctx.Done() - g.Logger.Info("Shutting down GRPC Server") - server.GracefulStop() - }() - - return server.Serve(listener) -} - -var _ manager.Runnable = &GRPCServer{} diff --git a/internal/mode/static/nginx/agent/grpc/connections.go b/internal/mode/static/nginx/agent/grpc/connections.go new file mode 100644 index 0000000000..af99b84002 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/connections.go @@ -0,0 +1,50 @@ +package grpc + +import ( + "sync" +) + +// ConnectionsTracker keeps track of all connections between the control plane and nginx agents. +type ConnectionsTracker struct { + // connections contains a map of all IP addresses that have connected and their associated pod names. + // TODO(sberman): we'll likely need to create a channel for each connection that can be stored in this map. + // Then the Subscription listens on the channel for its connection, while the nginxUpdater sends the config + // for the pod over that channel. + connections map[string]string + + lock sync.Mutex +} + +// NewConnectionsTracker returns a new ConnectionsTracker instance. +func NewConnectionsTracker() *ConnectionsTracker { + return &ConnectionsTracker{ + connections: make(map[string]string), + } +} + +// Track adds a connection to the tracking map. +// TODO(sberman): we need to handle the case when the token expires (once we support the token). +// This likely involves setting a callback to cancel a context when the token expires, which triggers +// the connection to be removed from the tracking list. +func (c *ConnectionsTracker) Track(address, hostname string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.connections[address] = hostname +} + +// GetConnections returns all connections that are currently tracked. +func (c *ConnectionsTracker) GetConnections() map[string]string { + c.lock.Lock() + defer c.lock.Unlock() + + return c.connections +} + +// GetConnection returns the hostname of the requested connection. +func (c *ConnectionsTracker) GetConnection(address string) string { + c.lock.Lock() + defer c.lock.Unlock() + + return c.connections[address] +} diff --git a/internal/mode/static/nginx/agent/grpc/context/context.go b/internal/mode/static/nginx/agent/grpc/context/context.go new file mode 100644 index 0000000000..f8daf457eb --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/context/context.go @@ -0,0 +1,24 @@ +package context + +import ( + "context" +) + +// GrpcInfo for storing identity information for the gRPC client. +type GrpcInfo struct { + IPAddress string `json:"ip_address"` // ip address of the agent +} + +type contextGRPCKey struct{} + +// NewGrpcContext returns a new context.Context that has the provided GrpcInfo attached. +func NewGrpcContext(ctx context.Context, r GrpcInfo) context.Context { + return context.WithValue(ctx, contextGRPCKey{}, r) +} + +// GrpcInfoFromContext returns the GrpcInfo saved in ctx if it exists. +// Returns false if there's no GrpcInfo in the context. 
+func GrpcInfoFromContext(ctx context.Context) (GrpcInfo, bool) { + v, ok := ctx.Value(contextGRPCKey{}).(GrpcInfo) + return v, ok +} diff --git a/internal/mode/static/nginx/agent/grpc/context/doc.go b/internal/mode/static/nginx/agent/grpc/context/doc.go new file mode 100644 index 0000000000..689a126cf7 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/context/doc.go @@ -0,0 +1,4 @@ +/* +Package context contains the functions for storing extra information in the gRPC context. +*/ +package context diff --git a/internal/mode/static/nginx/agent/grpc/doc.go b/internal/mode/static/nginx/agent/grpc/doc.go new file mode 100644 index 0000000000..b98f0af8b6 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/doc.go @@ -0,0 +1,4 @@ +/* +Package grpc contains the functionality for the gRPC server for communicating with the nginx agent. +*/ +package grpc diff --git a/internal/mode/static/nginx/agent/grpc/grpc.go b/internal/mode/static/nginx/agent/grpc/grpc.go new file mode 100644 index 0000000000..11c57ec3e9 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/grpc.go @@ -0,0 +1,91 @@ +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/go-logr/logr" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/interceptor" +) + +const ( + keepAliveTime = 10 * time.Second + keepAliveTimeout = 10 * time.Second +) + +var ErrStatusInvalidConnection = status.Error(codes.Unauthenticated, "invalid connection") + +// Interceptor provides hooks to intercept the execution of an RPC on the server. +type Interceptor interface { + Stream() grpc.StreamServerInterceptor + Unary() grpc.UnaryServerInterceptor +} + +// Server is a gRPC server for communicating with the nginx agent. +type Server struct { + // Interceptor provides hooks to intercept the execution of an RPC on the server. + interceptor Interceptor + + logger logr.Logger + // RegisterServices is a list of functions to register gRPC services to the gRPC server. + registerServices []func(*grpc.Server) + // Port is the port that the server is listening on. + // Must be exposed in the control plane deployment/service. + port int +} + +func NewServer(logger logr.Logger, port int, registerSvcs []func(*grpc.Server)) *Server { + return &Server{ + logger: logger, + port: port, + registerServices: registerSvcs, + interceptor: interceptor.NewContextSetter(), + } +} + +// Start is a runnable that starts the gRPC server for communicating with the nginx agent. 
+func (g *Server) Start(ctx context.Context) error { + listener, err := net.Listen("tcp", fmt.Sprintf(":%d", g.port)) + if err != nil { + return err + } + + server := grpc.NewServer( + grpc.KeepaliveParams( + keepalive.ServerParameters{ + Time: keepAliveTime, + Timeout: keepAliveTimeout, + }, + ), + grpc.KeepaliveEnforcementPolicy( + keepalive.EnforcementPolicy{ + MinTime: keepAliveTime, + PermitWithoutStream: true, + }, + ), + grpc.ChainStreamInterceptor(g.interceptor.Stream()), + grpc.ChainUnaryInterceptor(g.interceptor.Unary()), + ) + + for _, registerSvc := range g.registerServices { + registerSvc(server) + } + + go func() { + <-ctx.Done() + g.logger.Info("Shutting down GRPC Server") + server.GracefulStop() + }() + + return server.Serve(listener) +} + +var _ manager.Runnable = &Server{} diff --git a/internal/mode/static/nginx/agent/grpc/interceptor/doc.go b/internal/mode/static/nginx/agent/grpc/interceptor/doc.go new file mode 100644 index 0000000000..e5175664b9 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/interceptor/doc.go @@ -0,0 +1,4 @@ +/* +Package interceptor contains the middleware for intercepting an RPC call. +*/ +package interceptor diff --git a/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go new file mode 100644 index 0000000000..3139da3cec --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go @@ -0,0 +1,83 @@ +package interceptor + +import ( + "context" + "fmt" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" +) + +// streamHandler is a struct that implements StreamHandler, allowing the interceptor to replace the context. +type streamHandler struct { + grpc.ServerStream + ctx context.Context +} + +func (sh *streamHandler) Context() context.Context { + return sh.ctx +} + +type ContextSetter struct{} + +func NewContextSetter() ContextSetter { + return ContextSetter{} +} + +func (c ContextSetter) Stream() grpc.StreamServerInterceptor { + return func( + srv interface{}, + ss grpc.ServerStream, + _ *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + ctx, err := setContext(ss.Context()) + if err != nil { + return err + } + return handler(srv, &streamHandler{ + ServerStream: ss, + ctx: ctx, + }) + } +} + +func (c ContextSetter) Unary() grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + _ *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (resp interface{}, err error) { + if ctx, err = setContext(ctx); err != nil { + return nil, err + } + return handler(ctx, req) + } +} + +// TODO(sberman): for now, we'll just use the IP address of the agent to link a Connection +// to a Subscription by setting it in the context. Once we support auth, we can likely change this +// interceptor to instead set the uuid. 
+func setContext(ctx context.Context) (context.Context, error) {
+	p, ok := peer.FromContext(ctx)
+	if !ok {
+		return nil, status.Error(codes.InvalidArgument, "no peer data")
+	}
+
+	addr, ok := p.Addr.(*net.TCPAddr)
+	if !ok {
+		panic(fmt.Sprintf("address %q was not of type net.TCPAddr", p.Addr.String()))
+	}
+
+	gi := &grpcContext.GrpcInfo{
+		IPAddress: addr.IP.String(),
+	}
+
+	return grpcContext.NewGrpcContext(ctx, *gi), nil
+}

From 2a45ba3e05cb4f188c0727884f813b2e2095bef9 Mon Sep 17 00:00:00 2001
From: Kate Osborn <50597707+kate-osborn@users.noreply.github.com>
Date: Tue, 28 Jan 2025 12:24:18 -0700
Subject: [PATCH 04/32] Support NginxProxy at the Gateway level (#3058)

Problem: When the control plane and data planes are split, the user will need the ability to specify data plane settings on a per-Gateway basis. To allow this, we need to support NginxProxy at the Gateway level in addition to the GatewayClass level. In practice, this means a user can reference an NginxProxy resource via the spec.infrastructure.parametersRef field on the Gateway resource.

We still want to support referencing an NginxProxy at the GatewayClass level. If a Gateway and its GatewayClass reference distinct NginxProxy resources, the settings must be merged. Settings specified on a Gateway NginxProxy must override those set on the GatewayClass NginxProxy.

Solution: To support NginxProxy at the Gateway level, several changes were made to the API. As a result, the API is now at version v1alpha2.

Breaking Changes:
* Change the scope of the CRD to Namespaced. The parametersRef.namespace field on the GatewayClass is now required.
* Make DisableHTTP2 and Telemetry.Exporter.Endpoint optional.

New fields:
* Telemetry.DisabledFeatures: allows users to explicitly disable telemetry features. It is a list with one supported entry: DisableTracing. More features may be added in future releases.

Other changes:
* Remove the listType=Map kubebuilder annotation from the RewriteClientIP.TrustedAddresses field. This listType is incorrect since TrustedAddresses can have duplicate keys.

The graph now stores NginxProxies that are referenced by the winning GatewayClass and Gateway. This will need to be updated once we support multiple Gateways. The graph is also responsible for merging the NginxProxies when necessary. The result of this is stored on the graph's Gateway object in the field EffectiveNginxProxy. The EffectiveNginxProxy on the Gateway is used to build the NGINX configuration.
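For illustration, a minimal sketch of the new Gateway-level reference (the resource, class, and namespace names below are placeholders, not part of this change):

# example only: all names here are hypothetical
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: gateway
  namespace: default
spec:
  gatewayClassName: nginx
  infrastructure:
    parametersRef:
      group: gateway.nginx.org
      kind: NginxProxy
      name: gateway-proxy-config
  listeners:
    - name: http
      port: 80
      protocol: HTTP

Here gateway-proxy-config is a namespaced gateway.nginx.org/v1alpha2 NginxProxy in the same namespace as the Gateway. If the GatewayClass also references an NginxProxy, the two specs are merged and the settings on the Gateway's NginxProxy take precedence.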
--- apis/v1alpha1/register.go | 2 - apis/v1alpha1/zz_generated.deepcopy.go | 258 ----- .../nginxproxy_types.go | 48 +- apis/v1alpha2/register.go | 3 +- apis/v1alpha2/zz_generated.deepcopy.go | 273 +++++ .../templates/gatewayclass.yaml | 1 + .../templates/nginxproxy.yaml | 3 +- .../nginx-gateway-fabric/values.schema.json | 11 + charts/nginx-gateway-fabric/values.yaml | 6 + .../bases/gateway.nginx.org_nginxproxies.yaml | 30 +- deploy/aws-nlb/deploy.yaml | 6 +- deploy/azure/deploy.yaml | 6 +- deploy/crds.yaml | 30 +- deploy/default/deploy.yaml | 6 +- deploy/experimental-nginx-plus/deploy.yaml | 4 +- deploy/experimental/deploy.yaml | 6 +- deploy/nginx-plus/deploy.yaml | 4 +- deploy/nodeport/deploy.yaml | 6 +- deploy/openshift/deploy.yaml | 6 +- .../snippets-filters-nginx-plus/deploy.yaml | 4 +- deploy/snippets-filters/deploy.yaml | 6 +- internal/mode/static/manager.go | 4 +- internal/mode/static/manager_test.go | 10 +- .../mode/static/state/change_processor.go | 4 +- .../static/state/change_processor_test.go | 173 ++- .../static/state/conditions/conditions.go | 81 +- .../static/state/dataplane/configuration.go | 92 +- .../state/dataplane/configuration_test.go | 505 ++++---- .../mode/static/state/graph/backend_refs.go | 51 +- .../static/state/graph/backend_refs_test.go | 51 +- internal/mode/static/state/graph/gateway.go | 101 +- .../mode/static/state/graph/gateway_test.go | 356 +++++- .../mode/static/state/graph/gatewayclass.go | 106 +- .../static/state/graph/gatewayclass_test.go | 136 ++- internal/mode/static/state/graph/graph.go | 78 +- .../mode/static/state/graph/graph_test.go | 183 ++- .../mode/static/state/graph/grpcroute_test.go | 34 +- .../mode/static/state/graph/nginxproxy.go | 230 +++- .../static/state/graph/nginxproxy_test.go | 1018 ++++++++++++----- .../mode/static/state/graph/route_common.go | 15 +- internal/mode/static/state/graph/tlsroute.go | 4 +- .../mode/static/state/graph/tlsroute_test.go | 11 +- .../mode/static/status/prepare_requests.go | 2 + .../static/status/prepare_requests_test.go | 111 ++ internal/mode/static/telemetry/collector.go | 5 +- .../mode/static/telemetry/collector_test.go | 15 +- tests/suite/manifests/tracing/nginxproxy.yaml | 2 +- 47 files changed, 2791 insertions(+), 1306 deletions(-) rename apis/{v1alpha1 => v1alpha2}/nginxproxy_types.go (87%) diff --git a/apis/v1alpha1/register.go b/apis/v1alpha1/register.go index 0d18c29eaa..7deb5bfb5c 100644 --- a/apis/v1alpha1/register.go +++ b/apis/v1alpha1/register.go @@ -34,8 +34,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &NginxGateway{}, &NginxGatewayList{}, - &NginxProxy{}, - &NginxProxyList{}, &ObservabilityPolicy{}, &ObservabilityPolicyList{}, &ClientSettingsPolicy{}, diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 96100bed3f..65b3b76c30 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -318,159 +318,6 @@ func (in *NginxGatewayStatus) DeepCopy() *NginxGatewayStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxLogging) DeepCopyInto(out *NginxLogging) { - *out = *in - if in.ErrorLevel != nil { - in, out := &in.ErrorLevel, &out.ErrorLevel - *out = new(NginxErrorLogLevel) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxLogging. 
-func (in *NginxLogging) DeepCopy() *NginxLogging { - if in == nil { - return nil - } - out := new(NginxLogging) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxPlus) DeepCopyInto(out *NginxPlus) { - *out = *in - if in.AllowedAddresses != nil { - in, out := &in.AllowedAddresses, &out.AllowedAddresses - *out = make([]NginxPlusAllowAddress, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlus. -func (in *NginxPlus) DeepCopy() *NginxPlus { - if in == nil { - return nil - } - out := new(NginxPlus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxPlusAllowAddress) DeepCopyInto(out *NginxPlusAllowAddress) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlusAllowAddress. -func (in *NginxPlusAllowAddress) DeepCopy() *NginxPlusAllowAddress { - if in == nil { - return nil - } - out := new(NginxPlusAllowAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxProxy) DeepCopyInto(out *NginxProxy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxy. -func (in *NginxProxy) DeepCopy() *NginxProxy { - if in == nil { - return nil - } - out := new(NginxProxy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NginxProxy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NginxProxyList) DeepCopyInto(out *NginxProxyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NginxProxy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxyList. -func (in *NginxProxyList) DeepCopy() *NginxProxyList { - if in == nil { - return nil - } - out := new(NginxProxyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NginxProxyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NginxProxySpec) DeepCopyInto(out *NginxProxySpec) { - *out = *in - if in.IPFamily != nil { - in, out := &in.IPFamily, &out.IPFamily - *out = new(IPFamilyType) - **out = **in - } - if in.Telemetry != nil { - in, out := &in.Telemetry, &out.Telemetry - *out = new(Telemetry) - (*in).DeepCopyInto(*out) - } - if in.RewriteClientIP != nil { - in, out := &in.RewriteClientIP, &out.RewriteClientIP - *out = new(RewriteClientIP) - (*in).DeepCopyInto(*out) - } - if in.Logging != nil { - in, out := &in.Logging, &out.Logging - *out = new(NginxLogging) - (*in).DeepCopyInto(*out) - } - if in.NginxPlus != nil { - in, out := &in.NginxPlus, &out.NginxPlus - *out = new(NginxPlus) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxySpec. -func (in *NginxProxySpec) DeepCopy() *NginxProxySpec { - if in == nil { - return nil - } - out := new(NginxProxySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObservabilityPolicy) DeepCopyInto(out *ObservabilityPolicy) { *out = *in @@ -557,51 +404,6 @@ func (in *ObservabilityPolicySpec) DeepCopy() *ObservabilityPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RewriteClientIP) DeepCopyInto(out *RewriteClientIP) { - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(RewriteClientIPModeType) - **out = **in - } - if in.SetIPRecursively != nil { - in, out := &in.SetIPRecursively, &out.SetIPRecursively - *out = new(bool) - **out = **in - } - if in.TrustedAddresses != nil { - in, out := &in.TrustedAddresses, &out.TrustedAddresses - *out = make([]RewriteClientIPAddress, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIP. -func (in *RewriteClientIP) DeepCopy() *RewriteClientIP { - if in == nil { - return nil - } - out := new(RewriteClientIP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RewriteClientIPAddress) DeepCopyInto(out *RewriteClientIPAddress) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIPAddress. -func (in *RewriteClientIPAddress) DeepCopy() *RewriteClientIPAddress { - if in == nil { - return nil - } - out := new(RewriteClientIPAddress) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Snippet) DeepCopyInto(out *Snippet) { *out = *in @@ -733,66 +535,6 @@ func (in *SpanAttribute) DeepCopy() *SpanAttribute { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Telemetry) DeepCopyInto(out *Telemetry) { - *out = *in - if in.Exporter != nil { - in, out := &in.Exporter, &out.Exporter - *out = new(TelemetryExporter) - (*in).DeepCopyInto(*out) - } - if in.ServiceName != nil { - in, out := &in.ServiceName, &out.ServiceName - *out = new(string) - **out = **in - } - if in.SpanAttributes != nil { - in, out := &in.SpanAttributes, &out.SpanAttributes - *out = make([]SpanAttribute, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Telemetry. -func (in *Telemetry) DeepCopy() *Telemetry { - if in == nil { - return nil - } - out := new(Telemetry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TelemetryExporter) DeepCopyInto(out *TelemetryExporter) { - *out = *in - if in.Interval != nil { - in, out := &in.Interval, &out.Interval - *out = new(Duration) - **out = **in - } - if in.BatchSize != nil { - in, out := &in.BatchSize, &out.BatchSize - *out = new(int32) - **out = **in - } - if in.BatchCount != nil { - in, out := &in.BatchCount, &out.BatchCount - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryExporter. -func (in *TelemetryExporter) DeepCopy() *TelemetryExporter { - if in == nil { - return nil - } - out := new(TelemetryExporter) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Tracing) DeepCopyInto(out *Tracing) { *out = *in diff --git a/apis/v1alpha1/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go similarity index 87% rename from apis/v1alpha1/nginxproxy_types.go rename to apis/v1alpha2/nginxproxy_types.go index ed4ea9ed3d..7c10bd9f3c 100644 --- a/apis/v1alpha1/nginxproxy_types.go +++ b/apis/v1alpha2/nginxproxy_types.go @@ -1,15 +1,23 @@ -package v1alpha1 +package v1alpha2 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" +) // +genclient // +kubebuilder:object:root=true // +kubebuilder:storageversion -// +kubebuilder:resource:categories=nginx-gateway-fabric,scope=Cluster +// +kubebuilder:resource:categories=nginx-gateway-fabric,scope=Namespaced // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` -// NginxProxy is a configuration object that is attached to a GatewayClass parametersRef. It provides a way -// to configure global settings for all Gateways defined from the GatewayClass. +// NginxProxy is a configuration object that can be referenced from a GatewayClass parametersRef +// or a Gateway infrastructure.parametersRef. It provides a way to configure data plane settings. +// If referenced from a GatewayClass, the settings apply to all Gateways attached to the GatewayClass. +// If referenced from a Gateway, the settings apply to that Gateway alone. If both a Gateway and its GatewayClass +// reference an NginxProxy, the settings are merged. Settings specified on the Gateway NginxProxy override those +// set on the GatewayClass NginxProxy. 
type NginxProxy struct { //nolint:govet // standard field alignment, don't change it metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -54,8 +62,10 @@ type NginxProxySpec struct { // +optional NginxPlus *NginxPlus `json:"nginxPlus,omitempty"` // DisableHTTP2 defines if http2 should be disabled for all servers. - // Default is false, meaning http2 will be enabled for all servers. - DisableHTTP2 bool `json:"disableHTTP2,omitempty"` + // If not specified, or set to false, http2 will be enabled for all servers. + // + // +optional + DisableHTTP2 *bool `json:"disableHTTP2,omitempty"` } // NginxPlus specifies NGINX Plus additional settings. These will only be applied if NGINX Plus is being used. @@ -68,6 +78,10 @@ type NginxPlus struct { // Telemetry specifies the OpenTelemetry configuration. type Telemetry struct { + // DisabledFeatures specifies OpenTelemetry features to be disabled. + // + // +optional + DisabledFeatures []DisableTelemetryFeature `json:"disabledFeatures,omitempty"` // Exporter specifies OpenTelemetry export parameters. // // +optional @@ -88,7 +102,7 @@ type Telemetry struct { // +listType=map // +listMapKey=key // +kubebuilder:validation:MaxItems=64 - SpanAttributes []SpanAttribute `json:"spanAttributes,omitempty"` + SpanAttributes []v1alpha1.SpanAttribute `json:"spanAttributes,omitempty"` } // TelemetryExporter specifies OpenTelemetry export parameters. @@ -97,7 +111,7 @@ type TelemetryExporter struct { // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter // // +optional - Interval *Duration `json:"interval,omitempty"` + Interval *v1alpha1.Duration `json:"interval,omitempty"` // BatchSize is the maximum number of spans to be sent in one batch per worker. // Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter @@ -117,8 +131,9 @@ type TelemetryExporter struct { // Format: alphanumeric hostname with optional http scheme and optional port. // //nolint:lll + // +optional // +kubebuilder:validation:Pattern=`^(?:http?:\/\/)?[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?)*(?::\d{1,5})?$` - Endpoint string `json:"endpoint"` + Endpoint *string `json:"endpoint,omitempty"` } // RewriteClientIP specifies the configuration for rewriting the client's IP address. @@ -149,15 +164,12 @@ type RewriteClientIP struct { // If a request comes from a trusted address, NGINX will rewrite the client IP information, // and forward it to the backend in the X-Forwarded-For* and X-Real-IP headers. // If the request does not come from a trusted address, NGINX will not rewrite the client IP information. - // TrustedAddresses only supports CIDR blocks: 192.33.21.1/24, fe80::1/64. // To trust all addresses (not recommended for production), set to 0.0.0.0/0. // If no addresses are provided, NGINX will not rewrite the client IP information. // Sets NGINX directive set_real_ip_from: https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from // This field is required if mode is set. // // +optional - // +listType=map - // +listMapKey=type // +kubebuilder:validation:MaxItems=16 TrustedAddresses []RewriteClientIPAddress `json:"trustedAddresses,omitempty"` } @@ -280,3 +292,13 @@ const ( // NginxLogLevelEmerg is the emerg level for NGINX error logs. NginxLogLevelEmerg NginxErrorLogLevel = "emerg" ) + +// DisableTelemetryFeature is a telemetry feature that can be disabled. 
+// +// +kubebuilder:validation:Enum=DisableTracing +type DisableTelemetryFeature string + +const ( + // DisableTracing disables the OpenTelemetry tracing feature. + DisableTracing DisableTelemetryFeature = "DisableTracing" +) diff --git a/apis/v1alpha2/register.go b/apis/v1alpha2/register.go index 23601e280e..8615a5a3b0 100644 --- a/apis/v1alpha2/register.go +++ b/apis/v1alpha2/register.go @@ -32,11 +32,12 @@ var ( // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &NginxProxy{}, + &NginxProxyList{}, &ObservabilityPolicy{}, &ObservabilityPolicyList{}, ) // AddToGroupVersion allows the serialization of client types like ListOptions. metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil } diff --git a/apis/v1alpha2/zz_generated.deepcopy.go b/apis/v1alpha2/zz_generated.deepcopy.go index 77cf20bb07..6e0856a220 100644 --- a/apis/v1alpha2/zz_generated.deepcopy.go +++ b/apis/v1alpha2/zz_generated.deepcopy.go @@ -10,6 +10,164 @@ import ( apisv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxLogging) DeepCopyInto(out *NginxLogging) { + *out = *in + if in.ErrorLevel != nil { + in, out := &in.ErrorLevel, &out.ErrorLevel + *out = new(NginxErrorLogLevel) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxLogging. +func (in *NginxLogging) DeepCopy() *NginxLogging { + if in == nil { + return nil + } + out := new(NginxLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxPlus) DeepCopyInto(out *NginxPlus) { + *out = *in + if in.AllowedAddresses != nil { + in, out := &in.AllowedAddresses, &out.AllowedAddresses + *out = make([]NginxPlusAllowAddress, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlus. +func (in *NginxPlus) DeepCopy() *NginxPlus { + if in == nil { + return nil + } + out := new(NginxPlus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxPlusAllowAddress) DeepCopyInto(out *NginxPlusAllowAddress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxPlusAllowAddress. +func (in *NginxPlusAllowAddress) DeepCopy() *NginxPlusAllowAddress { + if in == nil { + return nil + } + out := new(NginxPlusAllowAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxProxy) DeepCopyInto(out *NginxProxy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxy. +func (in *NginxProxy) DeepCopy() *NginxProxy { + if in == nil { + return nil + } + out := new(NginxProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NginxProxy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxProxyList) DeepCopyInto(out *NginxProxyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NginxProxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxyList. +func (in *NginxProxyList) DeepCopy() *NginxProxyList { + if in == nil { + return nil + } + out := new(NginxProxyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NginxProxyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxProxySpec) DeepCopyInto(out *NginxProxySpec) { + *out = *in + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(IPFamilyType) + **out = **in + } + if in.Telemetry != nil { + in, out := &in.Telemetry, &out.Telemetry + *out = new(Telemetry) + (*in).DeepCopyInto(*out) + } + if in.RewriteClientIP != nil { + in, out := &in.RewriteClientIP, &out.RewriteClientIP + *out = new(RewriteClientIP) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(NginxLogging) + (*in).DeepCopyInto(*out) + } + if in.NginxPlus != nil { + in, out := &in.NginxPlus, &out.NginxPlus + *out = new(NginxPlus) + (*in).DeepCopyInto(*out) + } + if in.DisableHTTP2 != nil { + in, out := &in.DisableHTTP2, &out.DisableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxySpec. +func (in *NginxProxySpec) DeepCopy() *NginxProxySpec { + if in == nil { + return nil + } + out := new(NginxProxySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObservabilityPolicy) DeepCopyInto(out *ObservabilityPolicy) { *out = *in @@ -96,6 +254,121 @@ func (in *ObservabilityPolicySpec) DeepCopy() *ObservabilityPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewriteClientIP) DeepCopyInto(out *RewriteClientIP) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(RewriteClientIPModeType) + **out = **in + } + if in.SetIPRecursively != nil { + in, out := &in.SetIPRecursively, &out.SetIPRecursively + *out = new(bool) + **out = **in + } + if in.TrustedAddresses != nil { + in, out := &in.TrustedAddresses, &out.TrustedAddresses + *out = make([]RewriteClientIPAddress, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIP. +func (in *RewriteClientIP) DeepCopy() *RewriteClientIP { + if in == nil { + return nil + } + out := new(RewriteClientIP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RewriteClientIPAddress) DeepCopyInto(out *RewriteClientIPAddress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteClientIPAddress. +func (in *RewriteClientIPAddress) DeepCopy() *RewriteClientIPAddress { + if in == nil { + return nil + } + out := new(RewriteClientIPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Telemetry) DeepCopyInto(out *Telemetry) { + *out = *in + if in.DisabledFeatures != nil { + in, out := &in.DisabledFeatures, &out.DisabledFeatures + *out = make([]DisableTelemetryFeature, len(*in)) + copy(*out, *in) + } + if in.Exporter != nil { + in, out := &in.Exporter, &out.Exporter + *out = new(TelemetryExporter) + (*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SpanAttributes != nil { + in, out := &in.SpanAttributes, &out.SpanAttributes + *out = make([]v1alpha1.SpanAttribute, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Telemetry. +func (in *Telemetry) DeepCopy() *Telemetry { + if in == nil { + return nil + } + out := new(Telemetry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TelemetryExporter) DeepCopyInto(out *TelemetryExporter) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(v1alpha1.Duration) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int32) + **out = **in + } + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(int32) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryExporter. +func (in *TelemetryExporter) DeepCopy() *TelemetryExporter { + if in == nil { + return nil + } + out := new(TelemetryExporter) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Tracing) DeepCopyInto(out *Tracing) { *out = *in diff --git a/charts/nginx-gateway-fabric/templates/gatewayclass.yaml b/charts/nginx-gateway-fabric/templates/gatewayclass.yaml index ee08e1a726..aecd54e8ad 100644 --- a/charts/nginx-gateway-fabric/templates/gatewayclass.yaml +++ b/charts/nginx-gateway-fabric/templates/gatewayclass.yaml @@ -17,4 +17,5 @@ spec: group: gateway.nginx.org kind: NginxProxy name: {{ include "nginx-gateway.proxy-config-name" . }} + namespace: {{ .Release.Namespace }} {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml index 4214158b75..bc4105ee37 100644 --- a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml +++ b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml @@ -1,8 +1,9 @@ {{- if .Values.nginx.config }} -apiVersion: gateway.nginx.org/v1alpha1 +apiVersion: gateway.nginx.org/v1alpha2 kind: NginxProxy metadata: name: {{ include "nginx-gateway.proxy-config-name" . }} + namespace: {{ .Release.Namespace }} labels: {{- include "nginx-gateway.labels" . 
| nindent 4 }} spec: diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index 84ea47c5b8..36734ca989 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -165,6 +165,17 @@ "telemetry": { "description": "Telemetry specifies the OpenTelemetry configuration.", "properties": { + "disabledFeatures": { + "items": { + "enum": [ + "DisableTracing" + ], + "required": [], + "type": "string" + }, + "required": [], + "type": "array" + }, "exporter": { "properties": { "batchCount": { diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index 4cdd1b42db..ff4fc28630 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -236,6 +236,12 @@ nginx: # pattern: ^([^"$\\]|\\[^$])*$ # minLength: 1 # maxLength: 255 + # disabledFeatures: + # type: array + # items: + # type: string + # enum: + # - DisableTracing # logging: # type: object # description: Logging defines logging related settings for NGINX. diff --git a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml index 83f89a9ff9..d0772be9f6 100644 --- a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml +++ b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml @@ -14,18 +14,22 @@ spec: listKind: NginxProxyList plural: nginxproxies singular: nginxproxy - scope: Cluster + scope: Namespaced versions: - additionalPrinterColumns: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1alpha2 schema: openAPIV3Schema: description: |- - NginxProxy is a configuration object that is attached to a GatewayClass parametersRef. It provides a way - to configure global settings for all Gateways defined from the GatewayClass. + NginxProxy is a configuration object that can be referenced from a GatewayClass parametersRef + or a Gateway infrastructure.parametersRef. It provides a way to configure data plane settings. + If referenced from a GatewayClass, the settings apply to all Gateways attached to the GatewayClass. + If referenced from a Gateway, the settings apply to that Gateway alone. If both a Gateway and its GatewayClass + reference an NginxProxy, the settings are merged. Settings specified on the Gateway NginxProxy override those + set on the GatewayClass NginxProxy. properties: apiVersion: description: |- @@ -50,7 +54,7 @@ spec: disableHTTP2: description: |- DisableHTTP2 defines if http2 should be disabled for all servers. - Default is false, meaning http2 will be enabled for all servers. + If not specified, or set to false, http2 will be enabled for all servers. type: boolean ipFamily: default: dual @@ -141,7 +145,6 @@ spec: If a request comes from a trusted address, NGINX will rewrite the client IP information, and forward it to the backend in the X-Forwarded-For* and X-Real-IP headers. If the request does not come from a trusted address, NGINX will not rewrite the client IP information. - TrustedAddresses only supports CIDR blocks: 192.33.21.1/24, fe80::1/64. To trust all addresses (not recommended for production), set to 0.0.0.0/0. If no addresses are provided, NGINX will not rewrite the client IP information. 
Sets NGINX directive set_real_ip_from: https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from @@ -166,9 +169,6 @@ spec: type: object maxItems: 16 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map type: object x-kubernetes-validations: - message: if mode is set, trustedAddresses is a required field @@ -177,6 +177,16 @@ spec: telemetry: description: Telemetry specifies the OpenTelemetry configuration. properties: + disabledFeatures: + description: DisabledFeatures specifies OpenTelemetry features + to be disabled. + items: + description: DisableTelemetryFeature is a telemetry feature + that can be disabled. + enum: + - DisableTracing + type: string + type: array exporter: description: Exporter specifies OpenTelemetry export parameters. properties: @@ -206,8 +216,6 @@ spec: Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter pattern: ^[0-9]{1,4}(ms|s|m|h)?$ type: string - required: - - endpoint type: object serviceName: description: |- diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml index 6bc4dff222..98dddfdc9b 100644 --- a/deploy/aws-nlb/deploy.yaml +++ b/deploy/aws-nlb/deploy.yaml @@ -324,7 +324,7 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment spec: containers: - - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge imagePullPolicy: Always name: nginx ports: @@ -366,7 +366,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -381,7 +381,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index 99ee0c2be2..236c62288b 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -323,7 +323,7 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment spec: containers: - - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge imagePullPolicy: Always name: nginx ports: @@ -365,7 +365,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -380,7 +380,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/crds.yaml b/deploy/crds.yaml index 7891f908c7..8a4a379c83 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -599,18 +599,22 @@ spec: listKind: NginxProxyList plural: nginxproxies singular: nginxproxy - scope: Cluster + scope: Namespaced versions: - additionalPrinterColumns: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1alpha2 schema: openAPIV3Schema: description: |- - NginxProxy is a configuration object that is attached to a GatewayClass parametersRef. It provides a way - to configure global settings for all Gateways defined from the GatewayClass. + NginxProxy is a configuration object that can be referenced from a GatewayClass parametersRef + or a Gateway infrastructure.parametersRef. It provides a way to configure data plane settings. 
+ If referenced from a GatewayClass, the settings apply to all Gateways attached to the GatewayClass. + If referenced from a Gateway, the settings apply to that Gateway alone. If both a Gateway and its GatewayClass + reference an NginxProxy, the settings are merged. Settings specified on the Gateway NginxProxy override those + set on the GatewayClass NginxProxy. properties: apiVersion: description: |- @@ -635,7 +639,7 @@ spec: disableHTTP2: description: |- DisableHTTP2 defines if http2 should be disabled for all servers. - Default is false, meaning http2 will be enabled for all servers. + If not specified, or set to false, http2 will be enabled for all servers. type: boolean ipFamily: default: dual @@ -726,7 +730,6 @@ spec: If a request comes from a trusted address, NGINX will rewrite the client IP information, and forward it to the backend in the X-Forwarded-For* and X-Real-IP headers. If the request does not come from a trusted address, NGINX will not rewrite the client IP information. - TrustedAddresses only supports CIDR blocks: 192.33.21.1/24, fe80::1/64. To trust all addresses (not recommended for production), set to 0.0.0.0/0. If no addresses are provided, NGINX will not rewrite the client IP information. Sets NGINX directive set_real_ip_from: https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from @@ -751,9 +754,6 @@ spec: type: object maxItems: 16 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map type: object x-kubernetes-validations: - message: if mode is set, trustedAddresses is a required field @@ -762,6 +762,16 @@ spec: telemetry: description: Telemetry specifies the OpenTelemetry configuration. properties: + disabledFeatures: + description: DisabledFeatures specifies OpenTelemetry features + to be disabled. + items: + description: DisableTelemetryFeature is a telemetry feature + that can be disabled. + enum: + - DisableTracing + type: string + type: array exporter: description: Exporter specifies OpenTelemetry export parameters. 
properties: @@ -791,8 +801,6 @@ spec: Default: https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter pattern: ^[0-9]{1,4}(ms|s|m|h)?$ type: string - required: - - endpoint type: object serviceName: description: |- diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index d9b557e4d7..c066b92710 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -321,7 +321,7 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment spec: containers: - - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge imagePullPolicy: Always name: nginx ports: @@ -363,7 +363,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -378,7 +378,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index c23de692ed..ae5ca25e93 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -389,7 +389,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -407,7 +407,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index e6cd3b16b4..16c1d7c10f 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -327,7 +327,7 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment spec: containers: - - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge imagePullPolicy: Always name: nginx ports: @@ -369,7 +369,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -384,7 +384,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index 479e20805c..2ab6da27dd 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -383,7 +383,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -401,7 +401,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index c39299e838..a016150ab7 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -321,7 +321,7 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment spec: containers: - - image: 
ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge imagePullPolicy: Always name: nginx ports: @@ -363,7 +363,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -378,7 +378,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index ea1e5f43f9..43ba8df5fb 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -329,7 +329,7 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment spec: containers: - - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge imagePullPolicy: Always name: nginx ports: @@ -371,7 +371,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -386,7 +386,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index 435319441e..1206af4707 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -386,7 +386,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -404,7 +404,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index e97a00d98f..98d20a0ea4 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -324,7 +324,7 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment spec: containers: - - image: ghcr.io/nginxinc/nginx-gateway-fabric/nginx:edge + - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge imagePullPolicy: Always name: nginx ports: @@ -366,7 +366,7 @@ spec: - /usr/bin/gateway - sleep - --duration=15s - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: sleep - command: @@ -381,7 +381,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: ghcr.io/nginxinc/nginx-gateway-fabric:edge + image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: init securityContext: diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 459a79b5e4..70413ffe05 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -447,7 +447,7 @@ func registerControllers( }, }, { - objectType: &ngfAPIv1alpha1.NginxProxy{}, + objectType: &ngfAPIv1alpha2.NginxProxy{}, options: []controller.Option{ controller.WithK8sPredicate(k8spredicate.GenerationChangedPredicate{}), }, @@ -708,7 +708,7 @@ func prepareFirstEventBatchPreparerArgs(cfg 
config.Config) ([]client.Object, []c &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, &gatewayv1.GRPCRouteList{}, &ngfAPIv1alpha1.ClientSettingsPolicyList{}, &ngfAPIv1alpha2.ObservabilityPolicyList{}, diff --git a/internal/mode/static/manager_test.go b/internal/mode/static/manager_test.go index 5361be4deb..98a6146905 100644 --- a/internal/mode/static/manager_test.go +++ b/internal/mode/static/manager_test.go @@ -63,7 +63,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &gatewayv1.HTTPRouteList{}, &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, &gatewayv1.GRPCRouteList{}, partialObjectMetadataList, &ngfAPIv1alpha1.ClientSettingsPolicyList{}, @@ -93,7 +93,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, &gatewayv1.GRPCRouteList{}, partialObjectMetadataList, &ngfAPIv1alpha1.ClientSettingsPolicyList{}, @@ -124,7 +124,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, &gatewayv1alpha3.BackendTLSPolicyList{}, &gatewayv1alpha2.TLSRouteList{}, @@ -156,7 +156,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, &gatewayv1.GRPCRouteList{}, &ngfAPIv1alpha1.ClientSettingsPolicyList{}, @@ -188,7 +188,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha1.NginxProxyList{}, + &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, &gatewayv1alpha3.BackendTLSPolicyList{}, &gatewayv1alpha2.TLSRouteList{}, diff --git a/internal/mode/static/state/change_processor.go b/internal/mode/static/state/change_processor.go index 3f5122df5a..426feff686 100644 --- a/internal/mode/static/state/change_processor.go +++ b/internal/mode/static/state/change_processor.go @@ -109,7 +109,7 @@ func NewChangeProcessorImpl(cfg ChangeProcessorConfig) *ChangeProcessorImpl { CRDMetadata: make(map[types.NamespacedName]*metav1.PartialObjectMetadata), BackendTLSPolicies: make(map[types.NamespacedName]*v1alpha3.BackendTLSPolicy), ConfigMaps: make(map[types.NamespacedName]*apiv1.ConfigMap), - NginxProxies: make(map[types.NamespacedName]*ngfAPIv1alpha1.NginxProxy), + NginxProxies: make(map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy), GRPCRoutes: make(map[types.NamespacedName]*v1.GRPCRoute), TLSRoutes: make(map[types.NamespacedName]*v1alpha2.TLSRoute), NGFPolicies: make(map[graph.PolicyKey]policies.Policy), @@ -203,7 +203,7 @@ func NewChangeProcessorImpl(cfg ChangeProcessorConfig) *ChangeProcessorImpl { predicate: annotationChangedPredicate{annotation: gatewayclass.BundleVersionAnnotation}, }, { - gvk: cfg.MustExtractGVK(&ngfAPIv1alpha1.NginxProxy{}), + gvk: cfg.MustExtractGVK(&ngfAPIv1alpha2.NginxProxy{}), store: newObjectStoreMapAdapter(clusterStore.NginxProxies), predicate: 
funcPredicate{stateChanged: isReferenced}, }, diff --git a/internal/mode/static/state/change_processor_test.go b/internal/mode/static/state/change_processor_test.go index d71ebd5212..b25f3fbd99 100644 --- a/internal/mode/static/state/change_processor_test.go +++ b/internal/mode/static/state/change_processor_test.go @@ -2368,56 +2368,135 @@ var _ = Describe("ChangeProcessor", func() { }) Describe("NginxProxy resource changes", Ordered, func() { - paramGC := gc.DeepCopy() - paramGC.Spec.ParametersRef = &v1beta1.ParametersReference{ - Group: ngfAPIv1alpha1.GroupName, - Kind: kinds.NginxProxy, - Name: "np", - } + Context("referenced by a GatewayClass", func() { + paramGC := gc.DeepCopy() + paramGC.Spec.ParametersRef = &v1beta1.ParametersReference{ + Group: ngfAPIv1alpha1.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + Namespace: helpers.GetPointer[v1.Namespace]("test"), + } - np := &ngfAPIv1alpha1.NginxProxy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np", - }, - } + np := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np", + Namespace: "test", + }, + } - npUpdated := &ngfAPIv1alpha1.NginxProxy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np", - }, - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "my-svc:123", - BatchSize: helpers.GetPointer(int32(512)), - BatchCount: helpers.GetPointer(int32(4)), - Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + npUpdated := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np", + Namespace: "test", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-svc:123"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + }, }, }, - }, - } - It("handles upserts for an NginxProxy", func() { - processor.CaptureUpsertChange(np) - processor.CaptureUpsertChange(paramGC) + } + It("handles upserts for an NginxProxy", func() { + processor.CaptureUpsertChange(np) + processor.CaptureUpsertChange(paramGC) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.NginxProxy.Source).To(Equal(np)) - }) - It("captures changes for an NginxProxy", func() { - processor.CaptureUpsertChange(npUpdated) - processor.CaptureUpsertChange(paramGC) + changed, graph := processor.Process() + Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(graph.GatewayClass.NginxProxy.Source).To(Equal(np)) + }) + It("captures changes for an NginxProxy", func() { + processor.CaptureUpsertChange(npUpdated) + processor.CaptureUpsertChange(paramGC) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.NginxProxy.Source).To(Equal(npUpdated)) + changed, graph := processor.Process() + Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(graph.GatewayClass.NginxProxy.Source).To(Equal(npUpdated)) + }) + It("handles deletes for an NginxProxy", func() { + processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) + + changed, graph := processor.Process() + Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(graph.GatewayClass.NginxProxy).To(BeNil()) + }) }) - It("handles deletes for an NginxProxy", func() { - processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) + Context("referenced by a Gateway", func() { + 
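+			// This context covers the Gateway-level parametersRef path:
+			// spec.infrastructure.parametersRef on the Gateway points at an NginxProxy in the
+			// same namespace, and the processed graph surfaces it on graph.Gateway.NginxProxy
+			// rather than graph.GatewayClass.NginxProxy.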
paramGW := &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "param-gw", + Generation: 1, + }, + Spec: v1.GatewaySpec{ + GatewayClassName: gcName, + Listeners: []v1.Listener{ + { + Name: httpListenerName, + Hostname: nil, + Port: 80, + Protocol: v1.HTTPProtocolType, + }, + }, + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: ngfAPIv1alpha1.GroupName, + Kind: kinds.NginxProxy, + Name: "np-gw", + }, + }, + }, + } - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.NginxProxy).To(BeNil()) + np := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-gw", + Namespace: "test", + }, + } + + npUpdated := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-gw", + Namespace: "test", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-svc:123"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + }, + }, + }, + } + It("handles upserts for an NginxProxy", func() { + processor.CaptureUpsertChange(np) + processor.CaptureUpsertChange(paramGW) + + changed, graph := processor.Process() + Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(graph.Gateway.NginxProxy.Source).To(Equal(np)) + }) + It("captures changes for an NginxProxy", func() { + processor.CaptureUpsertChange(npUpdated) + processor.CaptureUpsertChange(paramGW) + + changed, graph := processor.Process() + Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(graph.Gateway.NginxProxy.Source).To(Equal(npUpdated)) + }) + It("handles deletes for an NginxProxy", func() { + processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) + + changed, graph := processor.Process() + Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(graph.Gateway.NginxProxy).To(BeNil()) + }) }) }) @@ -2710,7 +2789,7 @@ var _ = Describe("ChangeProcessor", func() { secret, secretUpdated, unrelatedSecret, barSecret, barSecretUpdated *apiv1.Secret cm, cmUpdated, unrelatedCM *apiv1.ConfigMap btls, btlsUpdated *v1alpha3.BackendTLSPolicy - np, npUpdated *ngfAPIv1alpha1.NginxProxy + np, npUpdated *ngfAPIv1alpha2.NginxProxy ) BeforeEach(OncePerOrdered, func() { @@ -3009,12 +3088,12 @@ var _ = Describe("ChangeProcessor", func() { btlsUpdated = btls.DeepCopy() npNsName = types.NamespacedName{Name: "np-1"} - np = &ngfAPIv1alpha1.NginxProxy{ + np = &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ Name: npNsName.Name, }, - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ ServiceName: helpers.GetPointer("my-svc"), }, }, @@ -3089,7 +3168,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureDeleteChange(&v1beta1.ReferenceGrant{}, rgNsName) processor.CaptureDeleteChange(&v1alpha3.BackendTLSPolicy{}, btlsNsName) processor.CaptureDeleteChange(&apiv1.ConfigMap{}, cmNsName) - processor.CaptureDeleteChange(&ngfAPIv1alpha1.NginxProxy{}, npNsName) + processor.CaptureDeleteChange(&ngfAPIv1alpha2.NginxProxy{}, npNsName) // these are non-changing changes processor.CaptureUpsertChange(gw2) diff --git a/internal/mode/static/state/conditions/conditions.go b/internal/mode/static/state/conditions/conditions.go index 84ba2d971a..00feac778a 100644 --- 
a/internal/mode/static/state/conditions/conditions.go +++ b/internal/mode/static/state/conditions/conditions.go @@ -87,6 +87,10 @@ const ( // parametersRef resource does not exist. GatewayClassReasonParamsRefNotFound v1.GatewayClassConditionReason = "ParametersRefNotFound" + // GatewayClassReasonParamsRefInvalid is used with the "GatewayClassResolvedRefs" condition when the + // parametersRef resource is invalid. + GatewayClassReasonParamsRefInvalid v1.GatewayClassConditionReason = "ParametersRefInvalid" + // PolicyReasonNginxProxyConfigNotSet is used with the "PolicyAccepted" condition when the // NginxProxy resource is missing or invalid. PolicyReasonNginxProxyConfigNotSet v1alpha2.PolicyConditionReason = "NginxProxyConfigNotSet" @@ -106,6 +110,21 @@ const ( // GatewayIgnoredReason is used with v1.RouteConditionAccepted when the route references a Gateway that is ignored // by NGF. GatewayIgnoredReason v1.RouteConditionReason = "GatewayIgnored" + + // GatewayResolvedRefs condition indicates whether the controller was able to resolve the + // parametersRef on the Gateway. + GatewayResolvedRefs v1.GatewayConditionType = "ResolvedRefs" + + // GatewayReasonResolvedRefs is used with the "GatewayResolvedRefs" condition when the condition is true. + GatewayReasonResolvedRefs v1.GatewayConditionReason = "ResolvedRefs" + + // GatewayReasonParamsRefNotFound is used with the "GatewayResolvedRefs" condition when the + // parametersRef resource does not exist. + GatewayReasonParamsRefNotFound v1.GatewayConditionReason = "ParametersRefNotFound" + + // GatewayReasonParamsRefInvalid is used with the "GatewayResolvedRefs" condition when the + // parametersRef resource is invalid. + GatewayReasonParamsRefInvalid v1.GatewayConditionReason = "ParametersRefInvalid" ) // NewRouteNotAcceptedGatewayIgnored returns a Condition that indicates that the Route is not accepted by the Gateway @@ -514,7 +533,7 @@ func NewGatewayClassResolvedRefs() conditions.Condition { Type: string(GatewayClassResolvedRefs), Status: metav1.ConditionTrue, Reason: string(GatewayClassReasonResolvedRefs), - Message: "parametersRef resource is resolved", + Message: "ParametersRef resource is resolved", } } @@ -525,7 +544,18 @@ func NewGatewayClassRefNotFound() conditions.Condition { Type: string(GatewayClassResolvedRefs), Status: metav1.ConditionFalse, Reason: string(GatewayClassReasonParamsRefNotFound), - Message: "parametersRef resource could not be found", + Message: "ParametersRef resource could not be found", + } +} + +// NewGatewayClassRefInvalid returns a Condition that indicates that the parametersRef +// on the GatewayClass could not be resolved because the resource it references is invalid. 
+func NewGatewayClassRefInvalid(msg string) conditions.Condition { + return conditions.Condition{ + Type: string(GatewayClassResolvedRefs), + Status: metav1.ConditionFalse, + Reason: string(GatewayClassReasonParamsRefInvalid), + Message: msg, } } @@ -537,7 +567,7 @@ func NewGatewayClassInvalidParameters(msg string) conditions.Condition { Type: string(v1.GatewayClassConditionStatusAccepted), Status: metav1.ConditionTrue, Reason: string(v1.GatewayClassReasonInvalidParameters), - Message: fmt.Sprintf("GatewayClass is accepted, but parametersRef is ignored due to an error: %s", msg), + Message: fmt.Sprintf("GatewayClass is accepted, but ParametersRef is ignored due to an error: %s", msg), } } @@ -684,6 +714,51 @@ func NewNginxGatewayInvalid(msg string) conditions.Condition { } } +// NewGatewayResolvedRefs returns a Condition that indicates that the parametersRef +// on the Gateway is resolved. +func NewGatewayResolvedRefs() conditions.Condition { + return conditions.Condition{ + Type: string(GatewayResolvedRefs), + Status: metav1.ConditionTrue, + Reason: string(GatewayReasonResolvedRefs), + Message: "ParametersRef resource is resolved", + } +} + +// NewGatewayRefNotFound returns a Condition that indicates that the parametersRef +// on the Gateway could not be resolved. +func NewGatewayRefNotFound() conditions.Condition { + return conditions.Condition{ + Type: string(GatewayResolvedRefs), + Status: metav1.ConditionFalse, + Reason: string(GatewayReasonParamsRefNotFound), + Message: "ParametersRef resource could not be found", + } +} + +// NewGatewayRefInvalid returns a Condition that indicates that the parametersRef +// on the Gateway could not be resolved because the referenced resource is invalid. +func NewGatewayRefInvalid(msg string) conditions.Condition { + return conditions.Condition{ + Type: string(GatewayResolvedRefs), + Status: metav1.ConditionFalse, + Reason: string(GatewayReasonParamsRefInvalid), + Message: msg, + } +} + +// NewGatewayInvalidParameters returns a Condition that indicates that the Gateway has invalid parameters. +// We are allowing Accepted to still be true to prevent nullifying the entire Gateway config if a parametersRef +// is updated to something invalid. +func NewGatewayInvalidParameters(msg string) conditions.Condition { + return conditions.Condition{ + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + Reason: string(v1.GatewayReasonInvalidParameters), + Message: fmt.Sprintf("Gateway is accepted, but ParametersRef is ignored due to an error: %s", msg), + } +} + // NewPolicyAccepted returns a Condition that indicates that the Policy is accepted. 
func NewPolicyAccepted() conditions.Condition { return conditions.Condition{ diff --git a/internal/mode/static/state/dataplane/configuration.go b/internal/mode/static/state/dataplane/configuration.go index 84dc4b0cdb..7945a02da6 100644 --- a/internal/mode/static/state/dataplane/configuration.go +++ b/internal/mode/static/state/dataplane/configuration.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "fmt" + "slices" "sort" discoveryV1 "k8s.io/api/discovery/v1" @@ -816,22 +817,42 @@ func generateCertBundleID(caCertRef types.NamespacedName) CertBundleID { return CertBundleID(fmt.Sprintf("cert_bundle_%s_%s", caCertRef.Namespace, caCertRef.Name)) } +func telemetryEnabled(gw *graph.Gateway) bool { + if gw == nil { + return false + } + + if gw.EffectiveNginxProxy == nil || gw.EffectiveNginxProxy.Telemetry == nil { + return false + } + + tel := gw.EffectiveNginxProxy.Telemetry + + if slices.Contains(tel.DisabledFeatures, ngfAPIv1alpha2.DisableTracing) { + return false + } + + if tel.Exporter == nil || tel.Exporter.Endpoint == nil { + return false + } + + return true +} + // buildTelemetry generates the Otel configuration. func buildTelemetry(g *graph.Graph) Telemetry { - if g.NginxProxy == nil || !g.NginxProxy.Valid || - g.NginxProxy.Source.Spec.Telemetry == nil || - g.NginxProxy.Source.Spec.Telemetry.Exporter == nil { + if !telemetryEnabled(g.Gateway) { return Telemetry{} } serviceName := fmt.Sprintf("ngf:%s:%s", g.Gateway.Source.Namespace, g.Gateway.Source.Name) - telemetry := g.NginxProxy.Source.Spec.Telemetry + telemetry := g.Gateway.EffectiveNginxProxy.Telemetry if telemetry.ServiceName != nil { serviceName = serviceName + ":" + *telemetry.ServiceName } tel := Telemetry{ - Endpoint: telemetry.Exporter.Endpoint, + Endpoint: *telemetry.Exporter.Endpoint, // safe to deref here since we verified that telemetry is enabled ServiceName: serviceName, } @@ -895,41 +916,44 @@ func buildBaseHTTPConfig(g *graph.Graph) BaseHTTPConfig { IPFamily: Dual, Snippets: buildSnippetsForContext(g.SnippetsFilters, ngfAPIv1alpha1.NginxContextHTTP), } - if g.NginxProxy == nil || !g.NginxProxy.Valid { + + // safe to access EffectiveNginxProxy since we only call this function when the Gateway is not nil. 
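+	// EffectiveNginxProxy is the GatewayClass-level NginxProxy merged with the Gateway-level one
+	// (Gateway settings win), so a nil value simply means there is no NginxProxy config to apply
+	// and the defaults built above are returned unchanged.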
+ np := g.Gateway.EffectiveNginxProxy + if np == nil { return baseConfig } - if g.NginxProxy.Source.Spec.DisableHTTP2 { + if np.DisableHTTP2 != nil && *np.DisableHTTP2 { baseConfig.HTTP2 = false } - if g.NginxProxy.Source.Spec.IPFamily != nil { - switch *g.NginxProxy.Source.Spec.IPFamily { - case ngfAPIv1alpha1.IPv4: + if np.IPFamily != nil { + switch *np.IPFamily { + case ngfAPIv1alpha2.IPv4: baseConfig.IPFamily = IPv4 - case ngfAPIv1alpha1.IPv6: + case ngfAPIv1alpha2.IPv6: baseConfig.IPFamily = IPv6 } } - if g.NginxProxy.Source.Spec.RewriteClientIP != nil { - if g.NginxProxy.Source.Spec.RewriteClientIP.Mode != nil { - switch *g.NginxProxy.Source.Spec.RewriteClientIP.Mode { - case ngfAPIv1alpha1.RewriteClientIPModeProxyProtocol: + if np.RewriteClientIP != nil { + if np.RewriteClientIP.Mode != nil { + switch *np.RewriteClientIP.Mode { + case ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol: baseConfig.RewriteClientIPSettings.Mode = RewriteIPModeProxyProtocol - case ngfAPIv1alpha1.RewriteClientIPModeXForwardedFor: + case ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor: baseConfig.RewriteClientIPSettings.Mode = RewriteIPModeXForwardedFor } } - if len(g.NginxProxy.Source.Spec.RewriteClientIP.TrustedAddresses) > 0 { + if len(np.RewriteClientIP.TrustedAddresses) > 0 { baseConfig.RewriteClientIPSettings.TrustedAddresses = convertAddresses( - g.NginxProxy.Source.Spec.RewriteClientIP.TrustedAddresses, + np.RewriteClientIP.TrustedAddresses, ) } - if g.NginxProxy.Source.Spec.RewriteClientIP.SetIPRecursively != nil { - baseConfig.RewriteClientIPSettings.IPRecursive = *g.NginxProxy.Source.Spec.RewriteClientIP.SetIPRecursively + if np.RewriteClientIP.SetIPRecursively != nil { + baseConfig.RewriteClientIPSettings.IPRecursive = *np.RewriteClientIP.SetIPRecursively } } @@ -993,7 +1017,7 @@ func buildPolicies(graphPolicies []*graph.Policy) []policies.Policy { return finalPolicies } -func convertAddresses(addresses []ngfAPIv1alpha1.RewriteClientIPAddress) []string { +func convertAddresses(addresses []ngfAPIv1alpha2.RewriteClientIPAddress) []string { trustedAddresses := make([]string, len(addresses)) for i, addr := range addresses { trustedAddresses[i] = addr.Value @@ -1004,10 +1028,14 @@ func convertAddresses(addresses []ngfAPIv1alpha1.RewriteClientIPAddress) []strin func buildLogging(g *graph.Graph) Logging { logSettings := Logging{ErrorLevel: defaultErrorLogLevel} - ngfProxy := g.NginxProxy - if ngfProxy != nil && ngfProxy.Source.Spec.Logging != nil { - if ngfProxy.Source.Spec.Logging.ErrorLevel != nil { - logSettings.ErrorLevel = string(*ngfProxy.Source.Spec.Logging.ErrorLevel) + if g.Gateway == nil || g.Gateway.EffectiveNginxProxy == nil { + return logSettings + } + + ngfProxy := g.Gateway.EffectiveNginxProxy + if ngfProxy.Logging != nil { + if ngfProxy.Logging.ErrorLevel != nil { + logSettings.ErrorLevel = string(*ngfProxy.Logging.ErrorLevel) } } @@ -1031,11 +1059,15 @@ func buildAuxiliarySecrets( func buildNginxPlus(g *graph.Graph) NginxPlus { nginxPlusSettings := NginxPlus{AllowedAddresses: []string{"127.0.0.1"}} - ngfProxy := g.NginxProxy - if ngfProxy != nil && ngfProxy.Source.Spec.NginxPlus != nil { - if ngfProxy.Source.Spec.NginxPlus.AllowedAddresses != nil { - addresses := make([]string, 0, len(ngfProxy.Source.Spec.NginxPlus.AllowedAddresses)) - for _, addr := range ngfProxy.Source.Spec.NginxPlus.AllowedAddresses { + if g.Gateway == nil || g.Gateway.EffectiveNginxProxy == nil { + return nginxPlusSettings + } + + ngfProxy := g.Gateway.EffectiveNginxProxy + if ngfProxy.NginxPlus != nil { + if 
ngfProxy.NginxPlus.AllowedAddresses != nil { + addresses := make([]string, 0, len(ngfProxy.NginxPlus.AllowedAddresses)) + for _, addr := range ngfProxy.NginxPlus.AllowedAddresses { addresses = append(addresses, addr.Value) } diff --git a/internal/mode/static/state/dataplane/configuration_test.go b/internal/mode/static/state/dataplane/configuration_test.go index b13a7845a5..8aed106e7e 100644 --- a/internal/mode/static/state/dataplane/configuration_test.go +++ b/internal/mode/static/state/dataplane/configuration_test.go @@ -901,43 +901,26 @@ func TestBuildConfiguration(t *testing.T) { }, } - nginxProxy := &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "my-otel.svc:4563", - BatchSize: helpers.GetPointer(int32(512)), - BatchCount: helpers.GetPointer(int32(4)), - Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), - }, - ServiceName: helpers.GetPointer("my-svc"), - }, - DisableHTTP2: true, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.Dual), + nginxProxy := &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-otel.svc:4563"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), }, + ServiceName: helpers.GetPointer("my-svc"), }, - Valid: true, + DisableHTTP2: helpers.GetPointer(true), + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), } - nginxProxyIPv4 := &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{}, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.IPv4), - }, - }, - Valid: true, + nginxProxyIPv4 := &graph.EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv4), } - nginxProxyIPv6 := &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{}, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.IPv6), - }, - }, - Valid: true, + nginxProxyIPv6 := &graph.EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), } defaultConfig := Configuration{ @@ -2165,7 +2148,7 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = nginxProxy + g.Gateway.EffectiveNginxProxy = nginxProxy return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2183,42 +2166,7 @@ func TestBuildConfiguration(t *testing.T) { conf.BaseHTTPConfig = BaseHTTPConfig{HTTP2: false, IPFamily: Dual} return conf }), - msg: "NginxProxy with tracing config and http2 disabled", - }, - { - graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", - } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, - }) - g.NginxProxy = &graph.NginxProxy{ - Valid: false, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - DisableHTTP2: true, - IPFamily: helpers.GetPointer(ngfAPIv1alpha1.Dual), - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "some-endpoint", - }, - }, - }, - }, - } - return g - }), - expConf: 
getModifiedExpectedConfiguration(func(conf Configuration) Configuration { - conf.SSLServers = []VirtualServer{} - conf.SSLKeyPairs = map[SSLKeyPairID]SSLKeyPair{} - return conf - }), - msg: "invalid NginxProxy", + msg: "EffectiveNginxProxy with tracing config and http2 disabled", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { @@ -2327,7 +2275,7 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = nginxProxyIPv4 + g.Gateway.EffectiveNginxProxy = nginxProxyIPv4 return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2336,7 +2284,7 @@ func TestBuildConfiguration(t *testing.T) { conf.BaseHTTPConfig = BaseHTTPConfig{HTTP2: true, IPFamily: IPv4} return conf }), - msg: "NginxProxy with IPv4 IPFamily and no routes", + msg: "GatewayClass has NginxProxy with IPv4 IPFamily and no routes", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { @@ -2350,7 +2298,7 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = nginxProxyIPv6 + g.Gateway.EffectiveNginxProxy = nginxProxyIPv6 return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2359,7 +2307,7 @@ func TestBuildConfiguration(t *testing.T) { conf.BaseHTTPConfig = BaseHTTPConfig{HTTP2: true, IPFamily: IPv6} return conf }), - msg: "NginxProxy with IPv6 IPFamily and no routes", + msg: "GatewayClass has NginxProxy with IPv6 IPFamily and no routes", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { @@ -2373,21 +2321,16 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "1.1.1.1/32", - }, - }, - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeProxyProtocol), + g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + SetIPRecursively: helpers.GetPointer(true), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "1.1.1.1/32", }, }, + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, } return g @@ -2406,7 +2349,7 @@ func TestBuildConfiguration(t *testing.T) { } return conf }), - msg: "NginxProxy with rewriteClientIP details set", + msg: "GatewayClass has NginxProxy with rewriteClientIP details set", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { @@ -2420,12 +2363,9 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelDebug)}, - }, + g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, } return g @@ -2436,7 +2376,7 @@ func TestBuildConfiguration(t *testing.T) { conf.Logging = Logging{ErrorLevel: "debug"} 
return conf }), - msg: "NginxProxy with error log level set to debug", + msg: "GatewayClass has NginxProxy with error log level set to debug", }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { @@ -2486,16 +2426,11 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, - }, - }, + g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, }, }, } @@ -2580,16 +2515,11 @@ func TestBuildConfiguration_Plus(t *testing.T) { Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.NginxProxy = &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, - }, - }, + g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, }, }, } @@ -3593,24 +3523,19 @@ func TestConvertBackendTLS(t *testing.T) { func TestBuildTelemetry(t *testing.T) { t.Parallel() - telemetryConfigured := &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{ - Endpoint: "my-otel.svc:4563", - BatchSize: helpers.GetPointer(int32(512)), - BatchCount: helpers.GetPointer(int32(4)), - Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), - }, - ServiceName: helpers.GetPointer("my-svc"), - SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ - {Key: "key", Value: "value"}, - }, - }, + telemetryConfigured := &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-otel.svc:4563"), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, }, }, - Valid: true, } createTelemetry := func() Telemetry { @@ -3636,10 +3561,24 @@ func TestBuildTelemetry(t *testing.T) { msg string expTelemetry Telemetry }{ + { + g: &graph.Graph{}, + expTelemetry: Telemetry{}, + msg: "nil Gateway", + }, + { + g: &graph.Graph{ + Gateway: &graph.Gateway{ + EffectiveNginxProxy: nil, + }, + }, + expTelemetry: Telemetry{}, + msg: "nil effective NginxProxy", + }, { g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: 
&graph.EffectiveNginxProxy{}, }, }, expTelemetry: Telemetry{}, @@ -3647,19 +3586,49 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Telemetry: &ngfAPIv1alpha1.Telemetry{ - Exporter: &ngfAPIv1alpha1.TelemetryExporter{}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-otel.svc:4563"), + }, + DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{ + ngfAPIv1alpha2.DisableTracing, + }, + }, + }, + }, + }, + expTelemetry: Telemetry{}, + msg: "Telemetry disabled explicitly", + }, + { + g: &graph.Graph{ + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: nil, + }, + }, + }, + }, + expTelemetry: Telemetry{}, + msg: "Telemetry disabled implicitly (nil exporter)", + }, + { + g: &graph.Graph{ + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: nil, }, }, }, - Valid: false, }, }, expTelemetry: Telemetry{}, - msg: "Invalid NginxProxy configured", + msg: "Telemetry disabled implicitly (nil exporter endpoint)", }, { g: &graph.Graph{ @@ -3670,8 +3639,8 @@ func TestBuildTelemetry(t *testing.T) { Namespace: "ns", }, }, + EffectiveNginxProxy: telemetryConfigured, }, - NginxProxy: telemetryConfigured, }, expTelemetry: createTelemetry(), msg: "Telemetry configured", @@ -3685,8 +3654,8 @@ func TestBuildTelemetry(t *testing.T) { Namespace: "ns", }, }, + EffectiveNginxProxy: telemetryConfigured, }, - NginxProxy: telemetryConfigured, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { Source: &ngfAPIv1alpha2.ObservabilityPolicy{ @@ -3720,8 +3689,8 @@ func TestBuildTelemetry(t *testing.T) { Namespace: "ns", }, }, + EffectiveNginxProxy: telemetryConfigured, }, - NginxProxy: telemetryConfigured, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { Source: &ngfAPIv1alpha2.ObservabilityPolicy{ @@ -3790,8 +3759,8 @@ func TestBuildTelemetry(t *testing.T) { Namespace: "ns", }, }, + EffectiveNginxProxy: telemetryConfigured, }, - NginxProxy: telemetryConfigured, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { Source: &ngfAPIv1alpha2.ObservabilityPolicy{ @@ -4227,9 +4196,8 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "no rewrite IP settings configured", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, }, }, expRewriteIPSettings: RewriteClientIPSettings{}, @@ -4237,20 +4205,17 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with proxyProtocol", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeProxyProtocol), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "10.9.9.4/32", - }, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: 
&graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "10.9.9.4/32", }, - SetIPRecursively: helpers.GetPointer(true), }, + SetIPRecursively: helpers.GetPointer(true), }, }, }, @@ -4264,20 +4229,17 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with xForwardedFor", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeXForwardedFor), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "76.89.90.11/24", - }, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "76.89.90.11/24", }, - SetIPRecursively: helpers.GetPointer(true), }, + SetIPRecursively: helpers.GetPointer(true), }, }, }, @@ -4291,32 +4253,29 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with recursive set to false and multiple trusted addresses", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - RewriteClientIP: &ngfAPIv1alpha1.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha1.RewriteClientIPModeXForwardedFor), - TrustedAddresses: []ngfAPIv1alpha1.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "5.5.5.5/12", - }, - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "1.1.1.1/26", - }, - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "2.2.2.2/32", - }, - { - Type: ngfAPIv1alpha1.RewriteClientIPCIDRAddressType, - Value: "3.3.3.3/24", - }, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "5.5.5.5/12", + }, + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "1.1.1.1/26", + }, + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "2.2.2.2/32", + }, + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "3.3.3.3/24", }, - SetIPRecursively: helpers.GetPointer(false), }, + SetIPRecursively: helpers.GetPointer(false), }, }, }, @@ -4349,30 +4308,39 @@ func TestBuildLogging(t *testing.T) { expLoggingSettings Logging }{ { - msg: "NginxProxy is nil", - g: &graph.Graph{}, + msg: "Gateway is nil", + g: &graph.Graph{ + Gateway: nil, + }, + expLoggingSettings: defaultLogging, + }, + { + msg: "Gateway has no effective NginxProxy", + g: &graph.Graph{ + Gateway: &graph.Gateway{ + EffectiveNginxProxy: nil, + }, + }, expLoggingSettings: defaultLogging, }, { - msg: "NginxProxy does not specify log level", + msg: "Effective NginxProxy does not specify log level", g: &graph.Graph{ - 
NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, }, }, expLoggingSettings: defaultLogging, }, { - msg: "NginxProxy log level set to debug", + msg: "Effective NginxProxy log level set to debug", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelDebug)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, }, }, @@ -4380,13 +4348,12 @@ func TestBuildLogging(t *testing.T) { expLoggingSettings: Logging{ErrorLevel: "debug"}, }, { - msg: "NginxProxy log level set to info", + msg: "Effective NginxProxy log level set to info", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelInfo)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelInfo), }, }, }, @@ -4394,13 +4361,12 @@ func TestBuildLogging(t *testing.T) { expLoggingSettings: Logging{ErrorLevel: defaultErrorLogLevel}, }, { - msg: "NginxProxy log level set to notice", + msg: "Effective NginxProxy log level set to notice", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelNotice)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelNotice), }, }, }, @@ -4408,13 +4374,12 @@ func TestBuildLogging(t *testing.T) { expLoggingSettings: Logging{ErrorLevel: "notice"}, }, { - msg: "NginxProxy log level set to warn", + msg: "Effective NginxProxy log level set to warn", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelWarn)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelWarn), }, }, }, @@ -4422,13 +4387,12 @@ func TestBuildLogging(t *testing.T) { expLoggingSettings: Logging{ErrorLevel: "warn"}, }, { - msg: "NginxProxy log level set to error", + msg: "Effective NginxProxy log level set to error", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelError)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), }, }, }, @@ -4436,13 +4400,12 @@ func 
TestBuildLogging(t *testing.T) { expLoggingSettings: Logging{ErrorLevel: "error"}, }, { - msg: "NginxProxy log level set to crit", + msg: "Effective NginxProxy log level set to crit", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelCrit)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelCrit), }, }, }, @@ -4450,13 +4413,12 @@ func TestBuildLogging(t *testing.T) { expLoggingSettings: Logging{ErrorLevel: "crit"}, }, { - msg: "NginxProxy log level set to alert", + msg: "Effective NginxProxy log level set to alert", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelAlert)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelAlert), }, }, }, @@ -4464,13 +4426,12 @@ func TestBuildLogging(t *testing.T) { expLoggingSettings: Logging{ErrorLevel: "alert"}, }, { - msg: "NginxProxy log level set to emerg", + msg: "Effective NginxProxy log level set to emerg", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - Logging: &ngfAPIv1alpha1.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha1.NginxLogLevelEmerg)}, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelEmerg), }, }, }, @@ -4716,11 +4677,8 @@ func TestBuildNginxPlus(t *testing.T) { { msg: "NginxPlus default values are used when NginxProxy doesn't specify NginxPlus settings", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{}, - }, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, }, }, expNginxPlus: defaultNginxPlus, @@ -4728,14 +4686,11 @@ func TestBuildNginxPlus(t *testing.T) { { msg: "NginxProxy specifies one allowed address", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - }, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, }, }, }, @@ -4746,15 +4701,12 @@ func TestBuildNginxPlus(t *testing.T) { { msg: "NginxProxy specifies multiple allowed addresses", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: 
ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, - }, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, }, }, }, @@ -4765,14 +4717,11 @@ func TestBuildNginxPlus(t *testing.T) { { msg: "NginxProxy specifies 127.0.0.1 as allowed address", g: &graph.Graph{ - NginxProxy: &graph.NginxProxy{ - Valid: true, - Source: &ngfAPIv1alpha1.NginxProxy{ - Spec: ngfAPIv1alpha1.NginxProxySpec{ - NginxPlus: &ngfAPIv1alpha1.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha1.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha1.NginxPlusAllowIPAddressType, Value: "127.0.0.1"}, - }, + Gateway: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.1"}, }, }, }, diff --git a/internal/mode/static/state/graph/backend_refs.go b/internal/mode/static/state/graph/backend_refs.go index ad676f59c5..b8775df4e7 100644 --- a/internal/mode/static/state/graph/backend_refs.go +++ b/internal/mode/static/state/graph/backend_refs.go @@ -11,8 +11,7 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha3" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" - + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/sort" @@ -49,7 +48,7 @@ func addBackendRefsToRouteRules( refGrantResolver *referenceGrantResolver, services map[types.NamespacedName]*v1.Service, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *NginxProxy, + npCfg *EffectiveNginxProxy, ) { for _, r := range routes { addBackendRefsToRules(r, refGrantResolver, services, backendTLSPolicies, npCfg) @@ -63,7 +62,7 @@ func addBackendRefsToRules( refGrantResolver *referenceGrantResolver, services map[types.NamespacedName]*v1.Service, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *NginxProxy, + npCfg *EffectiveNginxProxy, ) { if !route.Valid { return @@ -129,7 +128,7 @@ func createBackendRef( services map[types.NamespacedName]*v1.Service, refPath *field.Path, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *NginxProxy, + npCfg *EffectiveNginxProxy, ) (BackendRef, *conditions.Condition) { // Data plane will handle invalid ref by responding with 500. // Because of that, we always need to add a BackendRef to group.Backends, even if the ref is invalid. @@ -327,30 +326,32 @@ func getIPFamilyAndPortFromRef( return svc.Spec.IPFamilies, svcPort, nil } -func verifyIPFamily(npCfg *NginxProxy, svcIPFamily []v1.IPFamily) error { - if npCfg == nil || npCfg.Source == nil || !npCfg.Valid { +func verifyIPFamily(npCfg *EffectiveNginxProxy, svcIPFamily []v1.IPFamily) error { + if npCfg == nil { return nil } - // we can access this field since we have already validated that ipFamily is not nil in validateNginxProxy. 
- npIPFamily := npCfg.Source.Spec.IPFamily - if *npIPFamily == ngfAPI.IPv4 { - if slices.Contains(svcIPFamily, v1.IPv6Protocol) { - // capitalizing error message to match the rest of the error messages associated with a condition - //nolint: stylecheck - return errors.New( - "service configured with IPv6 family but NginxProxy is configured with IPv4", - ) - } + containsIPv6 := slices.Contains(svcIPFamily, v1.IPv6Protocol) + containsIPv4 := slices.Contains(svcIPFamily, v1.IPv4Protocol) + + //nolint: stylecheck // used in status condition which is normally capitalized + errIPv6Mismatch := errors.New("service configured with IPv6 family but NginxProxy is configured with IPv4") + //nolint: stylecheck // used in status condition which is normally capitalized + errIPv4Mismatch := errors.New("service configured with IPv4 family but NginxProxy is configured with IPv6") + + npIPFamily := npCfg.IPFamily + + if npIPFamily == nil { + // default is dual so we don't need to check the service IPFamily. + return nil } - if *npIPFamily == ngfAPI.IPv6 { - if slices.Contains(svcIPFamily, v1.IPv4Protocol) { - // capitalizing error message to match the rest of the error messages associated with a condition - //nolint: stylecheck - return errors.New( - "service configured with IPv4 family but NginxProxy is configured with IPv6", - ) - } + + if *npIPFamily == ngfAPIv1alpha2.IPv4 && containsIPv6 { + return errIPv6Mismatch + } + + if *npIPFamily == ngfAPIv1alpha2.IPv6 && containsIPv4 { + return errIPv4Mismatch } return nil diff --git a/internal/mode/static/state/graph/backend_refs_test.go b/internal/mode/static/state/graph/backend_refs_test.go index 0d43456eed..6106b791d6 100644 --- a/internal/mode/static/state/graph/backend_refs_test.go +++ b/internal/mode/static/state/graph/backend_refs_test.go @@ -14,7 +14,7 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha2" "sigs.k8s.io/gateway-api/apis/v1alpha3" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" @@ -324,55 +324,35 @@ func TestVerifyIPFamily(t *testing.T) { test := []struct { name string expErr error - npCfg *NginxProxy + npCfg *EffectiveNginxProxy svcIPFamily []v1.IPFamily }{ { name: "Valid - IPv6 and IPv4 configured for NGINX, service has only IPv4", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.Dual), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, svcIPFamily: []v1.IPFamily{v1.IPv4Protocol}, }, { name: "Valid - IPv6 and IPv4 configured for NGINX, service has only IPv6", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.Dual), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, svcIPFamily: []v1.IPFamily{v1.IPv6Protocol}, }, { name: "Invalid - IPv4 configured for NGINX, service has only IPv6", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.IPv4), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv4), }, svcIPFamily: 
[]v1.IPFamily{v1.IPv6Protocol}, expErr: errors.New("service configured with IPv6 family but NginxProxy is configured with IPv4"), }, { name: "Invalid - IPv6 configured for NGINX, service has only IPv4", - npCfg: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.IPv6), - }, - }, - Valid: true, + npCfg: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), }, svcIPFamily: []v1.IPFamily{v1.IPv4Protocol}, expErr: errors.New("service configured with IPv4 family but NginxProxy is configured with IPv6"), @@ -845,7 +825,7 @@ func TestCreateBackend(t *testing.T) { tests := []struct { expectedCondition *conditions.Condition - nginxProxy *NginxProxy + nginxProxySpec *EffectiveNginxProxy name string expectedServicePortReference string ref gatewayv1.HTTPBackendRef @@ -958,12 +938,7 @@ func TestCreateBackend(t *testing.T) { Weight: 5, Valid: false, }, - nginxProxy: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}, - }, - Valid: true, - }, + nginxProxySpec: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6)}, expectedCondition: helpers.GetPointer( staticConds.NewRouteInvalidIPFamily(`service configured with IPv4 family but NginxProxy is configured with IPv6`), ), @@ -1043,7 +1018,7 @@ func TestCreateBackend(t *testing.T) { services, refPath, policies, - test.nginxProxy, + test.nginxProxySpec, ) g.Expect(helpers.Diff(test.expectedBackend, backend)).To(BeEmpty()) diff --git a/internal/mode/static/state/graph/gateway.go b/internal/mode/static/state/graph/gateway.go index 2ee69b5546..b6cfd49ebe 100644 --- a/internal/mode/static/state/graph/gateway.go +++ b/internal/mode/static/state/graph/gateway.go @@ -9,6 +9,7 @@ import ( v1 "sigs.k8s.io/gateway-api/apis/v1" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" ngfsort "github.com/nginx/nginx-gateway-fabric/internal/mode/static/sort" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) @@ -17,6 +18,12 @@ import ( type Gateway struct { // Source is the corresponding Gateway resource. Source *v1.Gateway + // NginxProxy is the NginxProxy referenced by this Gateway. + NginxProxy *NginxProxy + /// EffectiveNginxProxy holds the result of merging the NginxProxySpec on this resource with the NginxProxySpec on + // the GatewayClass resource. This is the effective set of config that should be applied to the Gateway. + // If non-nil, then this config is valid. + EffectiveNginxProxy *EffectiveNginxProxy // Listeners include the listeners of the Gateway. Listeners []*Listener // Conditions holds the conditions for the Gateway. 
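Note on the verifyIPFamily rewrite in backend_refs.go earlier in this patch: the check now branches on the flattened EffectiveNginxProxy value instead of the NginxProxy source object. A minimal standalone sketch of that decision logic follows; the type and function names here are illustrative only and are not part of the package.

package main

import (
	"errors"
	"fmt"
	"slices"
)

type ipFamily string

const (
	ipFamilyDual ipFamily = "dual"
	ipFamilyV4   ipFamily = "ipv4"
	ipFamilyV6   ipFamily = "ipv6"
)

// checkIPFamily mirrors the rewritten check: a nil family means the default
// (dual), so any service family is accepted; otherwise a mismatch between the
// configured family and the service's families is an error.
func checkIPFamily(npFamily *ipFamily, svcFamilies []ipFamily) error {
	if npFamily == nil {
		return nil
	}
	if *npFamily == ipFamilyV4 && slices.Contains(svcFamilies, ipFamilyV6) {
		return errors.New("service configured with IPv6 family but NginxProxy is configured with IPv4")
	}
	if *npFamily == ipFamilyV6 && slices.Contains(svcFamilies, ipFamilyV4) {
		return errors.New("service configured with IPv4 family but NginxProxy is configured with IPv6")
	}
	return nil
}

func main() {
	v4 := ipFamilyV4
	fmt.Println(checkIPFamily(&v4, []ipFamily{ipFamilyV6})) // mismatch: error
	fmt.Println(checkIPFamily(nil, []ipFamily{ipFamilyV6})) // nil family defaults to dual: no error
}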
@@ -98,29 +105,91 @@ func buildGateway( gc *GatewayClass, refGrantResolver *referenceGrantResolver, protectedPorts ProtectedPorts, + nps map[types.NamespacedName]*NginxProxy, ) *Gateway { if gw == nil { return nil } - conds := validateGateway(gw, gc) + var np *NginxProxy + if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { + npName := types.NamespacedName{Namespace: gw.Namespace, Name: gw.Spec.Infrastructure.ParametersRef.Name} + np = nps[npName] + } - if len(conds) > 0 { + var gcNp *NginxProxy + if gc != nil { + gcNp = gc.NginxProxy + } + + effectiveNginxProxy := buildEffectiveNginxProxy(gcNp, np) + + conds, valid := validateGateway(gw, gc, np) + + if !valid { return &Gateway{ - Source: gw, - Valid: false, - Conditions: conds, + Source: gw, + Valid: false, + NginxProxy: np, + EffectiveNginxProxy: effectiveNginxProxy, + Conditions: conds, } } return &Gateway{ - Source: gw, - Listeners: buildListeners(gw, secretResolver, refGrantResolver, protectedPorts), - Valid: true, + Source: gw, + Listeners: buildListeners(gw, secretResolver, refGrantResolver, protectedPorts), + NginxProxy: np, + EffectiveNginxProxy: effectiveNginxProxy, + Valid: true, + Conditions: conds, } } -func validateGateway(gw *v1.Gateway, gc *GatewayClass) []conditions.Condition { +func validateGatewayParametersRef(npCfg *NginxProxy, ref v1.LocalParametersReference) []conditions.Condition { + var conds []conditions.Condition + + path := field.NewPath("spec.infrastructure.parametersRef") + + if _, ok := supportedParamKinds[string(ref.Kind)]; !ok { + err := field.NotSupported(path.Child("kind"), string(ref.Kind), []string{kinds.NginxProxy}) + conds = append( + conds, + staticConds.NewGatewayRefInvalid(err.Error()), + staticConds.NewGatewayInvalidParameters(err.Error()), + ) + + return conds + } + + if npCfg == nil { + conds = append( + conds, + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters( + field.NotFound(path.Child("name"), ref.Name).Error(), + ), + ) + + return conds + } + + if !npCfg.Valid { + msg := npCfg.ErrMsgs.ToAggregate().Error() + conds = append( + conds, + staticConds.NewGatewayRefInvalid(msg), + staticConds.NewGatewayInvalidParameters(msg), + ) + + return conds + } + + conds = append(conds, staticConds.NewGatewayResolvedRefs()) + return conds +} + +func validateGateway(gw *v1.Gateway, gc *GatewayClass, npCfg *NginxProxy) ([]conditions.Condition, bool) { var conds []conditions.Condition if gc == nil { @@ -136,5 +205,17 @@ func validateGateway(gw *v1.Gateway, gc *GatewayClass) []conditions.Condition { conds = append(conds, staticConds.NewGatewayUnsupportedValue(valErr.Error())...) } - return conds + valid := true + // we evaluate validity before validating parametersRef because an invalid parametersRef/NginxProxy does not + // invalidate the entire Gateway. + if len(conds) > 0 { + valid = false + } + + if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { + paramConds := validateGatewayParametersRef(npCfg, *gw.Spec.Infrastructure.ParametersRef) + conds = append(conds, paramConds...) 
+ } + + return conds, valid } diff --git a/internal/mode/static/state/graph/gateway_test.go b/internal/mode/static/state/graph/gateway_test.go index 7718a084c5..58c6638abb 100644 --- a/internal/mode/static/state/graph/gateway_test.go +++ b/internal/mode/static/state/graph/gateway_test.go @@ -8,10 +8,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + "sigs.k8s.io/gateway-api/apis/v1beta1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" @@ -338,6 +341,7 @@ func TestBuildGateway(t *testing.T) { ) type gatewayCfg struct { + ref *v1.LocalParametersReference listeners []v1.Listener addresses []v1.GatewaySpecAddress } @@ -354,12 +358,64 @@ func TestBuildGateway(t *testing.T) { Addresses: cfg.addresses, }, } + + if cfg.ref != nil { + lastCreatedGateway.Spec.Infrastructure = &v1.GatewayInfrastructure{ + ParametersRef: cfg.ref, + } + } return lastCreatedGateway } getLastCreatedGateway := func() *v1.Gateway { return lastCreatedGateway } + validGwNp := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "valid-gw-np", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError)}, + }, + } + validGwNpRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: validGwNp.Name, + } + invalidGwNp := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "invalid-gw-np", + }, + } + invalidGwNpRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: invalidGwNp.Name, + } + invalidKindRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: "Invalid", + Name: "invalid-kind", + } + npDoesNotExistRef := &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "does-not-exist", + } + + validGcNp := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "valid-gc-np", + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), + }, + } + validGC := &GatewayClass{ Valid: true, } @@ -367,6 +423,14 @@ func TestBuildGateway(t *testing.T) { Valid: false, } + validGCWithNp := &GatewayClass{ + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGcNp, + Valid: true, + }, + } + supportedKindsForListeners := []v1.RouteGroupKind{ {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, @@ -509,6 +573,90 @@ func TestBuildGateway(t *testing.T) { }, name: "valid https listener with cross-namespace secret; allowed by reference grant", }, + { + gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: validGwNpRef}), + gatewayClass: validGC, + expected: &Gateway{ + Source: getLastCreatedGateway(), + Listeners: 
[]*Listener{ + { + Name: "foo-80-1", + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGwNp, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + }, + name: "valid http listener with valid NginxProxy; GatewayClass has no NginxProxy", + }, + { + gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: validGwNpRef}), + gatewayClass: validGCWithNp, + expected: &Gateway{ + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGwNp, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), + }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + }, + name: "valid http listener with valid NginxProxy; GatewayClass has valid NginxProxy too", + }, + { + gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}}), + gatewayClass: validGCWithNp, + expected: &Gateway{ + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), + }, + }, + name: "valid http listener; GatewayClass has valid NginxProxy", + }, { gateway: createGateway(gatewayCfg{listeners: []v1.Listener{crossNamespaceSecretListener}}), gatewayClass: validGC, @@ -1024,6 +1172,116 @@ func TestBuildGateway(t *testing.T) { }, name: "https listener and tls listener with non overlapping hostnames", }, + { + gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: invalidKindRef}), + gatewayClass: validGC, + expected: &Gateway{ + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + Valid: true, // invalid parametersRef does not invalidate Gateway. 
+ Conditions: []conditions.Condition{ + staticConds.NewGatewayRefInvalid( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + + "supported values: \"NginxProxy\"", + ), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + + "supported values: \"NginxProxy\"", + ), + }, + }, + name: "invalid parameters ref kind", + }, + { + gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: npDoesNotExistRef}), + gatewayClass: validGC, + expected: &Gateway{ + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + Valid: true, // invalid parametersRef does not invalidate Gateway. + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.name: Not found: \"does-not-exist\"", + ), + }, + }, + name: "referenced NginxProxy doesn't exist", + }, + { + gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: invalidGwNpRef}), + gatewayClass: validGC, + expected: &Gateway{ + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + Valid: true, // invalid NginxProxy does not invalidate Gateway. + NginxProxy: &NginxProxy{ + Source: invalidGwNp, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), + }, + }, + name: "invalid NginxProxy", + }, + { + gateway: createGateway( + gatewayCfg{listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}, ref: invalidGwNpRef}, + ), + gatewayClass: invalidGC, + expected: &Gateway{ + Source: getLastCreatedGateway(), + Valid: false, + NginxProxy: &NginxProxy{ + Source: invalidGwNp, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + Conditions: append( + staticConds.NewGatewayInvalid("GatewayClass is invalid"), + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), + ), + }, + name: "invalid gatewayclass and invalid NginxProxy", + }, } secretResolver := newSecretResolver( @@ -1032,12 +1290,106 @@ func TestBuildGateway(t *testing.T) { client.ObjectKeyFromObject(secretDiffNamespace): secretDiffNamespace, }) + nginxProxies := map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(validGwNp): {Valid: true, Source: validGwNp}, + client.ObjectKeyFromObject(validGcNp): {Valid: true, Source: validGcNp}, + client.ObjectKeyFromObject(invalidGwNp): { + Source: invalidGwNp, + ErrMsgs: append(field.ErrorList{}, field.Required(field.NewPath("somePath"), "someField")), + Valid: false, + }, + } + for _, test := range tests { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) resolver := newReferenceGrantResolver(test.refGrants) - result := 
buildGateway(test.gateway, secretResolver, test.gatewayClass, resolver, protectedPorts) + result := buildGateway(test.gateway, secretResolver, test.gatewayClass, resolver, protectedPorts, nginxProxies) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) }) } } + +func TestValidateGatewayParametersRef(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + np *NginxProxy + ref v1.LocalParametersReference + expConds []conditions.Condition + }{ + { + name: "unsupported parameter ref kind", + ref: v1.LocalParametersReference{ + Kind: "wrong-kind", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayRefInvalid( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"wrong-kind\": " + + "supported values: \"NginxProxy\"", + ), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"wrong-kind\": " + + "supported values: \"NginxProxy\"", + ), + }, + }, + { + name: "nil nginx proxy", + ref: v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters("spec.infrastructure.parametersRef.name: Not found: \"np\""), + }, + }, + { + name: "invalid nginx proxy", + np: &NginxProxy{ + Source: &ngfAPIv1alpha2.NginxProxy{}, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + ref: v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), + }, + }, + { + name: "valid", + np: &NginxProxy{ + Source: &ngfAPIv1alpha2.NginxProxy{}, + Valid: true, + }, + ref: v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: "np", + }, + expConds: []conditions.Condition{ + staticConds.NewGatewayResolvedRefs(), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conds := validateGatewayParametersRef(test.np, test.ref) + g.Expect(conds).To(BeEquivalentTo(test.expConds)) + }) + } +} diff --git a/internal/mode/static/state/graph/gatewayclass.go b/internal/mode/static/state/graph/gatewayclass.go index 510fd63f32..db5e3dff77 100644 --- a/internal/mode/static/state/graph/gatewayclass.go +++ b/internal/mode/static/state/graph/gatewayclass.go @@ -1,8 +1,6 @@ package graph import ( - "errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" @@ -19,6 +17,8 @@ import ( type GatewayClass struct { // Source is the source resource. Source *v1.GatewayClass + // NginxProxy is the NginxProxy resource referenced by this GatewayClass. + NginxProxy *NginxProxy // Conditions include Conditions for the GatewayClass. Conditions []conditions.Condition // Valid shows whether the GatewayClass is valid. @@ -34,7 +34,7 @@ type processedGatewayClasses struct { // processGatewayClasses returns the "Winner" GatewayClass, which is defined in // the command-line argument and references this controller, and a list of "Ignored" GatewayClasses // that reference this controller, but are not named in the command-line argument. 
-// Also returns a boolean that says whether or not the GatewayClass defined +// Also returns a boolean that says whether the GatewayClass defined // in the command-line argument exists, regardless of which controller it references. func processGatewayClasses( gcs map[types.NamespacedName]*v1.GatewayClass, @@ -63,22 +63,66 @@ func processGatewayClasses( func buildGatewayClass( gc *v1.GatewayClass, - npCfg *NginxProxy, + nps map[types.NamespacedName]*NginxProxy, crdVersions map[types.NamespacedName]*metav1.PartialObjectMetadata, ) *GatewayClass { if gc == nil { return nil } - conds, valid := validateGatewayClass(gc, npCfg, crdVersions) + var np *NginxProxy + if gc.Spec.ParametersRef != nil { + np = getNginxProxyForGatewayClass(*gc.Spec.ParametersRef, nps) + } + + conds, valid := validateGatewayClass(gc, np, crdVersions) return &GatewayClass{ Source: gc, + NginxProxy: np, Valid: valid, Conditions: conds, } } +func getNginxProxyForGatewayClass( + ref v1.ParametersReference, + nps map[types.NamespacedName]*NginxProxy, +) *NginxProxy { + if ref.Namespace == nil { + return nil + } + + npName := types.NamespacedName{Name: ref.Name, Namespace: string(*ref.Namespace)} + + return nps[npName] +} + +func validateGatewayClassParametersRef(path *field.Path, ref v1.ParametersReference) []conditions.Condition { + var errs field.ErrorList + + if _, ok := supportedParamKinds[string(ref.Kind)]; !ok { + errs = append( + errs, + field.NotSupported(path.Child("kind"), string(ref.Kind), []string{kinds.NginxProxy}), + ) + } + + if ref.Namespace == nil { + errs = append(errs, field.Required(path.Child("namespace"), "ParametersRef must specify Namespace")) + } + + if len(errs) > 0 { + msg := errs.ToAggregate().Error() + return []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid(msg), + staticConds.NewGatewayClassInvalidParameters(msg), + } + } + + return nil +} + func validateGatewayClass( gc *v1.GatewayClass, npCfg *NginxProxy, @@ -86,28 +130,44 @@ func validateGatewayClass( ) ([]conditions.Condition, bool) { var conds []conditions.Condition - if gc.Spec.ParametersRef != nil { - var err error - path := field.NewPath("spec").Child("parametersRef") - if _, ok := supportedParamKinds[string(gc.Spec.ParametersRef.Kind)]; !ok { - err = field.NotSupported(path.Child("kind"), string(gc.Spec.ParametersRef.Kind), []string{kinds.NginxProxy}) - } else if npCfg == nil { - err = field.NotFound(path.Child("name"), gc.Spec.ParametersRef.Name) - conds = append(conds, staticConds.NewGatewayClassRefNotFound()) - } else if !npCfg.Valid { - err = errors.New(npCfg.ErrMsgs.ToAggregate().Error()) - } + supportedVersionConds, versionsValid := gatewayclass.ValidateCRDVersions(crdVersions) + conds = append(conds, supportedVersionConds...) - if err != nil { - conds = append(conds, staticConds.NewGatewayClassInvalidParameters(err.Error())) - } else { - conds = append(conds, staticConds.NewGatewayClassResolvedRefs()) - } + if gc.Spec.ParametersRef == nil { + return conds, versionsValid } - supportedVersionConds, versionsValid := gatewayclass.ValidateCRDVersions(crdVersions) + path := field.NewPath("spec").Child("parametersRef") + refConds := validateGatewayClassParametersRef(path, *gc.Spec.ParametersRef) + + // return early since parametersRef isn't valid + if len(refConds) > 0 { + conds = append(conds, refConds...) 
+ return conds, versionsValid + } + + if npCfg == nil { + conds = append( + conds, + staticConds.NewGatewayClassRefNotFound(), + staticConds.NewGatewayClassInvalidParameters( + field.NotFound(path.Child("name"), gc.Spec.ParametersRef.Name).Error(), + ), + ) + return conds, versionsValid + } + + if !npCfg.Valid { + msg := npCfg.ErrMsgs.ToAggregate().Error() + conds = append( + conds, + staticConds.NewGatewayClassRefInvalid(msg), + staticConds.NewGatewayClassInvalidParameters(msg), + ) + return conds, versionsValid + } - return append(conds, supportedVersionConds...), versionsValid + return append(conds, staticConds.NewGatewayClassResolvedRefs()), versionsValid } var supportedParamKinds = map[string]struct{}{ diff --git a/internal/mode/static/state/graph/gatewayclass_test.go b/internal/mode/static/state/graph/gatewayclass_test.go index 1f1454e838..3d26b6b1fd 100644 --- a/internal/mode/static/state/graph/gatewayclass_test.go +++ b/internal/mode/static/state/graph/gatewayclass_test.go @@ -10,7 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" @@ -127,17 +127,32 @@ func TestProcessGatewayClasses(t *testing.T) { func TestBuildGatewayClass(t *testing.T) { t.Parallel() validGC := &v1.GatewayClass{} + npNsName := types.NamespacedName{Namespace: "test", Name: "nginx-proxy"} + + np := &ngfAPIv1alpha2.NginxProxy{ + TypeMeta: metav1.TypeMeta{ + Kind: kinds.NginxProxy, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + ServiceName: helpers.GetPointer("my-svc"), + }, + }, + } gcWithParams := &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: &v1.ParametersReference{ Kind: v1.Kind(kinds.NginxProxy), - Namespace: helpers.GetPointer(v1.Namespace("test")), - Name: "nginx-proxy", + Namespace: helpers.GetPointer(v1.Namespace(npNsName.Namespace)), + Name: npNsName.Name, }, }, } + gcWithParamsNoNamespace := gcWithParams.DeepCopy() + gcWithParamsNoNamespace.Spec.ParametersRef.Namespace = nil + gcWithInvalidKind := &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: &v1.ParametersReference{ @@ -168,12 +183,11 @@ func TestBuildGatewayClass(t *testing.T) { } tests := []struct { - gc *v1.GatewayClass - np *NginxProxy - crdMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata - expected *GatewayClass - name string - expNPInvalid bool + gc *v1.GatewayClass + nps map[types.NamespacedName]*NginxProxy + crdMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata + expected *GatewayClass + name string }{ { gc: validGC, @@ -191,46 +205,54 @@ func TestBuildGatewayClass(t *testing.T) { }, { gc: gcWithParams, - np: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - TypeMeta: metav1.TypeMeta{ - Kind: kinds.NginxProxy, - }, - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - ServiceName: helpers.GetPointer("my-svc"), - }, - }, + nps: map[types.NamespacedName]*NginxProxy{ + npNsName: { + Source: np, + Valid: true, }, - Valid: true, }, expected: &GatewayClass{ Source: gcWithParams, Valid: true, Conditions: []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + NginxProxy: &NginxProxy{ + Valid: true, + Source: np, + }, }, name: "valid 
gatewayclass with paramsRef", }, { - gc: gcWithInvalidKind, - np: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - TypeMeta: metav1.TypeMeta{ - Kind: kinds.NginxProxy, - }, + gc: gcWithParamsNoNamespace, + expected: &GatewayClass{ + Source: gcWithParamsNoNamespace, + Valid: true, + Conditions: []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid( + "spec.parametersRef.namespace: Required value: ParametersRef must specify Namespace", + ), + staticConds.NewGatewayClassInvalidParameters( + "spec.parametersRef.namespace: Required value: ParametersRef must specify Namespace", + ), }, - Valid: true, }, + name: "valid gatewayclass with paramsRef missing namespace", + }, + { + gc: gcWithInvalidKind, expected: &GatewayClass{ Source: gcWithInvalidKind, Valid: true, Conditions: []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid( + "spec.parametersRef.kind: Unsupported value: \"Invalid\": supported values: \"NginxProxy\"", + ), staticConds.NewGatewayClassInvalidParameters( "spec.parametersRef.kind: Unsupported value: \"Invalid\": supported values: \"NginxProxy\"", ), }, }, - name: "invalid gatewayclass with unsupported paramsRef Kind", + name: "valid gatewayclass with unsupported paramsRef Kind", }, { gc: gcWithParams, @@ -244,38 +266,57 @@ func TestBuildGatewayClass(t *testing.T) { ), }, }, - expNPInvalid: true, - name: "invalid gatewayclass with paramsRef resource that doesn't exist", + name: "valid gatewayclass with paramsRef resource that doesn't exist", }, { gc: gcWithParams, - np: &NginxProxy{ - Valid: false, - ErrMsgs: field.ErrorList{ - field.Invalid( - field.NewPath("spec", "telemetry", "serviceName"), - "my-svc", - "error", - ), - field.Invalid( - field.NewPath("spec", "telemetry", "exporter", "endpoint"), - "my-endpoint", - "error", - ), + nps: map[types.NamespacedName]*NginxProxy{ + npNsName: { + Valid: false, + ErrMsgs: field.ErrorList{ + field.Invalid( + field.NewPath("spec", "telemetry", "serviceName"), + "my-svc", + "error", + ), + field.Invalid( + field.NewPath("spec", "telemetry", "exporter", "endpoint"), + "my-endpoint", + "error", + ), + }, }, }, expected: &GatewayClass{ Source: gcWithParams, Valid: true, Conditions: []conditions.Condition{ + staticConds.NewGatewayClassRefInvalid( + "[spec.telemetry.serviceName: Invalid value: \"my-svc\": error" + + ", spec.telemetry.exporter.endpoint: Invalid value: \"my-endpoint\": error]", + ), staticConds.NewGatewayClassInvalidParameters( "[spec.telemetry.serviceName: Invalid value: \"my-svc\": error" + ", spec.telemetry.exporter.endpoint: Invalid value: \"my-endpoint\": error]", ), }, + NginxProxy: &NginxProxy{ + Valid: false, + ErrMsgs: field.ErrorList{ + field.Invalid( + field.NewPath("spec", "telemetry", "serviceName"), + "my-svc", + "error", + ), + field.Invalid( + field.NewPath("spec", "telemetry", "exporter", "endpoint"), + "my-endpoint", + "error", + ), + }, + }, }, - expNPInvalid: true, - name: "invalid gatewayclass with invalid paramsRef resource", + name: "valid gatewayclass with invalid paramsRef resource", }, { gc: validGC, @@ -294,11 +335,8 @@ func TestBuildGatewayClass(t *testing.T) { t.Parallel() g := NewWithT(t) - result := buildGatewayClass(test.gc, test.np, test.crdMetadata) + result := buildGatewayClass(test.gc, test.nps, test.crdMetadata) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) - if test.np != nil { - g.Expect(test.np.Valid).ToNot(Equal(test.expNPInvalid)) - } }) } } diff --git a/internal/mode/static/state/graph/graph.go b/internal/mode/static/state/graph/graph.go index 
834eff4de0..b05e0c35e7 100644 --- a/internal/mode/static/state/graph/graph.go +++ b/internal/mode/static/state/graph/graph.go @@ -14,7 +14,8 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha3" "sigs.k8s.io/gateway-api/apis/v1beta1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/index" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" @@ -35,10 +36,10 @@ type ClusterState struct { CRDMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata BackendTLSPolicies map[types.NamespacedName]*v1alpha3.BackendTLSPolicy ConfigMaps map[types.NamespacedName]*v1.ConfigMap - NginxProxies map[types.NamespacedName]*ngfAPI.NginxProxy + NginxProxies map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy GRPCRoutes map[types.NamespacedName]*gatewayv1.GRPCRoute NGFPolicies map[PolicyKey]policies.Policy - SnippetsFilters map[types.NamespacedName]*ngfAPI.SnippetsFilter + SnippetsFilters map[types.NamespacedName]*ngfAPIv1alpha1.SnippetsFilter } // Graph is a Graph-like representation of Gateway API resources. @@ -70,10 +71,10 @@ type Graph struct { ReferencedServices map[types.NamespacedName]*ReferencedService // ReferencedCaCertConfigMaps includes ConfigMaps that have been referenced by any BackendTLSPolicies. ReferencedCaCertConfigMaps map[types.NamespacedName]*CaCertConfigMap + // ReferencedNginxProxies includes NginxProxies that have been referenced by a GatewayClass or the winning Gateway. + ReferencedNginxProxies map[types.NamespacedName]*NginxProxy // BackendTLSPolicies holds BackendTLSPolicy resources. BackendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy - // NginxProxy holds the NginxProxy config for the GatewayClass. - NginxProxy *NginxProxy // NGFPolicies holds all NGF Policies. NGFPolicies map[PolicyKey]*Policy // GlobalSettings contains global settings from the current state of the graph that may be @@ -126,9 +127,10 @@ func (g *Graph) IsReferenced(resourceType ngftypes.ObjectType, nsname types.Name // Service Namespace should be the same Namespace as the EndpointSlice _, exists := g.ReferencedServices[types.NamespacedName{Namespace: nsname.Namespace, Name: svcName}] return exists - // NginxProxy reference exists if it is linked to a GatewayClass. - case *ngfAPI.NginxProxy: - return isNginxProxyReferenced(nsname, g.GatewayClass) + // NginxProxy reference exists if the GatewayClass or winning Gateway references it. 
+ case *ngfAPIv1alpha2.NginxProxy: + _, exists := g.ReferencedNginxProxies[nsname] + return exists default: return false } @@ -200,32 +202,39 @@ func BuildGraph( validators validation.Validators, protectedPorts ProtectedPorts, ) *Graph { - var globalSettings *policies.GlobalSettings - processedGwClasses, gcExists := processGatewayClasses(state.GatewayClasses, gcName, controllerName) if gcExists && processedGwClasses.Winner == nil { // configured GatewayClass does not reference this controller return &Graph{} } - npCfg := buildNginxProxy(state.NginxProxies, processedGwClasses.Winner, validators.GenericValidator) - gc := buildGatewayClass(processedGwClasses.Winner, npCfg, state.CRDMetadata) - if gc != nil && npCfg != nil && npCfg.Source != nil { - spec := npCfg.Source.Spec - globalSettings = &policies.GlobalSettings{ - NginxProxyValid: npCfg.Valid, - TelemetryEnabled: spec.Telemetry != nil && spec.Telemetry.Exporter != nil, - } - } + processedGws := processGateways(state.Gateways, gcName) + processedNginxProxies := processNginxProxies( + state.NginxProxies, + validators.GenericValidator, + processedGwClasses.Winner, + processedGws.Winner, + ) + + gc := buildGatewayClass( + processedGwClasses.Winner, + processedNginxProxies, + state.CRDMetadata, + ) secretResolver := newSecretResolver(state.Secrets) configMapResolver := newConfigMapResolver(state.ConfigMaps) - processedGws := processGateways(state.Gateways, gcName) - refGrantResolver := newReferenceGrantResolver(state.ReferenceGrants) - gw := buildGateway(processedGws.Winner, secretResolver, gc, refGrantResolver, protectedPorts) + gw := buildGateway( + processedGws.Winner, + secretResolver, + gc, + refGrantResolver, + protectedPorts, + processedNginxProxies, + ) processedBackendTLSPolicies := processBackendTLSPolicies( state.BackendTLSPolicies, @@ -236,13 +245,17 @@ func BuildGraph( ) processedSnippetsFilters := processSnippetsFilters(state.SnippetsFilters) + var effectiveNginxProxy *EffectiveNginxProxy + if gw != nil { + effectiveNginxProxy = gw.EffectiveNginxProxy + } routes := buildRoutesForGateways( validators.HTTPFieldsValidator, state.HTTPRoutes, state.GRPCRoutes, processedGws.GetAllNsNames(), - npCfg, + effectiveNginxProxy, processedSnippetsFilters, ) @@ -250,17 +263,30 @@ func BuildGraph( state.TLSRoutes, processedGws.GetAllNsNames(), state.Services, - npCfg, + effectiveNginxProxy, refGrantResolver, ) bindRoutesToListeners(routes, l4routes, gw, state.Namespaces) - addBackendRefsToRouteRules(routes, refGrantResolver, state.Services, processedBackendTLSPolicies, npCfg) + addBackendRefsToRouteRules( + routes, + refGrantResolver, + state.Services, + processedBackendTLSPolicies, + effectiveNginxProxy, + ) referencedNamespaces := buildReferencedNamespaces(state.Namespaces, gw) referencedServices := buildReferencedServices(routes, l4routes, gw) + var globalSettings *policies.GlobalSettings + if gw != nil && gw.EffectiveNginxProxy != nil { + globalSettings = &policies.GlobalSettings{ + NginxProxyValid: true, // for effective nginx proxy to be set, the config must be valid + TelemetryEnabled: telemetryEnabledForNginxProxy(gw.EffectiveNginxProxy), + } + } // policies must be processed last because they rely on the state of the other resources in the graph processedPolicies := processPolicies( state.NGFPolicies, @@ -284,8 +310,8 @@ func BuildGraph( ReferencedNamespaces: referencedNamespaces, ReferencedServices: referencedServices, ReferencedCaCertConfigMaps: configMapResolver.getResolvedConfigMaps(), + ReferencedNginxProxies: 
processedNginxProxies, BackendTLSPolicies: processedBackendTLSPolicies, - NginxProxy: npCfg, NGFPolicies: processedPolicies, GlobalSettings: globalSettings, SnippetsFilters: processedSnippetsFilters, diff --git a/internal/mode/static/state/graph/graph_test.go b/internal/mode/static/state/graph/graph_test.go index fe96db4402..54e1456124 100644 --- a/internal/mode/static/state/graph/graph_test.go +++ b/internal/mode/static/state/graph/graph_test.go @@ -17,7 +17,8 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha3" "sigs.k8s.io/gateway-api/apis/v1beta1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/index" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" @@ -113,35 +114,35 @@ func TestBuildGraph(t *testing.T) { } refSnippetsFilterExtensionRef := &gatewayv1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "ref-snippets-filter", } - unreferencedSnippetsFilter := &ngfAPI.SnippetsFilter{ + unreferencedSnippetsFilter := &ngfAPIv1alpha1.SnippetsFilter{ ObjectMeta: metav1.ObjectMeta{ Name: "unref-snippets-filter", Namespace: testNs, }, - Spec: ngfAPI.SnippetsFilterSpec{ - Snippets: []ngfAPI.Snippet{ + Spec: ngfAPIv1alpha1.SnippetsFilterSpec{ + Snippets: []ngfAPIv1alpha1.Snippet{ { - Context: ngfAPI.NginxContextMain, + Context: ngfAPIv1alpha1.NginxContextMain, Value: "main snippet", }, }, }, } - referencedSnippetsFilter := &ngfAPI.SnippetsFilter{ + referencedSnippetsFilter := &ngfAPIv1alpha1.SnippetsFilter{ ObjectMeta: metav1.ObjectMeta{ Name: "ref-snippets-filter", Namespace: testNs, }, - Spec: ngfAPI.SnippetsFilterSpec{ - Snippets: []ngfAPI.Snippet{ + Spec: ngfAPIv1alpha1.SnippetsFilterSpec{ + Snippets: []ngfAPIv1alpha1.Snippet{ { - Context: ngfAPI.NginxContextHTTPServer, + Context: ngfAPIv1alpha1.NginxContextHTTPServer, Value: "server snippet", }, }, @@ -152,8 +153,8 @@ func TestBuildGraph(t *testing.T) { Source: unreferencedSnippetsFilter, Valid: true, Referenced: false, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextMain: "main snippet", + Snippets: map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextMain: "main snippet", }, } @@ -161,8 +162,8 @@ func TestBuildGraph(t *testing.T) { Source: referencedSnippetsFilter, Valid: true, Referenced: true, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextHTTPServer: "server snippet", + Snippets: map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextHTTPServer: "server snippet", }, } @@ -374,7 +375,7 @@ func TestBuildGraph(t *testing.T) { }, } - createGateway := func(name string) *gatewayv1.Gateway { + createGateway := func(name, nginxProxyName string) *gatewayv1.Gateway { return &gatewayv1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Namespace: testNs, @@ -382,6 +383,13 @@ func TestBuildGraph(t *testing.T) { }, Spec: gatewayv1.GatewaySpec{ GatewayClassName: gcName, + Infrastructure: &gatewayv1.GatewayInfrastructure{ + ParametersRef: &gatewayv1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: nginxProxyName, + }, + }, Listeners: []gatewayv1.Listener{ { Name: "listener-80-1", @@ -440,8 +448,35 @@ func TestBuildGraph(t *testing.T) { } } - gw1 := 
createGateway("gateway-1") - gw2 := createGateway("gateway-2") + gw1 := createGateway("gateway-1", "np-1") + gw2 := createGateway("gateway-2", "np-2") + + // np1 is referenced by gw1 and sets the nginx error log to error. + // Since gw1 is the winning gateway, we expect this nginx proxy to be configured and merged with the gateway class + // nginx proxy configuration. + np1 := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-1", + Namespace: testNs, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + }, + } + + // np2 is referenced by gw2 and sets the IPFamily to IPv6. + // Since gw2 is not the winning gateway, we do not expect this nginx proxy to be configured. + np2 := &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np-2", + Namespace: testNs, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), + }, + } svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -532,20 +567,22 @@ func TestBuildGraph(t *testing.T) { }, } - proxy := &ngfAPI.NginxProxy{ + // npGlobal is referenced by the gateway class, and we expect it to be configured and merged with np1. + npGlobal := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-proxy", + Name: "np-global", + Namespace: testNs, }, - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - Exporter: &ngfAPI.TelemetryExporter{ - Endpoint: "1.2.3.4:123", - Interval: helpers.GetPointer(ngfAPI.Duration("5s")), + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), BatchSize: helpers.GetPointer(int32(512)), BatchCount: helpers.GetPointer(int32(4)), }, ServiceName: helpers.GetPointer("my-svc"), - SpanAttributes: []ngfAPI.SpanAttribute{ + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ {Key: "key", Value: "value"}, }, }, @@ -559,13 +596,13 @@ func TestBuildGraph(t *testing.T) { // Testing one type of policy per attachment point should suffice. 
polGVK := schema.GroupVersionKind{Kind: kinds.ClientSettingsPolicy} hrPolicyKey := PolicyKey{GVK: polGVK, NsName: types.NamespacedName{Namespace: testNs, Name: "hrPolicy"}} - hrPolicy := &ngfAPI.ClientSettingsPolicy{ + hrPolicy := &ngfAPIv1alpha1.ClientSettingsPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "hrPolicy", Namespace: testNs, }, TypeMeta: metav1.TypeMeta{Kind: kinds.ClientSettingsPolicy}, - Spec: ngfAPI.ClientSettingsPolicySpec{ + Spec: ngfAPIv1alpha1.ClientSettingsPolicySpec{ TargetRef: createTestRef(kinds.HTTPRoute, gatewayv1.GroupName, "hr-1"), }, } @@ -592,13 +629,13 @@ func TestBuildGraph(t *testing.T) { } gwPolicyKey := PolicyKey{GVK: polGVK, NsName: types.NamespacedName{Namespace: testNs, Name: "gwPolicy"}} - gwPolicy := &ngfAPI.ClientSettingsPolicy{ + gwPolicy := &ngfAPIv1alpha1.ClientSettingsPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "gwPolicy", Namespace: testNs, }, TypeMeta: metav1.TypeMeta{Kind: kinds.ClientSettingsPolicy}, - Spec: ngfAPI.ClientSettingsPolicySpec{ + Spec: ngfAPIv1alpha1.ClientSettingsPolicySpec{ TargetRef: createTestRef(kinds.Gateway, gatewayv1.GroupName, "gateway-1"), }, } @@ -667,14 +704,16 @@ func TestBuildGraph(t *testing.T) { ConfigMaps: map[types.NamespacedName]*v1.ConfigMap{ client.ObjectKeyFromObject(cm): cm, }, - NginxProxies: map[types.NamespacedName]*ngfAPI.NginxProxy{ - client.ObjectKeyFromObject(proxy): proxy, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(npGlobal): npGlobal, + client.ObjectKeyFromObject(np1): np1, + client.ObjectKeyFromObject(np2): np2, }, NGFPolicies: map[PolicyKey]policies.Policy{ hrPolicyKey: hrPolicy, gwPolicyKey: gwPolicy, }, - SnippetsFilters: map[types.NamespacedName]*ngfAPI.SnippetsFilter{ + SnippetsFilters: map[types.NamespacedName]*ngfAPIv1alpha1.SnippetsFilter{ client.ObjectKeyFromObject(unreferencedSnippetsFilter): unreferencedSnippetsFilter, client.ObjectKeyFromObject(referencedSnippetsFilter): referencedSnippetsFilter, }, @@ -826,6 +865,10 @@ func TestBuildGraph(t *testing.T) { Source: gc, Valid: true, Conditions: []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + NginxProxy: &NginxProxy{ + Source: npGlobal, + Valid: true, + }, }, Gateway: &Gateway{ Source: gw1, @@ -878,6 +921,28 @@ func TestBuildGraph(t *testing.T) { }, Valid: true, Policies: []*Policy{processedGwPolicy}, + NginxProxy: &NginxProxy{ + Source: np1, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, + }, + }, + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, }, IgnoredGateways: map[types.NamespacedName]*gatewayv1.Gateway{ {Namespace: testNs, Name: "gateway-2"}: gw2, @@ -918,9 +983,15 @@ func TestBuildGraph(t *testing.T) { BackendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ client.ObjectKeyFromObject(btp.Source): &btp, }, - NginxProxy: &NginxProxy{ - Source: proxy, - Valid: true, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(npGlobal): { + Source: 
npGlobal, + Valid: true, + }, + client.ObjectKeyFromObject(np1): { + Source: np1, + Valid: true, + }, }, NGFPolicies: map[PolicyKey]*Policy{ hrPolicyKey: processedRoutePolicy, @@ -953,9 +1024,10 @@ func TestBuildGraph(t *testing.T) { Spec: gatewayv1.GatewayClassSpec{ ControllerName: controllerName, ParametersRef: &gatewayv1.ParametersReference{ - Group: gatewayv1.Group("gateway.nginx.org"), - Kind: gatewayv1.Kind(kinds.NginxProxy), - Name: "nginx-proxy", + Group: gatewayv1.Group("gateway.nginx.org"), + Kind: gatewayv1.Kind(kinds.NginxProxy), + Name: "np-global", + Namespace: helpers.GetPointer(gatewayv1.Namespace(testNs)), }, }, } @@ -1134,27 +1206,15 @@ func TestIsReferenced(t *testing.T) { }, } - gcWithNginxProxy := &GatewayClass{ - Source: &gatewayv1.GatewayClass{ - Spec: gatewayv1.GatewayClassSpec{ - ParametersRef: &gatewayv1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: gatewayv1.Kind(kinds.NginxProxy), - Name: "nginx-proxy-in-gc", - }, - }, - }, - } - - npNotInGatewayClass := &ngfAPI.NginxProxy{ + npNotReferenced := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-proxy", + Name: "nginx-proxy-not-ref", }, } - npInGatewayClass := &ngfAPI.NginxProxy{ + npReferenced := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-proxy-in-gc", + Name: "nginx-proxy-ref", }, } @@ -1179,6 +1239,11 @@ func TestIsReferenced(t *testing.T) { }), }, }, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(npReferenced): { + Source: npReferenced, + }, + }, } tests := []struct { @@ -1309,16 +1374,14 @@ func TestIsReferenced(t *testing.T) { // NginxProxy tests { - name: "NginxProxy is referenced in GatewayClass", - resource: npInGatewayClass, - gc: gcWithNginxProxy, + name: "NginxProxy is referenced", + resource: npReferenced, graph: graph, expected: true, }, { - name: "NginxProxy is not referenced in GatewayClass", - resource: npNotInGatewayClass, - gc: gcWithNginxProxy, + name: "NginxProxy is not referenced", + resource: npNotReferenced, graph: graph, expected: false, }, diff --git a/internal/mode/static/state/graph/grpcroute_test.go b/internal/mode/static/state/graph/grpcroute_test.go index 5417208d9e..e256e4632e 100644 --- a/internal/mode/static/state/graph/grpcroute_test.go +++ b/internal/mode/static/state/graph/grpcroute_test.go @@ -10,7 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" @@ -89,7 +89,7 @@ func TestBuildGRPCRoutes(t *testing.T) { ExtensionRef: &v1.LocalObjectReference{ Name: "sf", Kind: kinds.SnippetsFilter, - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, }, } @@ -111,15 +111,15 @@ func TestBuildGRPCRoutes(t *testing.T) { client.ObjectKeyFromObject(grWrongGateway): grWrongGateway, } - sf := &ngfAPI.SnippetsFilter{ + sf := &ngfAPIv1alpha1.SnippetsFilter{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", Name: "sf", }, - Spec: ngfAPI.SnippetsFilterSpec{ - Snippets: []ngfAPI.Snippet{ + Spec: ngfAPIv1alpha1.SnippetsFilterSpec{ + Snippets: []ngfAPIv1alpha1.Snippet{ { - Context: ngfAPI.NginxContextHTTP, + Context: ngfAPIv1alpha1.NginxContextHTTP, Value: "http snippet", }, }, @@ 
-159,8 +159,8 @@ func TestBuildGRPCRoutes(t *testing.T) { ResolvedExtensionRef: &ExtensionRefFilter{ SnippetsFilter: &SnippetsFilter{ Source: sf, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextHTTP: "http snippet", + Snippets: map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextHTTP: "http snippet", }, Valid: true, Referenced: true, @@ -195,12 +195,8 @@ func TestBuildGRPCRoutes(t *testing.T) { validator := &validationfakes.FakeHTTPFieldsValidator{} - npCfg := &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - DisableHTTP2: false, - }, - }, + npCfg := &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), } for _, test := range tests { @@ -212,8 +208,8 @@ func TestBuildGRPCRoutes(t *testing.T) { client.ObjectKeyFromObject(sf): { Source: sf, Valid: true, - Snippets: map[ngfAPI.NginxContext]string{ - ngfAPI.NginxContextHTTP: "http snippet", + Snippets: map[ngfAPIv1alpha1.NginxContext]string{ + ngfAPIv1alpha1.NginxContextHTTP: "http snippet", }, }, } @@ -353,7 +349,7 @@ func TestBuildGRPCRoute(t *testing.T) { grValidFilterRule := createGRPCMethodMatch("myService", "myMethod", "Exact") grValidHeaderMatch := createGRPCHeadersMatch("RegularExpression", "MyHeader", "headers-[a-z]+") validSnippetsFilterRef := &v1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "sf", } @@ -414,7 +410,7 @@ func TestBuildGRPCRoute(t *testing.T) { { Type: v1.GRPCRouteFilterExtensionRef, ExtensionRef: &v1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "does-not-exist", }, @@ -433,7 +429,7 @@ func TestBuildGRPCRoute(t *testing.T) { { Type: v1.GRPCRouteFilterExtensionRef, ExtensionRef: &v1.LocalObjectReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha1.GroupName, Kind: kinds.SnippetsFilter, Name: "does-not-exist", }, diff --git a/internal/mode/static/state/graph/nginxproxy.go b/internal/mode/static/state/graph/nginxproxy.go index 831a205120..3b013b8f39 100644 --- a/internal/mode/static/state/graph/nginxproxy.go +++ b/internal/mode/static/state/graph/nginxproxy.go @@ -1,15 +1,18 @@ package graph import ( + "encoding/json" + "fmt" "slices" "k8s.io/apimachinery/pkg/types" k8svalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" ) @@ -17,45 +20,169 @@ import ( // NginxProxy represents the NginxProxy resource. type NginxProxy struct { // Source is the source resource. - Source *ngfAPI.NginxProxy + Source *ngfAPIv1alpha2.NginxProxy // ErrMsgs contains the validation errors if they exist, to be included in the GatewayClass condition. ErrMsgs field.ErrorList // Valid shows whether the NginxProxy is valid. Valid bool } -// buildNginxProxy validates and returns the NginxProxy associated with the GatewayClass (if it exists). 
-func buildNginxProxy( - nps map[types.NamespacedName]*ngfAPI.NginxProxy, - gc *v1.GatewayClass, +// EffectiveNginxProxy holds the result of merging the NginxProxySpec on this resource with the NginxProxySpec on the +// GatewayClass resource. This is the effective set of config that should be applied to the Gateway. +type EffectiveNginxProxy ngfAPIv1alpha2.NginxProxySpec + +// buildEffectiveNginxProxy builds the effective NginxProxy for the Gateway by merging the GatewayClass and Gateway +// NginxProxy resources. Fields specified on the Gateway NginxProxy override those set on the GatewayClass NginxProxy. +func buildEffectiveNginxProxy(gatewayClassNp, gatewayNp *NginxProxy) *EffectiveNginxProxy { + gcNpValid, gwNpValid := nginxProxyValid(gatewayClassNp), nginxProxyValid(gatewayNp) + if !gcNpValid && !gwNpValid { + return nil + } + + if !gcNpValid { + enp := EffectiveNginxProxy(*gatewayNp.Source.Spec.DeepCopy()) + return &enp + } + + if !gwNpValid { + enp := EffectiveNginxProxy(*gatewayClassNp.Source.Spec.DeepCopy()) + return &enp + } + + global := EffectiveNginxProxy(*gatewayClassNp.Source.Spec.DeepCopy()) + local := EffectiveNginxProxy(*gatewayNp.Source.Spec.DeepCopy()) + + // by marshaling the local config and then unmarshaling on top of the global config, + // we ensure that any unset local values are set with the global values + localBytes, err := json.Marshal(local) + if err != nil { + panic( + fmt.Sprintf( + "could not marshal NginxProxy resource referenced by Gateway %s", + client.ObjectKeyFromObject(gatewayNp.Source), + ), + ) + } + + err = json.Unmarshal(localBytes, &global) + if err != nil { + panic( + fmt.Sprintf( + "could not unmarshal NginxProxy resource referenced by GatewayClass %s", + client.ObjectKeyFromObject(gatewayClassNp.Source), + ), + ) + } + + // this json trick doesn't work for unsetting slices, so we need to do that manually. 
+ if local.Telemetry != nil { + if local.Telemetry.DisabledFeatures != nil && len(local.Telemetry.DisabledFeatures) == 0 { + global.Telemetry.DisabledFeatures = []ngfAPIv1alpha2.DisableTelemetryFeature{} + } + + if local.Telemetry.SpanAttributes != nil && len(local.Telemetry.SpanAttributes) == 0 { + global.Telemetry.SpanAttributes = []ngfAPIv1alpha1.SpanAttribute{} + } + } + + if local.RewriteClientIP != nil { + if local.RewriteClientIP.TrustedAddresses != nil && len(local.RewriteClientIP.TrustedAddresses) == 0 { + global.RewriteClientIP.TrustedAddresses = []ngfAPIv1alpha2.RewriteClientIPAddress{} + } + } + + return &global +} + +func nginxProxyValid(np *NginxProxy) bool { + return np != nil && np.Source != nil && np.Valid +} + +func telemetryEnabledForNginxProxy(np *EffectiveNginxProxy) bool { + if np.Telemetry == nil || np.Telemetry.Exporter == nil || np.Telemetry.Exporter.Endpoint == nil { + return false + } + + if slices.Contains(np.Telemetry.DisabledFeatures, ngfAPIv1alpha2.DisableTracing) { + return false + } + + return true +} + +func processNginxProxies( + nps map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy, validator validation.GenericValidator, -) *NginxProxy { + gc *v1.GatewayClass, + winningGateway *v1.Gateway, +) map[types.NamespacedName]*NginxProxy { + referencedNginxProxies := make(map[types.NamespacedName]*NginxProxy) + if gcReferencesAnyNginxProxy(gc) { - npCfg := nps[types.NamespacedName{Name: gc.Spec.ParametersRef.Name}] - if npCfg != nil { - errs := validateNginxProxy(validator, npCfg) - - return &NginxProxy{ - Source: npCfg, - Valid: len(errs) == 0, - ErrMsgs: errs, + // we will ignore references without namespaces + // the gateway class status will contain an error message about the missing namespace + if gc.Spec.ParametersRef.Namespace != nil { + refNp := types.NamespacedName{ + Name: gc.Spec.ParametersRef.Name, + Namespace: string(*gc.Spec.ParametersRef.Namespace), + } + + if np, ok := nps[refNp]; ok { + referencedNginxProxies[refNp] = buildNginxProxy(np, validator) } } } - return nil + if gwReferencesAnyNginxProxy(winningGateway) { + refNp := types.NamespacedName{ + Name: winningGateway.Spec.Infrastructure.ParametersRef.Name, + Namespace: winningGateway.Namespace, + } + + if np, ok := nps[refNp]; ok { + referencedNginxProxies[refNp] = buildNginxProxy(np, validator) + } + } + + if len(referencedNginxProxies) == 0 { + return nil + } + + return referencedNginxProxies } -// isNginxProxyReferenced returns whether or not a specific NginxProxy is referenced in the GatewayClass. -func isNginxProxyReferenced(npNSName types.NamespacedName, gc *GatewayClass) bool { - return gc != nil && gcReferencesAnyNginxProxy(gc.Source) && gc.Source.Spec.ParametersRef.Name == npNSName.Name +// buildNginxProxy validates and returns the NginxProxy associated with the GatewayClass (if it exists). +func buildNginxProxy( + np *ngfAPIv1alpha2.NginxProxy, + validator validation.GenericValidator, +) *NginxProxy { + if np != nil { + errs := validateNginxProxy(validator, np) + + return &NginxProxy{ + Source: np, + Valid: len(errs) == 0, + ErrMsgs: errs, + } + } + + return nil } // gcReferencesNginxProxy returns whether a GatewayClass references any NginxProxy resource. 
func gcReferencesAnyNginxProxy(gc *v1.GatewayClass) bool { if gc != nil { ref := gc.Spec.ParametersRef - return ref != nil && ref.Group == ngfAPI.GroupName && ref.Kind == v1.Kind(kinds.NginxProxy) + return ref != nil && ref.Group == ngfAPIv1alpha2.GroupName && ref.Kind == kinds.NginxProxy + } + + return false +} + +func gwReferencesAnyNginxProxy(gw *v1.Gateway) bool { + if gw != nil && gw.Spec.Infrastructure != nil { + ref := gw.Spec.Infrastructure.ParametersRef + return ref != nil && ref.Group == ngfAPIv1alpha2.GroupName && ref.Kind == kinds.NginxProxy } return false @@ -64,7 +191,7 @@ func gcReferencesAnyNginxProxy(gc *v1.GatewayClass) bool { // validateNginxProxy performs re-validation on string values in the case of CRD validation failure. func validateNginxProxy( validator validation.GenericValidator, - npCfg *ngfAPI.NginxProxy, + npCfg *ngfAPIv1alpha2.NginxProxy, ) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -85,8 +212,8 @@ func validateNginxProxy( exp := telemetry.Exporter expPath := telPath.Child("exporter") - if exp.Endpoint != "" { - if err := validator.ValidateEndpoint(exp.Endpoint); err != nil { + if exp.Endpoint != nil { + if err := validator.ValidateEndpoint(*exp.Endpoint); err != nil { allErrs = append(allErrs, field.Invalid(expPath.Child("endpoint"), exp.Endpoint, err.Error())) } } @@ -116,17 +243,15 @@ func validateNginxProxy( ipFamily := npCfg.Spec.IPFamily ipFamilyPath := spec.Child("ipFamily") switch *ipFamily { - case ngfAPI.Dual, ngfAPI.IPv4, ngfAPI.IPv6: + case ngfAPIv1alpha2.Dual, ngfAPIv1alpha2.IPv4, ngfAPIv1alpha2.IPv6: default: allErrs = append( allErrs, field.NotSupported( ipFamilyPath, ipFamily, - []string{string(ngfAPI.Dual), string(ngfAPI.IPv4), string(ngfAPI.IPv6)})) + []string{string(ngfAPIv1alpha2.Dual), string(ngfAPIv1alpha2.IPv4), string(ngfAPIv1alpha2.IPv6)})) } - } else { - npCfg.Spec.IPFamily = helpers.GetPointer[ngfAPI.IPFamilyType](ngfAPI.Dual) } allErrs = append(allErrs, validateLogging(npCfg)...) 
@@ -138,7 +263,7 @@ func validateNginxProxy( return allErrs } -func validateLogging(npCfg *ngfAPI.NginxProxy) field.ErrorList { +func validateLogging(npCfg *ngfAPIv1alpha2.NginxProxy) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -150,14 +275,14 @@ func validateLogging(npCfg *ngfAPI.NginxProxy) field.ErrorList { errLevel := string(*logging.ErrorLevel) validLogLevels := []string{ - string(ngfAPI.NginxLogLevelDebug), - string(ngfAPI.NginxLogLevelInfo), - string(ngfAPI.NginxLogLevelNotice), - string(ngfAPI.NginxLogLevelWarn), - string(ngfAPI.NginxLogLevelError), - string(ngfAPI.NginxLogLevelCrit), - string(ngfAPI.NginxLogLevelAlert), - string(ngfAPI.NginxLogLevelEmerg), + string(ngfAPIv1alpha2.NginxLogLevelDebug), + string(ngfAPIv1alpha2.NginxLogLevelInfo), + string(ngfAPIv1alpha2.NginxLogLevelNotice), + string(ngfAPIv1alpha2.NginxLogLevelWarn), + string(ngfAPIv1alpha2.NginxLogLevelError), + string(ngfAPIv1alpha2.NginxLogLevelCrit), + string(ngfAPIv1alpha2.NginxLogLevelAlert), + string(ngfAPIv1alpha2.NginxLogLevelEmerg), } if !slices.Contains(validLogLevels, errLevel) { @@ -175,7 +300,7 @@ func validateLogging(npCfg *ngfAPI.NginxProxy) field.ErrorList { return allErrs } -func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { +func validateRewriteClientIP(npCfg *ngfAPIv1alpha2.NginxProxy) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -194,14 +319,17 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { } switch mode { - case ngfAPI.RewriteClientIPModeProxyProtocol, ngfAPI.RewriteClientIPModeXForwardedFor: + case ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol, ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor: default: allErrs = append( allErrs, field.NotSupported( rewriteClientIPPath.Child("mode"), mode, - []string{string(ngfAPI.RewriteClientIPModeProxyProtocol), string(ngfAPI.RewriteClientIPModeXForwardedFor)}, + []string{ + string(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + string(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + }, ), ) } @@ -218,15 +346,15 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { valuePath := trustedAddressesPath.Child("value") switch addr.Type { - case ngfAPI.RewriteClientIPCIDRAddressType: + case ngfAPIv1alpha2.RewriteClientIPCIDRAddressType: if err := k8svalidation.IsValidCIDR(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) } - case ngfAPI.RewriteClientIPIPAddressType: + case ngfAPIv1alpha2.RewriteClientIPIPAddressType: if err := k8svalidation.IsValidIP(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) 
} - case ngfAPI.RewriteClientIPHostnameAddressType: + case ngfAPIv1alpha2.RewriteClientIPHostnameAddressType: if errs := k8svalidation.IsDNS1123Subdomain(addr.Value); len(errs) > 0 { for _, e := range errs { allErrs = append(allErrs, field.Invalid(valuePath, addr.Value, e)) @@ -238,9 +366,9 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { field.NotSupported(trustedAddressesPath.Child("type"), addr.Type, []string{ - string(ngfAPI.RewriteClientIPCIDRAddressType), - string(ngfAPI.RewriteClientIPIPAddressType), - string(ngfAPI.RewriteClientIPHostnameAddressType), + string(ngfAPIv1alpha2.RewriteClientIPCIDRAddressType), + string(ngfAPIv1alpha2.RewriteClientIPIPAddressType), + string(ngfAPIv1alpha2.RewriteClientIPHostnameAddressType), }, ), ) @@ -251,7 +379,7 @@ func validateRewriteClientIP(npCfg *ngfAPI.NginxProxy) field.ErrorList { return allErrs } -func validateNginxPlus(npCfg *ngfAPI.NginxProxy) field.ErrorList { +func validateNginxPlus(npCfg *ngfAPIv1alpha2.NginxProxy) field.ErrorList { var allErrs field.ErrorList spec := field.NewPath("spec") @@ -264,11 +392,11 @@ func validateNginxPlus(npCfg *ngfAPI.NginxProxy) field.ErrorList { valuePath := nginxPlusPath.Child("value") switch addr.Type { - case ngfAPI.NginxPlusAllowCIDRAddressType: + case ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType: if err := k8svalidation.IsValidCIDR(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) } - case ngfAPI.NginxPlusAllowIPAddressType: + case ngfAPIv1alpha2.NginxPlusAllowIPAddressType: if err := k8svalidation.IsValidIP(valuePath, addr.Value); err != nil { allErrs = append(allErrs, err...) } @@ -278,8 +406,8 @@ func validateNginxPlus(npCfg *ngfAPI.NginxProxy) field.ErrorList { field.NotSupported(nginxPlusPath.Child("type"), addr.Type, []string{ - string(ngfAPI.NginxPlusAllowCIDRAddressType), - string(ngfAPI.NginxPlusAllowIPAddressType), + string(ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType), + string(ngfAPIv1alpha2.NginxPlusAllowIPAddressType), }, ), ) diff --git a/internal/mode/static/state/graph/nginxproxy_test.go b/internal/mode/static/state/graph/nginxproxy_test.go index 325a996321..80c7ef6401 100644 --- a/internal/mode/static/state/graph/nginxproxy_test.go +++ b/internal/mode/static/state/graph/nginxproxy_test.go @@ -7,78 +7,285 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" v1 "sigs.k8s.io/gateway-api/apis/v1" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation/validationfakes" ) -func TestGetNginxProxy(t *testing.T) { +func createValidValidator() *validationfakes.FakeGenericValidator { + v := &validationfakes.FakeGenericValidator{} + v.ValidateEscapedStringNoVarExpansionReturns(nil) + v.ValidateEndpointReturns(nil) + v.ValidateServiceNameReturns(nil) + v.ValidateNginxDurationReturns(nil) + + return v +} + +func createInvalidValidator() *validationfakes.FakeGenericValidator { + v := &validationfakes.FakeGenericValidator{} + v.ValidateEscapedStringNoVarExpansionReturns(errors.New("error")) + v.ValidateEndpointReturns(errors.New("error")) + v.ValidateServiceNameReturns(errors.New("error")) + v.ValidateNginxDurationReturns(errors.New("error")) + + return v +} + +func TestBuildEffectiveNginxProxy(t *testing.T) { t.Parallel() + + newTestNginxProxy := func( + ipFam ngfAPIv1alpha2.IPFamilyType, + disableFeats []ngfAPIv1alpha2.DisableTelemetryFeature, + interval ngfAPIv1alpha1.Duration, + batchSize int32, + batchCount int32, + endpoint string, + serviceName string, + spanAttr ngfAPIv1alpha1.SpanAttribute, + mode ngfAPIv1alpha2.RewriteClientIPModeType, + trustedAddr []ngfAPIv1alpha2.RewriteClientIPAddress, + logLevel ngfAPIv1alpha2.NginxErrorLogLevel, + setIP bool, + disableHTTP bool, + ) *ngfAPIv1alpha2.NginxProxy { + return &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + IPFamily: &ipFam, + Telemetry: &ngfAPIv1alpha2.Telemetry{ + DisabledFeatures: disableFeats, + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Interval: &interval, + BatchSize: &batchSize, + BatchCount: &batchCount, + Endpoint: &endpoint, + }, + ServiceName: &serviceName, + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{spanAttr}, + }, + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: &mode, + SetIPRecursively: &setIP, + TrustedAddresses: trustedAddr, + }, + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: &logLevel, + }, + DisableHTTP2: &disableHTTP, + }, + } + } + + getNginxProxy := func() *ngfAPIv1alpha2.NginxProxy { + return newTestNginxProxy( + ngfAPIv1alpha2.Dual, + []ngfAPIv1alpha2.DisableTelemetryFeature{ngfAPIv1alpha2.DisableTracing}, + "10s", + 10, + 5, + "endpoint:1234", + "my-service", + ngfAPIv1alpha1.SpanAttribute{Key: "key", Value: "val"}, + ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor, + []ngfAPIv1alpha2.RewriteClientIPAddress{ + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1"}, + }, + ngfAPIv1alpha2.NginxLogLevelAlert, + true, + false, + ) + } + + getNginxProxyAllFieldsSetDifferently := func() *ngfAPIv1alpha2.NginxProxy { + return newTestNginxProxy( + ngfAPIv1alpha2.IPv6, + []ngfAPIv1alpha2.DisableTelemetryFeature{}, + "5s", + 8, + 2, + "diff-endpoint:1234", + "diff-service", + ngfAPIv1alpha1.SpanAttribute{Key: "diff-key", Value: "diff-val"}, + ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor, + 
[]ngfAPIv1alpha2.RewriteClientIPAddress{ + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1/24"}, + }, + ngfAPIv1alpha2.NginxLogLevelError, + false, + true, + ) + } + + getExpSpec := func() *EffectiveNginxProxy { + enp := EffectiveNginxProxy(getNginxProxy().Spec) + return &enp + } + + getModifiedExpSpec := func(mod func(*ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy) *EffectiveNginxProxy { + enp := EffectiveNginxProxy(mod(getNginxProxy()).Spec) + return &enp + } + tests := []struct { - nps map[types.NamespacedName]*ngfAPI.NginxProxy - gc *v1.GatewayClass - expNP *NginxProxy - name string + gcNp *NginxProxy + gwNp *NginxProxy + exp *EffectiveNginxProxy + name string }{ { - nps: map[types.NamespacedName]*ngfAPI.NginxProxy{ - {Name: "np1"}: {}, - }, - gc: nil, - expNP: nil, - name: "nil gatewayclass", + name: "both gateway class and gateway nginx proxies are nil", + gcNp: nil, + gwNp: nil, + exp: nil, }, { - nps: map[types.NamespacedName]*ngfAPI.NginxProxy{}, - gc: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "np1", + name: "nil gateway class nginx proxy", + gcNp: nil, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + exp: getExpSpec(), + }, + { + name: "nil gateway class nginx proxy; invalid gateway nginx proxy", + gcNp: nil, + gwNp: &NginxProxy{Valid: false, Source: getNginxProxy()}, + exp: nil, + }, + { + name: "nil gateway class nginx proxy; nil gateway nginx proxy source", + gcNp: nil, + gwNp: &NginxProxy{Valid: true, Source: nil}, + exp: nil, + }, + { + name: "invalid gateway class nginx proxy", + gcNp: &NginxProxy{Valid: false}, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + exp: getExpSpec(), + }, + { + name: "nil gateway class nginx proxy source", + gcNp: &NginxProxy{Valid: true, Source: nil}, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + exp: getExpSpec(), + }, + { + name: "nil gateway nginx proxy", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: nil, + exp: getExpSpec(), + }, + { + name: "invalid gateway nginx proxy", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{Valid: false}, + exp: getExpSpec(), + }, + { + name: "nil gateway nginx proxy source", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{Valid: true, Source: nil}, + exp: getExpSpec(), + }, + { + name: "both have all fields set; gateway values should win", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{Valid: true, Source: getNginxProxyAllFieldsSetDifferently()}, + exp: getModifiedExpSpec(func(_ *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + return getNginxProxyAllFieldsSetDifferently() + }), + }, + { + name: "gateway nginx proxy overrides nginx error log level", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + }, }, }, }, - expNP: nil, - name: "no nginxproxy resources", + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.Logging.ErrorLevel = helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug) + return np + }), }, { - nps: map[types.NamespacedName]*ngfAPI.NginxProxy{ - {Name: "np1"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "np1", - }, - 
}, - {Name: "np2"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "np2", + name: "gateway nginx proxy overrides select telemetry values", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + ServiceName: helpers.GetPointer("new-service-name"), + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + BatchSize: helpers.GetPointer[int32](20), + Endpoint: helpers.GetPointer("new-endpoint"), + }, + }, }, }, }, - gc: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "np2", + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.Telemetry.ServiceName = helpers.GetPointer("new-service-name") + np.Spec.Telemetry.Exporter.Endpoint = helpers.GetPointer("new-endpoint") + np.Spec.Telemetry.Exporter.BatchSize = helpers.GetPointer[int32](20) + return np + }), + }, + { + name: "gateway nginx proxy overrides select rewrite client IP values", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + SetIPRecursively: helpers.GetPointer(false), + }, }, }, }, - expNP: &NginxProxy{ - Source: &ngfAPI.NginxProxy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np2", - }, - Spec: ngfAPI.NginxProxySpec{ - IPFamily: helpers.GetPointer(ngfAPI.Dual), + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.RewriteClientIP.Mode = helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol) + np.Spec.RewriteClientIP.SetIPRecursively = helpers.GetPointer(false) + return np + }), + }, + { + name: "gateway nginx proxy unsets slices values", + gcNp: &NginxProxy{Valid: true, Source: getNginxProxy()}, + gwNp: &NginxProxy{ + Valid: true, + Source: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{}, + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{}, + }, + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{}, + }, }, }, - Valid: true, }, - name: "returns correct resource", + exp: getModifiedExpSpec(func(np *ngfAPIv1alpha2.NginxProxy) *ngfAPIv1alpha2.NginxProxy { + np.Spec.RewriteClientIP.TrustedAddresses = []ngfAPIv1alpha2.RewriteClientIPAddress{} + np.Spec.Telemetry.DisabledFeatures = []ngfAPIv1alpha2.DisableTelemetryFeature{} + np.Spec.Telemetry.SpanAttributes = []ngfAPIv1alpha1.SpanAttribute{} + return np + }), }, } @@ -87,64 +294,71 @@ func TestGetNginxProxy(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildNginxProxy(test.nps, test.gc, &validationfakes.FakeGenericValidator{})).To(Equal(test.expNP)) + enp := buildEffectiveNginxProxy(test.gcNp, test.gwNp) + g.Expect(enp).To(Equal(test.exp)) }) } } -func TestIsNginxProxyReferenced(t *testing.T) { +func TestTelemetryEnabledForNginxProxy(t *testing.T) { t.Parallel() + tests := []struct { - gc *GatewayClass - npName types.NamespacedName - name string - expRes bool + ep *EffectiveNginxProxy + name string + enabled bool }{ { - gc: &GatewayClass{ - Source: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - 
ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "nginx-proxy", - }, - }, + name: "telemetry struct is nil", + ep: &EffectiveNginxProxy{ + Telemetry: nil, + }, + enabled: false, + }, + { + name: "telemetry exporter is nil", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: nil, }, }, - npName: types.NamespacedName{}, - expRes: false, - name: "nil nginxproxy", + enabled: false, }, { - gc: nil, - npName: types.NamespacedName{Name: "nginx-proxy"}, - expRes: false, - name: "nil gatewayclass", + name: "tracing is disabled", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{ + ngfAPIv1alpha2.DisableTracing, + }, + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("new-endpoint"), + }, + }, + }, + enabled: false, }, { - gc: &GatewayClass{ - Source: nil, + name: "exporter endpoint is nil", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: nil, + }, + }, }, - npName: types.NamespacedName{Name: "nginx-proxy"}, - expRes: false, - name: "nil gatewayclass source", + enabled: false, }, { - gc: &GatewayClass{ - Source: &v1.GatewayClass{ - Spec: v1.GatewayClassSpec{ - ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, - Kind: v1.Kind(kinds.NginxProxy), - Name: "nginx-proxy", - }, + name: "normal case; enabled", + ep: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("new-endpoint"), }, }, }, - npName: types.NamespacedName{Name: "nginx-proxy"}, - expRes: true, - name: "references the NginxProxy", + enabled: true, }, } @@ -153,7 +367,162 @@ func TestIsNginxProxyReferenced(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(isNginxProxyReferenced(test.npName, test.gc)).To(Equal(test.expRes)) + enabled := telemetryEnabledForNginxProxy(test.ep) + g.Expect(enabled).To(Equal(test.enabled)) + }) + } +} + +func TestProcessNginxProxies(t *testing.T) { + t.Parallel() + + gatewayClassNpName := types.NamespacedName{Namespace: "gc-ns", Name: "gc-np"} + gatewayNpName := types.NamespacedName{Namespace: "gw-ns", Name: "gw-np"} + unreferencedNpName := types.NamespacedName{Namespace: "test", Name: "unref"} + + getTestNp := func(nsname types.NamespacedName) *ngfAPIv1alpha2.NginxProxy { + return &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nsname.Namespace, + Name: nsname.Name, + }, + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + ServiceName: helpers.GetPointer("service-name"), + }, + }, + } + } + + gateway := &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "gw-ns", + }, + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: gatewayNpName.Name, + }, + }, + }, + } + + gatewayClass := &v1.GatewayClass{ + Spec: v1.GatewayClassSpec{ + ParametersRef: &v1.ParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: gatewayClassNpName.Name, + Namespace: helpers.GetPointer[v1.Namespace]("gc-ns"), + }, + }, + } + + gatewayClassRefMissingNs := &v1.GatewayClass{ + Spec: v1.GatewayClassSpec{ + ParametersRef: &v1.ParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: gatewayClassNpName.Name, + }, + 
}, + } + + getNpMap := func() map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy { + return map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + gatewayClassNpName: getTestNp(gatewayClassNpName), + gatewayNpName: getTestNp(gatewayNpName), + unreferencedNpName: getTestNp(unreferencedNpName), + } + } + + getExpResult := func(valid bool) map[types.NamespacedName]*NginxProxy { + var errMsgs field.ErrorList + if !valid { + errMsgs = field.ErrorList{ + field.Invalid(field.NewPath("spec.telemetry.serviceName"), "service-name", "error"), + } + } + + return map[types.NamespacedName]*NginxProxy{ + gatewayNpName: { + Valid: valid, + ErrMsgs: errMsgs, + Source: getTestNp(gatewayNpName), + }, + gatewayClassNpName: { + Valid: valid, + ErrMsgs: errMsgs, + Source: getTestNp(gatewayClassNpName), + }, + } + } + + tests := []struct { + validator validation.GenericValidator + nps map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy + gc *v1.GatewayClass + gw *v1.Gateway + expResult map[types.NamespacedName]*NginxProxy + name string + }{ + { + name: "no nginx proxies", + nps: nil, + gc: gatewayClass, + gw: gateway, + validator: createValidValidator(), + expResult: nil, + }, + { + name: "gateway class param ref is missing namespace", + nps: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + gatewayClassNpName: getTestNp(gatewayClassNpName), + gatewayNpName: getTestNp(gatewayNpName), + }, + gc: gatewayClassRefMissingNs, + gw: gateway, + validator: createValidValidator(), + expResult: map[types.NamespacedName]*NginxProxy{ + gatewayNpName: { + Valid: true, + Source: getTestNp(gatewayNpName), + }, + }, + }, + { + name: "normal case; both nginx proxies are valid", + nps: getNpMap(), + gc: gatewayClass, + gw: gateway, + validator: createValidValidator(), + expResult: getExpResult(true), + }, + { + name: "normal case; both nginx proxies are invalid", + nps: getNpMap(), + gc: gatewayClass, + gw: gateway, + validator: createInvalidValidator(), + expResult: getExpResult(false), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + result := processNginxProxies( + test.nps, + test.validator, + test.gc, + test.gw, + ) + + g.Expect(helpers.Diff(test.expResult, result)).To(BeEmpty()) }) } } @@ -194,7 +563,7 @@ func TestGCReferencesAnyNginxProxy(t *testing.T) { gc: &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha2.GroupName, Kind: v1.Kind("WrongKind"), Name: "wrong-kind", }, @@ -207,7 +576,7 @@ func TestGCReferencesAnyNginxProxy(t *testing.T) { gc: &v1.GatewayClass{ Spec: v1.GatewayClassSpec{ ParametersRef: &v1.ParametersReference{ - Group: ngfAPI.GroupName, + Group: ngfAPIv1alpha2.GroupName, Kind: v1.Kind(kinds.NginxProxy), Name: "nginx-proxy", }, @@ -228,30 +597,95 @@ func TestGCReferencesAnyNginxProxy(t *testing.T) { } } -func createValidValidator() *validationfakes.FakeGenericValidator { - v := &validationfakes.FakeGenericValidator{} - v.ValidateEscapedStringNoVarExpansionReturns(nil) - v.ValidateEndpointReturns(nil) - v.ValidateServiceNameReturns(nil) - v.ValidateNginxDurationReturns(nil) - - return v -} +func TestGWReferencesAnyNginxProxy(t *testing.T) { + t.Parallel() + tests := []struct { + gw *v1.Gateway + name string + expRes bool + }{ + { + gw: nil, + expRes: false, + name: "nil gateway", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{}, + }, + expRes: false, + name: "nil infrastructure", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ + 
Infrastructure: &v1.GatewayInfrastructure{}, + }, + }, + expRes: false, + name: "nil parametersRef", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: v1.Group("wrong-group"), + Kind: v1.Kind(kinds.NginxProxy), + Name: "wrong-group", + }, + }, + }, + }, + expRes: false, + name: "wrong group name", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: v1.Group(ngfAPIv1alpha2.GroupName), + Kind: v1.Kind("wrong-kind"), + Name: "wrong-kind", + }, + }, + }, + }, + expRes: false, + name: "wrong kind", + }, + { + gw: &v1.Gateway{ + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: v1.Group(ngfAPIv1alpha2.GroupName), + Kind: v1.Kind(kinds.NginxProxy), + Name: "normal", + }, + }, + }, + }, + expRes: true, + name: "references an NginxProxy", + }, + } -func createInvalidValidator() *validationfakes.FakeGenericValidator { - v := &validationfakes.FakeGenericValidator{} - v.ValidateEscapedStringNoVarExpansionReturns(errors.New("error")) - v.ValidateEndpointReturns(errors.New("error")) - v.ValidateServiceNameReturns(errors.New("error")) - v.ValidateNginxDurationReturns(errors.New("error")) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) - return v + g.Expect(gwReferencesAnyNginxProxy(test.gw)).To(Equal(test.expRes)) + }) + } } func TestValidateNginxProxy(t *testing.T) { t.Parallel() tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy validator *validationfakes.FakeGenericValidator name string expErrSubstring string @@ -260,36 +694,36 @@ func TestValidateNginxProxy(t *testing.T) { { name: "valid nginxproxy", validator: createValidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ ServiceName: helpers.GetPointer("my-svc"), - Exporter: &ngfAPI.TelemetryExporter{ - Interval: helpers.GetPointer[ngfAPI.Duration]("5ms"), - Endpoint: "my-endpoint", + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Interval: helpers.GetPointer[ngfAPIv1alpha1.Duration]("5ms"), + Endpoint: helpers.GetPointer("my-endpoint"), }, - SpanAttributes: []ngfAPI.SpanAttribute{ + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ {Key: "key", Value: "value"}, }, }, - IPFamily: helpers.GetPointer[ngfAPI.IPFamilyType](ngfAPI.Dual), - RewriteClientIP: &ngfAPI.RewriteClientIP{ + IPFamily: helpers.GetPointer[ngfAPIv1alpha2.IPFamilyType](ngfAPIv1alpha2.Dual), + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "1.1.1.1", }, { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "example.com", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -298,9 +732,9 @@ func 
TestValidateNginxProxy(t *testing.T) { { name: "invalid serviceName", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ ServiceName: helpers.GetPointer("my-svc"), // any value is invalid by the validator }, }, @@ -311,11 +745,11 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid endpoint", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - Exporter: &ngfAPI.TelemetryExporter{ - Endpoint: "my-endpoint", // any value is invalid by the validator + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-endpoint"), // any value is invalid by the validator }, }, }, @@ -326,11 +760,11 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid interval", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - Exporter: &ngfAPI.TelemetryExporter{ - Interval: helpers.GetPointer[ngfAPI.Duration]( + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Interval: helpers.GetPointer[ngfAPIv1alpha1.Duration]( "my-interval", ), // any value is invalid by the validator }, @@ -343,10 +777,10 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid spanAttributes", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{ - SpanAttributes: []ngfAPI.SpanAttribute{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ {Key: "my-key", Value: "my-value"}, // any value is invalid by the validator }, }, @@ -358,10 +792,10 @@ func TestValidateNginxProxy(t *testing.T) { { name: "invalid ipFamily type", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Telemetry: &ngfAPI.Telemetry{}, - IPFamily: helpers.GetPointer[ngfAPI.IPFamilyType]("invalid"), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Telemetry: &ngfAPIv1alpha2.Telemetry{}, + IPFamily: helpers.GetPointer[ngfAPIv1alpha2.IPFamilyType]("invalid"), }, }, expErrSubstring: "spec.ipFamily", @@ -386,7 +820,7 @@ func TestValidateNginxProxy(t *testing.T) { func TestValidateRewriteClientIP(t *testing.T) { t.Parallel() tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy validator *validationfakes.FakeGenericValidator name string errorString string @@ -395,33 +829,33 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "valid rewriteClientIP", validator: createValidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32", }, { - 
Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.56.32.11/32", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "1.1.1.1", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "2001:db8:a0b:12f0::1", }, { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "example.com", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -430,21 +864,21 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid CIDR in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8::/129", }, { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1/32", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -455,21 +889,21 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid IP address in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "1.2.3.4.5", }, { - Type: ngfAPI.RewriteClientIPIPAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPIPAddressType, Value: "10.0.0.1", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -480,21 +914,21 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid hostname in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "bad-host$%^", }, { - Type: ngfAPI.RewriteClientIPHostnameAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPHostnameAddressType, Value: "example.com", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ 
-507,10 +941,10 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when mode is set and trustedAddresses is empty", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -520,32 +954,32 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when trustedAddresses is greater in length than 16", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: 
ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, }, }, }, @@ -556,17 +990,17 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when mode is not proxyProtocol or XForwardedFor", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeType("invalid")), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeType("invalid")), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32", }, { - Type: ngfAPI.RewriteClientIPCIDRAddressType, + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, Value: "10.0.0.1/32", }, }, @@ -580,10 +1014,10 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid when mode is not proxyProtocol or XForwardedFor and trustedAddresses is empty", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeType("invalid")), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeType("invalid")), }, }, }, @@ -595,17 +1029,17 @@ func TestValidateRewriteClientIP(t *testing.T) { { name: "invalid address type in trustedAddresses", validator: createInvalidValidator(), - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - RewriteClientIP: &ngfAPI.RewriteClientIP{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), - TrustedAddresses: []ngfAPI.RewriteClientIPAddress{ + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ { - Type: ngfAPI.RewriteClientIPAddressType("invalid"), + Type: 
ngfAPIv1alpha2.RewriteClientIPAddressType("invalid"), Value: "2001:db8::/129", }, }, - Mode: helpers.GetPointer(ngfAPI.RewriteClientIPModeProxyProtocol), + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), }, }, }, @@ -631,19 +1065,19 @@ func TestValidateRewriteClientIP(t *testing.T) { func TestValidateLogging(t *testing.T) { t.Parallel() - invalidLogLevel := ngfAPI.NginxErrorLogLevel("invalid-log-level") + invalidLogLevel := ngfAPIv1alpha2.NginxErrorLogLevel("invalid-log-level") tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy name string errorString string expectErrCount int }{ { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelDebug), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, }, }, @@ -652,10 +1086,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelInfo), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelInfo), }, }, }, @@ -664,10 +1098,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelNotice), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelNotice), }, }, }, @@ -676,10 +1110,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelWarn), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelWarn), }, }, }, @@ -688,10 +1122,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelError), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), }, }, }, @@ -700,10 +1134,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelCrit), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelCrit), }, }, }, @@ -712,10 +1146,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelAlert), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: 
helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelAlert), }, }, }, @@ -724,10 +1158,10 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPI.NginxLogLevelEmerg), + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelEmerg), }, }, }, @@ -736,9 +1170,9 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{ + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ ErrorLevel: &invalidLogLevel, }, }, @@ -749,9 +1183,9 @@ func TestValidateLogging(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - Logging: &ngfAPI.NginxLogging{}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{}, }, }, name: "empty log level", @@ -778,20 +1212,20 @@ func TestValidateNginxPlus(t *testing.T) { t.Parallel() tests := []struct { - np *ngfAPI.NginxProxy + np *ngfAPIv1alpha2.NginxProxy name string errorString string expectErrCount int }{ { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "2001:db8:a0b:12f0::1"}, - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/32"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "2001:db8:a0b:12f0::1"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/32"}, }, }, }, @@ -801,12 +1235,12 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 0, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, - {Type: ngfAPI.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/37"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "2001:db8:a0b:12f0::1/32"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowCIDRAddressType, Value: "127.0.0.3/37"}, }, }, }, @@ -817,12 +1251,12 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPI.NginxPlusAllowIPAddressType, Value: "127.0.0.3.5/32"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: 
[]ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3.5/32"}, }, }, }, @@ -833,11 +1267,11 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowAddressType("Hostname"), Value: "example.com"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowAddressType("Hostname"), Value: "example.com"}, }, }, }, @@ -848,11 +1282,11 @@ func TestValidateNginxPlus(t *testing.T) { expectErrCount: 1, }, { - np: &ngfAPI.NginxProxy{ - Spec: ngfAPI.NginxProxySpec{ - NginxPlus: &ngfAPI.NginxPlus{ - AllowedAddresses: []ngfAPI.NginxPlusAllowAddress{ - {Type: ngfAPI.NginxPlusAllowAddressType("invalid"), Value: "example.com"}, + np: &ngfAPIv1alpha2.NginxProxy{ + Spec: ngfAPIv1alpha2.NginxProxySpec{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowAddressType("invalid"), Value: "example.com"}, }, }, }, @@ -877,3 +1311,11 @@ func TestValidateNginxPlus(t *testing.T) { }) } } + +func TestValidateNginxProxy_NilCase(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + // Just testing the nil case for coverage reasons. The rest of the function is covered by other tests. + g.Expect(buildNginxProxy(nil, &validationfakes.FakeGenericValidator{})).To(BeNil()) +} diff --git a/internal/mode/static/state/graph/route_common.go b/internal/mode/static/state/graph/route_common.go index f146a55adb..f40a647c1f 100644 --- a/internal/mode/static/state/graph/route_common.go +++ b/internal/mode/static/state/graph/route_common.go @@ -190,7 +190,7 @@ func buildL4RoutesForGateways( tlsRoutes map[types.NamespacedName]*v1alpha.TLSRoute, gatewayNsNames []types.NamespacedName, services map[types.NamespacedName]*apiv1.Service, - npCfg *NginxProxy, + npCfg *EffectiveNginxProxy, resolver *referenceGrantResolver, ) map[L4RouteKey]*L4Route { if len(gatewayNsNames) == 0 { @@ -219,7 +219,7 @@ func buildRoutesForGateways( httpRoutes map[types.NamespacedName]*v1.HTTPRoute, grpcRoutes map[types.NamespacedName]*v1.GRPCRoute, gatewayNsNames []types.NamespacedName, - npCfg *NginxProxy, + effectiveNginxProxy *EffectiveNginxProxy, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) map[RouteKey]*L7Route { if len(gatewayNsNames) == 0 { @@ -228,7 +228,7 @@ func buildRoutesForGateways( routes := make(map[RouteKey]*L7Route) - http2disabled := isHTTP2Disabled(npCfg) + http2disabled := isHTTP2Disabled(effectiveNginxProxy) for _, route := range httpRoutes { r := buildHTTPRoute(validator, route, gatewayNsNames, snippetsFilters) @@ -257,11 +257,16 @@ func buildRoutesForGateways( return routes } -func isHTTP2Disabled(npCfg *NginxProxy) bool { +func isHTTP2Disabled(npCfg *EffectiveNginxProxy) bool { if npCfg == nil { return false } - return npCfg.Source.Spec.DisableHTTP2 + + if npCfg.DisableHTTP2 == nil { + return false + } + + return *npCfg.DisableHTTP2 } func buildSectionNameRefs( diff --git a/internal/mode/static/state/graph/tlsroute.go b/internal/mode/static/state/graph/tlsroute.go index 78b2378c36..051cd134db 100644 --- a/internal/mode/static/state/graph/tlsroute.go +++ 
b/internal/mode/static/state/graph/tlsroute.go @@ -15,7 +15,7 @@ func buildTLSRoute( gtr *v1alpha2.TLSRoute, gatewayNsNames []types.NamespacedName, services map[types.NamespacedName]*apiv1.Service, - npCfg *NginxProxy, + npCfg *EffectiveNginxProxy, refGrantResolver func(resource toResource) bool, ) *L4Route { r := &L4Route{ @@ -70,7 +70,7 @@ func buildTLSRoute( func validateBackendRefTLSRoute( gtr *v1alpha2.TLSRoute, services map[types.NamespacedName]*apiv1.Service, - npCfg *NginxProxy, + npCfg *EffectiveNginxProxy, refGrantResolver func(resource toResource) bool, ) (BackendRef, *conditions.Condition) { // Length of BackendRefs and Rules is guaranteed to be one due to earlier check in buildTLSRoute diff --git a/internal/mode/static/state/graph/tlsroute_test.go b/internal/mode/static/state/graph/tlsroute_test.go index 73cd8758a1..40af4729aa 100644 --- a/internal/mode/static/state/graph/tlsroute_test.go +++ b/internal/mode/static/state/graph/tlsroute_test.go @@ -10,7 +10,7 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha2" - ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" @@ -271,9 +271,9 @@ func TestBuildTLSRoute(t *testing.T) { gtr *v1alpha2.TLSRoute services map[types.NamespacedName]*apiv1.Service resolver func(resource toResource) bool + npCfg *EffectiveNginxProxy name string gatewayNsNames []types.NamespacedName - npCfg NginxProxy }{ { gtr: duplicateParentRefsGtr, @@ -492,10 +492,7 @@ func TestBuildTLSRoute(t *testing.T) { services: map[types.NamespacedName]*apiv1.Service{ svcNsName: ipv4Svc, }, - npCfg: NginxProxy{ - Source: &ngfAPI.NginxProxy{Spec: ngfAPI.NginxProxySpec{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}}, - Valid: true, - }, + npCfg: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}, resolver: alwaysTrueRefGrantResolver, name: "service and npcfg ip family mismatch", }, @@ -560,7 +557,7 @@ func TestBuildTLSRoute(t *testing.T) { test.gtr, test.gatewayNsNames, test.services, - &test.npCfg, + test.npCfg, test.resolver, ) g.Expect(helpers.Diff(test.expected, r)).To(BeEmpty()) diff --git a/internal/mode/static/status/prepare_requests.go b/internal/mode/static/status/prepare_requests.go index 818445d8d7..46e150a8cb 100644 --- a/internal/mode/static/status/prepare_requests.go +++ b/internal/mode/static/status/prepare_requests.go @@ -293,6 +293,8 @@ func prepareGatewayRequest( } gwConds := staticConds.NewDefaultGatewayConditions() + gwConds = append(gwConds, gateway.Conditions...) + if validListenerCount == 0 { gwConds = append(gwConds, staticConds.NewGatewayNotAcceptedListenersNotValid()...) 
} else if validListenerCount < len(gateway.Listeners) { diff --git a/internal/mode/static/status/prepare_requests_test.go b/internal/mode/static/status/prepare_requests_test.go index 5d7eb9f2ea..5c0a9df34c 100644 --- a/internal/mode/static/status/prepare_requests_test.go +++ b/internal/mode/static/status/prepare_requests_test.go @@ -1134,6 +1134,117 @@ func TestBuildGatewayStatuses(t *testing.T) { }, nginxReloadRes: NginxReloadResult{Error: errors.New("test error")}, }, + { + name: "valid gateway with valid parametersRef; all valid listeners", + gateway: &graph.Gateway{ + Source: createGateway(), + Listeners: []*graph.Listener{ + { + Name: "listener-valid-1", + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{routeKey: {}}, + }, + }, + Valid: true, + Conditions: []conditions.Condition{ + staticConds.NewGatewayResolvedRefs(), + }, + }, + expected: map[types.NamespacedName]v1.GatewayStatus{ + {Namespace: "test", Name: "gateway"}: { + Addresses: addr, + Conditions: []metav1.Condition{ + { + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonAccepted), + Message: "Gateway is accepted", + }, + { + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonProgrammed), + Message: "Gateway is programmed", + }, + { + Type: string(staticConds.GatewayResolvedRefs), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(staticConds.GatewayReasonResolvedRefs), + Message: "ParametersRef resource is resolved", + }, + }, + Listeners: []v1.ListenerStatus{ + { + Name: "listener-valid-1", + AttachedRoutes: 1, + Conditions: validListenerConditions, + }, + }, + }, + }, + }, + { + name: "valid gateway with invalid parametersRef; all valid listeners", + gateway: &graph.Gateway{ + Source: createGateway(), + Listeners: []*graph.Listener{ + { + Name: "listener-valid-1", + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{routeKey: {}}, + }, + }, + Valid: true, + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters("ParametersRef not found"), + }, + }, + expected: map[types.NamespacedName]v1.GatewayStatus{ + {Namespace: "test", Name: "gateway"}: { + Addresses: addr, + Conditions: []metav1.Condition{ + { + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonProgrammed), + Message: "Gateway is programmed", + }, + { + Type: string(staticConds.GatewayResolvedRefs), + Status: metav1.ConditionFalse, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(staticConds.GatewayReasonParamsRefNotFound), + Message: "ParametersRef resource could not be found", + }, + { + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonInvalidParameters), + Message: "Gateway is accepted, but ParametersRef is ignored due to an error: ParametersRef not found", + }, + }, + Listeners: []v1.ListenerStatus{ + { + Name: "listener-valid-1", + AttachedRoutes: 1, + Conditions: validListenerConditions, + }, + }, + }, + }, + }, } for _, test := range tests { diff --git a/internal/mode/static/telemetry/collector.go 
b/internal/mode/static/telemetry/collector.go index a349475a36..facc7dc56a 100644 --- a/internal/mode/static/telemetry/collector.go +++ b/internal/mode/static/telemetry/collector.go @@ -246,10 +246,7 @@ func collectGraphResourceCount( } } - if g.NginxProxy != nil { - ngfResourceCounts.NginxProxyCount = 1 - } - + ngfResourceCounts.NginxProxyCount = int64(len(g.ReferencedNginxProxies)) ngfResourceCounts.SnippetsFilterCount = int64(len(g.SnippetsFilters)) return ngfResourceCounts, nil diff --git a/internal/mode/static/telemetry/collector_test.go b/internal/mode/static/telemetry/collector_test.go index 845cb459fc..2d71dedf90 100644 --- a/internal/mode/static/telemetry/collector_test.go +++ b/internal/mode/static/telemetry/collector_test.go @@ -334,8 +334,10 @@ var _ = Describe("Collector", Ordered, func() { GVK: schema.GroupVersionKind{Kind: kinds.UpstreamSettingsPolicy}, }: {}, }, - NginxProxy: &graph.NginxProxy{}, - SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ + ReferencedNginxProxies: map[types.NamespacedName]*graph.NginxProxy{ + {Namespace: "test", Name: "NginxProxy-1"}: {}, + {Namespace: "test", Name: "NginxProxy-2"}: {}, + }, SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ {Namespace: "test", Name: "sf-1"}: { Snippets: map[ngfAPI.NginxContext]string{ ngfAPI.NginxContextMain: "worker_priority 0;", @@ -414,7 +416,7 @@ var _ = Describe("Collector", Ordered, func() { GatewayAttachedClientSettingsPolicyCount: 1, RouteAttachedClientSettingsPolicyCount: 2, ObservabilityPolicyCount: 1, - NginxProxyCount: 1, + NginxProxyCount: 2, SnippetsFilterCount: 3, UpstreamSettingsPolicyCount: 1, } @@ -613,7 +615,10 @@ var _ = Describe("Collector", Ordered, func() { GVK: schema.GroupVersionKind{Kind: kinds.UpstreamSettingsPolicy}, }: {}, }, - NginxProxy: &graph.NginxProxy{}, + ReferencedNginxProxies: map[types.NamespacedName]*graph.NginxProxy{ + {Namespace: "test", Name: "NginxProxy-1"}: {}, + {Namespace: "test", Name: "NginxProxy-2"}: {}, + }, SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ {Namespace: "test", Name: "sf-1"}: {}, }, @@ -689,7 +694,7 @@ var _ = Describe("Collector", Ordered, func() { GatewayAttachedClientSettingsPolicyCount: 1, RouteAttachedClientSettingsPolicyCount: 1, ObservabilityPolicyCount: 1, - NginxProxyCount: 1, + NginxProxyCount: 2, SnippetsFilterCount: 1, UpstreamSettingsPolicyCount: 1, } diff --git a/tests/suite/manifests/tracing/nginxproxy.yaml b/tests/suite/manifests/tracing/nginxproxy.yaml index ed1f621047..f4876eb186 100644 --- a/tests/suite/manifests/tracing/nginxproxy.yaml +++ b/tests/suite/manifests/tracing/nginxproxy.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.nginx.org/v1alpha1 +apiVersion: gateway.nginx.org/v1alpha2 kind: NginxProxy metadata: name: nginx-proxy From f94dbe0f1d3b6dc3567fd6b9f1168c23988a2cea Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Tue, 28 Jan 2025 12:43:48 -0700 Subject: [PATCH 05/32] CP/DP Split: write configuration to agent (#2999) This commit adds functionality to send nginx configuration to the agent. It also adds support for the single nginx Deployment to be scaled, and send configuration to all replicas. This requires tracking all Subscriptions for a particular deployment, and receiving all responses from those replicas to determine the status to write to the Gateway. 
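For illustration, a minimal, hypothetical Go sketch of the idea described above: tracking every agent Subscription that belongs to one nginx Deployment, fanning the generated configuration out to all replicas, and collecting each replica's response before deciding the status to write to the Gateway. The type and function names below (DeploymentStore, Subscription, ConfigApplyResponse) are illustrative assumptions, not the actual agent/broadcast packages added in this patch.

// Hypothetical sketch only; names and channel shapes are assumptions for illustration.
package main

import (
	"fmt"
	"sync"
)

// ConfigApplyResponse stands in for whatever an agent replica reports back
// after applying a configuration version.
type ConfigApplyResponse struct {
	Pod string
	Err error
}

// Subscription represents a single connected agent replica for a Deployment.
type Subscription struct {
	Pod      string
	ConfigCh chan []byte              // configuration pushed to this replica
	RespCh   chan ConfigApplyResponse // replica's apply result
}

// DeploymentStore tracks all Subscriptions that belong to one nginx Deployment.
type DeploymentStore struct {
	mu   sync.Mutex
	subs map[string][]*Subscription // keyed by namespace/name of the Deployment
}

func NewDeploymentStore() *DeploymentStore {
	return &DeploymentStore{subs: make(map[string][]*Subscription)}
}

// Subscribe registers a new replica connection for the given Deployment.
func (s *DeploymentStore) Subscribe(deployment string, sub *Subscription) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.subs[deployment] = append(s.subs[deployment], sub)
}

// Broadcast sends the config to every replica of the Deployment and waits for
// all responses; the first error (if any) becomes the status for the Gateway.
func (s *DeploymentStore) Broadcast(deployment string, cfg []byte) error {
	s.mu.Lock()
	subs := append([]*Subscription(nil), s.subs[deployment]...)
	s.mu.Unlock()

	for _, sub := range subs {
		sub.ConfigCh <- cfg
	}

	var firstErr error
	for _, sub := range subs {
		resp := <-sub.RespCh
		if resp.Err != nil && firstErr == nil {
			firstErr = fmt.Errorf("replica %s failed to apply config: %w", resp.Pod, resp.Err)
		}
	}
	return firstErr
}

func main() {
	store := NewDeploymentStore()
	sub := &Subscription{
		Pod:      "nginx-abc123",
		ConfigCh: make(chan []byte, 1),
		RespCh:   make(chan ConfigApplyResponse, 1),
	}
	store.Subscribe("default/gateway-nginx", sub)

	// Simulate the replica acknowledging the config it received.
	go func() {
		<-sub.ConfigCh
		sub.RespCh <- ConfigApplyResponse{Pod: sub.Pod}
	}()

	if err := store.Broadcast("default/gateway-nginx", []byte("events {}\n")); err != nil {
		fmt.Println("status for Gateway: not programmed:", err)
		return
	}
	fmt.Println("status for Gateway: programmed")
}

In this sketch the aggregation is deliberately simple (first error wins); the real implementation may weigh per-replica results differently when writing Gateway conditions.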
--- build/Dockerfile | 2 +- charts/nginx-gateway-fabric/README.md | 1 - .../templates/clusterrole.yaml | 16 +- .../templates/deployment.yaml | 3 +- .../nginx-gateway-fabric/templates/scc.yaml | 13 +- .../templates/tmp-nginx-agent-conf.yaml | 24 + .../templates/tmp-nginx-deployment.yaml | 38 +- .../nginx-gateway-fabric/values.schema.json | 14 - charts/nginx-gateway-fabric/values.yaml | 4 - cmd/gateway/commands.go | 27 +- cmd/gateway/initialize.go | 19 +- cmd/gateway/initialize_test.go | 48 +- cmd/gateway/validation.go | 9 +- cmd/gateway/validation_test.go | 18 +- config/tests/static-deployment.yaml | 3 +- deploy/aws-nlb/deploy.yaml | 63 +- deploy/azure/deploy.yaml | 63 +- deploy/default/deploy.yaml | 63 +- deploy/experimental-nginx-plus/deploy.yaml | 70 +- deploy/experimental/deploy.yaml | 63 +- deploy/nginx-plus/deploy.yaml | 70 +- deploy/nodeport/deploy.yaml | 63 +- deploy/openshift/deploy.yaml | 73 +- .../snippets-filters-nginx-plus/deploy.yaml | 70 +- deploy/snippets-filters/deploy.yaml | 63 +- go.mod | 6 +- go.sum | 96 +- internal/framework/file/file.go | 43 +- internal/framework/file/file_test.go | 33 + internal/mode/static/handler.go | 125 ++- internal/mode/static/handler_test.go | 85 +- internal/mode/static/manager.go | 23 +- internal/mode/static/nginx/agent/agent.go | 224 ++++- .../mode/static/nginx/agent/agent_test.go | 307 ++++++ .../agent/agentfakes/fake_nginx_updater.go | 121 ++- .../static/nginx/agent/broadcast/broadcast.go | 159 +++ .../nginx/agent/broadcast/broadcast_test.go | 108 +++ .../broadcastfakes/fake_broadcaster.go | 215 +++++ .../mode/static/nginx/agent/broadcast/doc.go | 5 + internal/mode/static/nginx/agent/command.go | 435 ++++++++- .../mode/static/nginx/agent/command_test.go | 905 ++++++++++++++++++ .../mode/static/nginx/agent/deployment.go | 263 +++++ .../static/nginx/agent/deployment_test.go | 137 +++ internal/mode/static/nginx/agent/file.go | 80 +- internal/mode/static/nginx/agent/file_test.go | 209 ++++ .../static/nginx/agent/grpc/connections.go | 80 +- .../nginx/agent/grpc/connections_test.go | 99 ++ .../nginx/agent/grpc/context/context_test.go | 31 + internal/mode/static/nginx/agent/grpc/grpc.go | 5 +- .../grpcfakes/fake_connections_tracker.go | 233 +++++ .../static/nginx/agent/grpc/messenger/doc.go | 4 + .../nginx/agent/grpc/messenger/messenger.go | 111 +++ .../agent/grpc/messenger/messenger_test.go | 125 +++ .../messengerfakes/fake_messenger.go | 284 ++++++ .../config/configfakes/fake_generator.go | 42 +- .../mode/static/nginx/config/generator.go | 69 +- .../static/nginx/config/generator_test.go | 80 +- .../mode/static/nginx/config/main_config.go | 72 +- .../static/state/conditions/conditions.go | 4 +- internal/mode/static/state/graph/graph.go | 8 + .../mode/static/status/prepare_requests.go | 20 +- .../static/status/prepare_requests_test.go | 17 +- internal/mode/static/status/queue.go | 66 ++ internal/mode/static/status/queue_test.go | 94 ++ 64 files changed, 5319 insertions(+), 604 deletions(-) create mode 100644 internal/mode/static/nginx/agent/agent_test.go create mode 100644 internal/mode/static/nginx/agent/broadcast/broadcast.go create mode 100644 internal/mode/static/nginx/agent/broadcast/broadcast_test.go create mode 100644 internal/mode/static/nginx/agent/broadcast/broadcastfakes/fake_broadcaster.go create mode 100644 internal/mode/static/nginx/agent/broadcast/doc.go create mode 100644 internal/mode/static/nginx/agent/command_test.go create mode 100644 internal/mode/static/nginx/agent/deployment.go create mode 100644 
internal/mode/static/nginx/agent/deployment_test.go create mode 100644 internal/mode/static/nginx/agent/file_test.go create mode 100644 internal/mode/static/nginx/agent/grpc/connections_test.go create mode 100644 internal/mode/static/nginx/agent/grpc/context/context_test.go create mode 100644 internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go create mode 100644 internal/mode/static/nginx/agent/grpc/messenger/doc.go create mode 100644 internal/mode/static/nginx/agent/grpc/messenger/messenger.go create mode 100644 internal/mode/static/nginx/agent/grpc/messenger/messenger_test.go create mode 100644 internal/mode/static/nginx/agent/grpc/messenger/messengerfakes/fake_messenger.go create mode 100644 internal/mode/static/status/queue.go create mode 100644 internal/mode/static/status/queue_test.go diff --git a/build/Dockerfile b/build/Dockerfile index b100acfaf0..7495a0b71e 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -14,7 +14,7 @@ FROM golang:1.24 AS ca-certs-provider FROM scratch AS common # CA certs are needed for telemetry report so that NGF can verify the server's certificate. COPY --from=ca-certs-provider --link /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -USER 102:1001 +USER 101:1001 ARG BUILD_AGENT ENV BUILD_AGENT=${BUILD_AGENT} ENTRYPOINT [ "/usr/bin/gateway" ] diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 096898bbde..6736a3a0b0 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -268,7 +268,6 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | `nginx.image.tag` | | string | `"edge"` | | `nginx.lifecycle` | The lifecycle of the nginx container. | object | `{}` | | `nginx.plus` | Is NGINX Plus image being used | bool | `false` | -| `nginx.securityContext.allowPrivilegeEscalation` | Some environments may need this set to true in order for the control plane to successfully reload NGINX. | bool | `false` | | `nginx.usage.caSecretName` | The name of the Secret containing the NGINX Instance Manager CA certificate. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.clientSSLSecretName` | The name of the Secret containing the client certificate and key for authenticating with NGINX Instance Manager. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.endpoint` | The endpoint of the NGINX Plus usage reporting server. 
Default: product.connect.nginx.com | string | `""` | diff --git a/charts/nginx-gateway-fabric/templates/clusterrole.yaml b/charts/nginx-gateway-fabric/templates/clusterrole.yaml index 9ee1be4254..830fe1b391 100644 --- a/charts/nginx-gateway-fabric/templates/clusterrole.yaml +++ b/charts/nginx-gateway-fabric/templates/clusterrole.yaml @@ -11,6 +11,7 @@ rules: - namespaces - services - secrets + - pods {{- if .Values.nginxGateway.gwAPIExperimentalFeatures.enable }} - configmaps {{- end }} @@ -18,28 +19,13 @@ rules: - get - list - watch -{{- if or .Values.nginxGateway.productTelemetry.enable .Values.nginx.plus }} -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get -{{- end }} -{{- if .Values.nginx.plus }} -- apiGroups: - - apps - resources: - - replicasets - verbs: - list -{{- end }} {{- if or .Values.nginxGateway.productTelemetry.enable .Values.nginx.plus }} - apiGroups: - "" diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index 025da1ff39..33b965efd6 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -145,8 +145,9 @@ spec: capabilities: drop: - ALL + allowPrivilegeEscalation: false readOnlyRootFilesystem: true - runAsUser: 102 + runAsUser: 101 runAsGroup: 1001 {{- with .Values.nginxGateway.extraVolumeMounts -}} {{ toYaml . | nindent 8 }} diff --git a/charts/nginx-gateway-fabric/templates/scc.yaml b/charts/nginx-gateway-fabric/templates/scc.yaml index e58389a8ec..6ab7dc92c1 100644 --- a/charts/nginx-gateway-fabric/templates/scc.yaml +++ b/charts/nginx-gateway-fabric/templates/scc.yaml @@ -1,9 +1,10 @@ +# TODO(sberman): will need an SCC for nginx ServiceAccounts as well. {{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} kind: SecurityContextConstraints apiVersion: security.openshift.io/v1 metadata: name: {{ include "nginx-gateway.scc-name" . }} -allowPrivilegeEscalation: {{ .Values.nginx.securityContext.allowPrivilegeEscalation }} +allowPrivilegeEscalation: false allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false @@ -14,7 +15,7 @@ readOnlyRootFilesystem: true runAsUser: type: MustRunAsRange uidRangeMin: 101 - uidRangeMax: 102 + uidRangeMax: 101 fsGroup: type: MustRunAs ranges: @@ -29,16 +30,8 @@ seLinuxContext: type: MustRunAs seccompProfiles: - runtime/default -volumes: -- emptyDir -- secret -- configMap -- projected users: - {{ printf "system:serviceaccount:%s:%s" .Release.Namespace (include "nginx-gateway.serviceAccountName" .) 
}} -allowedCapabilities: -- NET_BIND_SERVICE -- KILL requiredDropCapabilities: - ALL {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml index 80aba1c868..6e85efffeb 100644 --- a/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml +++ b/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml @@ -15,5 +15,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics + {{- if .Values.nginx.plus }} + - api-action + {{- end }} log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml index 55c9ee5970..bb04bf46eb 100644 --- a/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml @@ -13,15 +13,16 @@ spec: labels: app.kubernetes.io/name: tmp-nginx-deployment app.kubernetes.io/instance: {{ .Release.Name }} + annotations: + {{- if .Values.metrics.enable }} + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.port }}" + {{- if .Values.metrics.secure }} + prometheus.io/scheme: "https" + {{- end }} + {{- end }} spec: initContainers: - - name: sleep # wait for a bit for control plane to be ready - image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} - imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} - command: - - /usr/bin/gateway - - sleep - - --duration=15s - name: init image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} @@ -29,14 +30,20 @@ spec: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf + - --destination + - /etc/nginx/main-includes {{- if .Values.nginx.plus }} - --source - /includes/mgmt.conf - --nginx-plus - {{- end }} - --destination - /etc/nginx/main-includes + {{- end }} env: - name: POD_UID valueFrom: @@ -49,9 +56,13 @@ spec: drop: - ALL readOnlyRootFilesystem: true - runAsUser: 102 + runAsUser: 101 runAsGroup: 1001 volumeMounts: + - name: nginx-agent-config + mountPath: /agent + - name: nginx-agent + mountPath: /etc/nginx-agent - name: nginx-includes-bootstrap mountPath: /includes - name: nginx-main-includes @@ -69,10 +80,11 @@ spec: name: http - containerPort: 443 name: https + - name: metrics + containerPort: {{ .Values.metrics.port }} securityContext: seccompProfile: type: RuntimeDefault - allowPrivilegeEscalation: {{ .Values.nginx.securityContext.allowPrivilegeEscalation }} capabilities: add: - NET_BIND_SERVICE @@ -84,6 +96,8 @@ spec: volumeMounts: - name: nginx-agent mountPath: /etc/nginx-agent + - name: nginx-agent-log + mountPath: /var/log/nginx-agent - name: nginx-conf mountPath: /etc/nginx/conf.d - name: nginx-stream-conf @@ -140,8 +154,12 @@ spec: {{- end }} volumes: - name: nginx-agent + emptyDir: {} + - name: nginx-agent-config configMap: name: nginx-agent-config + - name: nginx-agent-log + emptyDir: {} - name: nginx-conf emptyDir: {} - name: nginx-stream-conf diff 
--git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index 36734ca989..05279c85b4 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -298,20 +298,6 @@ "title": "plus", "type": "boolean" }, - "securityContext": { - "properties": { - "allowPrivilegeEscalation": { - "default": false, - "description": "Some environments may need this set to true in order for the control plane to successfully reload NGINX.", - "required": [], - "title": "allowPrivilegeEscalation", - "type": "boolean" - } - }, - "required": [], - "title": "securityContext", - "type": "object" - }, "usage": { "description": "Configuration for NGINX Plus usage reporting.", "properties": { diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index ff4fc28630..eb20ec601c 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -134,10 +134,6 @@ nginx: # @schema pullPolicy: Always - securityContext: - # -- Some environments may need this set to true in order for the control plane to successfully reload NGINX. - allowPrivilegeEscalation: false - # -- Is NGINX Plus image being used plus: false diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index a48d649f28..da92044068 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -519,14 +519,14 @@ func createInitializeCommand() *cobra.Command { // flag values var srcFiles []string - var dest string + var destDirs []string var plus bool cmd := &cobra.Command{ Use: "initialize", Short: "Write initial configuration files", RunE: func(_ *cobra.Command, _ []string) error { - if err := validateCopyArgs(srcFiles, dest); err != nil { + if err := validateCopyArgs(srcFiles, destDirs); err != nil { return err } @@ -546,7 +546,7 @@ func createInitializeCommand() *cobra.Command { logger.Info( "Starting init container", "source filenames to copy", srcFiles, - "destination directory", dest, + "destination directories", destDirs, "nginx-plus", plus, ) @@ -558,16 +558,21 @@ func createInitializeCommand() *cobra.Command { Logger: logger.WithName("deployCtxCollector"), }) + files := make([]fileToCopy, 0, len(srcFiles)) + for i, src := range srcFiles { + files = append(files, fileToCopy{ + destDirName: destDirs[i], + srcFileName: src, + }) + } + return initialize(initializeConfig{ fileManager: file.NewStdLibOSFileManager(), fileGenerator: ngxConfig.NewGeneratorImpl(plus, nil, logger.WithName("generator")), logger: logger, plus: plus, collector: dcc, - copy: copyFiles{ - srcFileNames: srcFiles, - destDirName: dest, - }, + copy: files, }) }, } @@ -579,11 +584,11 @@ func createInitializeCommand() *cobra.Command { "The source files to be copied", ) - cmd.Flags().StringVar( - &dest, + cmd.Flags().StringSliceVar( + &destDirs, destFlag, - "", - "The destination directory for the source files to be copied to", + []string{}, + "The destination directories for the source files at the same array index to be copied to", ) cmd.Flags().BoolVar( diff --git a/cmd/gateway/initialize.go b/cmd/gateway/initialize.go index 02865d1a89..516d2e95f7 100644 --- a/cmd/gateway/initialize.go +++ b/cmd/gateway/initialize.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "os" "path/filepath" "time" @@ -17,9 +18,9 @@ const ( collectDeployCtxTimeout = 10 * time.Second ) -type copyFiles struct { - destDirName string - srcFileNames []string +type fileToCopy struct { + destDirName string + srcFileName string 
} type initializeConfig struct { @@ -27,13 +28,13 @@ type initializeConfig struct { fileManager file.OSFileManager fileGenerator config.Generator logger logr.Logger - copy copyFiles + copy []fileToCopy plus bool } func initialize(cfg initializeConfig) error { - for _, src := range cfg.copy.srcFileNames { - if err := copyFile(cfg.fileManager, src, cfg.copy.destDirName); err != nil { + for _, f := range cfg.copy { + if err := copyFile(cfg.fileManager, f.srcFileName, f.destDirName); err != nil { return err } } @@ -58,7 +59,7 @@ func initialize(cfg initializeConfig) error { return fmt.Errorf("failed to generate deployment context file: %w", err) } - if err := file.Write(cfg.fileManager, depCtxFile); err != nil { + if err := file.Write(cfg.fileManager, file.Convert(depCtxFile)); err != nil { return fmt.Errorf("failed to write deployment context file: %w", err) } @@ -84,5 +85,9 @@ func copyFile(osFileManager file.OSFileManager, src, dest string) error { return fmt.Errorf("error copying file contents: %w", err) } + if err := osFileManager.Chmod(destFile, os.FileMode(file.RegularFileModeInt)); err != nil { + return fmt.Errorf("error setting file permissions: %w", err) + } + return nil } diff --git a/cmd/gateway/initialize_test.go b/cmd/gateway/initialize_test.go index 4d7e606c0f..04999b6cad 100644 --- a/cmd/gateway/initialize_test.go +++ b/cmd/gateway/initialize_test.go @@ -28,9 +28,15 @@ func TestInitialize_OSS(t *testing.T) { ic := initializeConfig{ fileManager: fakeFileMgr, logger: logr.Discard(), - copy: copyFiles{ - destDirName: "destDir", - srcFileNames: []string{"src1", "src2"}, + copy: []fileToCopy{ + { + destDirName: "destDir", + srcFileName: "src1", + }, + { + destDirName: "destDir2", + srcFileName: "src2", + }, }, plus: false, } @@ -56,9 +62,15 @@ func TestInitialize_OSS_Error(t *testing.T) { ic := initializeConfig{ fileManager: fakeFileMgr, logger: logr.Discard(), - copy: copyFiles{ - destDirName: "destDir", - srcFileNames: []string{"src1", "src2"}, + copy: []fileToCopy{ + { + destDirName: "destDir", + srcFileName: "src1", + }, + { + destDirName: "destDir2", + srcFileName: "src2", + }, }, plus: false, } @@ -114,9 +126,15 @@ func TestInitialize_Plus(t *testing.T) { logger: logr.Discard(), collector: fakeCollector, fileGenerator: fakeGenerator, - copy: copyFiles{ - destDirName: "destDir", - srcFileNames: []string{"src1", "src2"}, + copy: []fileToCopy{ + { + destDirName: "destDir", + srcFileName: "src1", + }, + { + destDirName: "destDir2", + srcFileName: "src2", + }, }, plus: true, } @@ -133,7 +151,7 @@ func TestInitialize_Plus(t *testing.T) { g.Expect(fakeGenerator.GenerateDeploymentContextArgsForCall(0)).To(Equal(test.depCtx)) g.Expect(fakeCollector.CollectCallCount()).To(Equal(1)) g.Expect(fakeFileMgr.WriteCallCount()).To(Equal(1)) - g.Expect(fakeFileMgr.ChmodCallCount()).To(Equal(1)) + g.Expect(fakeFileMgr.ChmodCallCount()).To(Equal(3)) }) } } @@ -161,6 +179,7 @@ func TestCopyFileErrors(t *testing.T) { openErr := errors.New("open error") createErr := errors.New("create error") copyErr := errors.New("copy error") + chmodErr := errors.New("chmod error") tests := []struct { fileMgr *filefakes.FakeOSFileManager @@ -194,6 +213,15 @@ func TestCopyFileErrors(t *testing.T) { }, expErr: copyErr, }, + { + name: "can't set permissions", + fileMgr: &filefakes.FakeOSFileManager{ + ChmodStub: func(_ *os.File, _ os.FileMode) error { + return chmodErr + }, + }, + expErr: chmodErr, + }, } for _, test := range tests { diff --git a/cmd/gateway/validation.go b/cmd/gateway/validation.go index 
aced2ef06b..9e07a3a918 100644 --- a/cmd/gateway/validation.go +++ b/cmd/gateway/validation.go @@ -206,12 +206,15 @@ func ensureNoPortCollisions(ports ...int) error { return nil } -// validateCopyArgs ensures that arguments to the sleep command are set. -func validateCopyArgs(srcFiles []string, dest string) error { +// validateCopyArgs ensures that arguments to the initialize command are set. +func validateCopyArgs(srcFiles []string, destDirs []string) error { + if len(srcFiles) != len(destDirs) { + return errors.New("source and destination must have the same number of elements") + } if len(srcFiles) == 0 { return errors.New("source must not be empty") } - if len(dest) == 0 { + if len(destDirs) == 0 { return errors.New("destination must not be empty") } diff --git a/cmd/gateway/validation_test.go b/cmd/gateway/validation_test.go index 1774f13619..59db6fc57c 100644 --- a/cmd/gateway/validation_test.go +++ b/cmd/gateway/validation_test.go @@ -554,33 +554,39 @@ func TestEnsureNoPortCollisions(t *testing.T) { g.Expect(ensureNoPortCollisions(9113, 9113)).ToNot(Succeed()) } -func TestValidateSleepArgs(t *testing.T) { +func TestValidateInitializeArgs(t *testing.T) { t.Parallel() tests := []struct { name string - dest string + destDirs []string srcFiles []string expErr bool }{ { name: "valid values", - dest: "/dest/file", + destDirs: []string{"/dest/"}, srcFiles: []string{"/src/file"}, expErr: false, }, { name: "invalid dest", - dest: "", + destDirs: []string{}, srcFiles: []string{"/src/file"}, expErr: true, }, { name: "invalid src", - dest: "/dest/file", + destDirs: []string{"/dest/"}, srcFiles: []string{}, expErr: true, }, + { + name: "different lengths", + destDirs: []string{"/dest/"}, + srcFiles: []string{"src1", "src2"}, + expErr: true, + }, } for _, tc := range tests { @@ -588,7 +594,7 @@ func TestValidateSleepArgs(t *testing.T) { t.Parallel() g := NewWithT(t) - err := validateCopyArgs(tc.srcFiles, tc.dest) + err := validateCopyArgs(tc.srcFiles, tc.destDirs) if !tc.expErr { g.Expect(err).ToNot(HaveOccurred()) } else { diff --git a/config/tests/static-deployment.yaml b/config/tests/static-deployment.yaml index 5009a827ed..2a53887183 100644 --- a/config/tests/static-deployment.yaml +++ b/config/tests/static-deployment.yaml @@ -69,8 +69,9 @@ spec: capabilities: drop: - ALL + allowPrivilegeEscalation: false readOnlyRootFilesystem: true - runAsUser: 102 + runAsUser: 101 runAsGroup: 1001 terminationGracePeriodSeconds: 30 serviceAccountName: nginx-gateway diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml index 98dddfdc9b..c983b691fd 100644 --- a/deploy/aws-nlb/deploy.yaml +++ b/deploy/aws-nlb/deploy.yaml @@ -28,22 +28,18 @@ rules: - namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -157,8 +153,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -293,12 +310,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 
1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -319,6 +337,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -332,8 +353,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -347,6 +369,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -362,17 +386,14 @@ spec: - mountPath: /etc/nginx/includes name: nginx-includes initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf - --destination - /etc/nginx/main-includes @@ -390,10 +411,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -404,9 +429,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index 236c62288b..0e0330d243 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -28,22 +28,18 @@ rules: - namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -157,8 +153,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -290,12 +307,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault nodeSelector: @@ -318,6 +336,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -331,8 +352,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -346,6 +368,8 @@ spec: volumeMounts: 
- mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -361,17 +385,14 @@ spec: - mountPath: /etc/nginx/includes name: nginx-includes initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf - --destination - /etc/nginx/main-includes @@ -389,10 +410,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -405,9 +430,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index c066b92710..8e51e699fc 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -28,22 +28,18 @@ rules: - namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -157,8 +153,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -290,12 +307,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -316,6 +334,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -329,8 +350,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -344,6 +366,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -359,17 +383,14 @@ spec: - mountPath: /etc/nginx/includes name: nginx-includes initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - 
/includes/main.conf - --destination - /etc/nginx/main-includes @@ -387,10 +408,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -401,9 +426,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index ae5ca25e93..009dd2aaad 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -30,28 +30,18 @@ rules: - namespaces - services - secrets + - pods - configmaps verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get -- apiGroups: - - apps - resources: - - replicasets - verbs: - list - apiGroups: - "" @@ -170,8 +160,30 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics + - api-action log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -311,12 +323,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -337,6 +350,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -350,8 +366,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -365,6 +382,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -385,18 +404,17 @@ spec: name: nginx-plus-license subPath: license.jwt initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf + - --destination + - /etc/nginx/main-includes - --source - /includes/mgmt.conf - --nginx-plus @@ -416,10 +434,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: 
/etc/nginx/main-includes @@ -430,9 +452,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index 16c1d7c10f..c847f0a4cd 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -28,23 +28,19 @@ rules: - namespaces - services - secrets + - pods - configmaps verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -162,8 +158,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -296,12 +313,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -322,6 +340,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -335,8 +356,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -350,6 +372,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -365,17 +389,14 @@ spec: - mountPath: /etc/nginx/includes name: nginx-includes initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf - --destination - /etc/nginx/main-includes @@ -393,10 +414,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -407,9 +432,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index 2ab6da27dd..282a7b8878 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -30,27 +30,17 @@ rules: 
- namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get -- apiGroups: - - apps - resources: - - replicasets - verbs: - list - apiGroups: - "" @@ -165,8 +155,30 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics + - api-action log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -305,12 +317,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -331,6 +344,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -344,8 +360,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -359,6 +376,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -379,18 +398,17 @@ spec: name: nginx-plus-license subPath: license.jwt initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf + - --destination + - /etc/nginx/main-includes - --source - /includes/mgmt.conf - --nginx-plus @@ -410,10 +428,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -424,9 +446,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index a016150ab7..ec4d874a80 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -28,22 +28,18 @@ rules: - namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -157,8 +153,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} 
+ memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -290,12 +307,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -316,6 +334,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -329,8 +350,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -344,6 +366,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -359,17 +383,14 @@ spec: - mountPath: /etc/nginx/includes name: nginx-includes initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf - --destination - /etc/nginx/main-includes @@ -387,10 +408,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -401,9 +426,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index 43ba8df5fb..b22981cd1b 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -28,22 +28,18 @@ rules: - namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -165,8 +161,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -298,12 +315,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -324,6 +342,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: 
metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -337,8 +358,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -352,6 +374,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -367,17 +391,14 @@ spec: - mountPath: /etc/nginx/includes name: nginx-includes initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf - --destination - /etc/nginx/main-includes @@ -395,10 +416,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -409,9 +434,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} @@ -461,9 +490,6 @@ allowHostPID: false allowHostPorts: false allowPrivilegeEscalation: false allowPrivilegedContainer: false -allowedCapabilities: -- NET_BIND_SERVICE -- KILL apiVersion: security.openshift.io/v1 fsGroup: ranges: @@ -478,7 +504,7 @@ requiredDropCapabilities: - ALL runAsUser: type: MustRunAsRange - uidRangeMax: 102 + uidRangeMax: 101 uidRangeMin: 101 seLinuxContext: type: MustRunAs @@ -491,8 +517,3 @@ supplementalGroups: type: MustRunAs users: - system:serviceaccount:nginx-gateway:nginx-gateway -volumes: -- emptyDir -- secret -- configMap -- projected diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index 1206af4707..ffed3588fd 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -30,27 +30,17 @@ rules: - namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get -- apiGroups: - - apps - resources: - - replicasets - verbs: - list - apiGroups: - "" @@ -167,8 +157,30 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics + - api-action log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -308,12 +320,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 
seccompProfile: type: RuntimeDefault securityContext: @@ -334,6 +347,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -347,8 +363,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE @@ -362,6 +379,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -382,18 +401,17 @@ spec: name: nginx-plus-license subPath: license.jwt initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf + - --destination + - /etc/nginx/main-includes - --source - /includes/mgmt.conf - --nginx-plus @@ -413,10 +431,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -427,9 +449,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index 98d20a0ea4..6fa5e75077 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -28,22 +28,18 @@ rules: - namespaces - services - secrets + - pods verbs: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - apps resources: - replicasets verbs: - get + - list - apiGroups: - "" resources: @@ -159,8 +155,29 @@ data: - /var/run/nginx features: - connection + - configuration + - certificates + - metrics log: level: debug + collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: 9113 kind: ConfigMap metadata: name: nginx-agent-config @@ -293,12 +310,13 @@ spec: initialDelaySeconds: 3 periodSeconds: 1 securityContext: + allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault securityContext: @@ -319,6 +337,9 @@ spec: app.kubernetes.io/name: tmp-nginx-deployment template: metadata: + annotations: + prometheus.io/port: "9113" + prometheus.io/scrape: "true" labels: app.kubernetes.io/instance: nginx-gateway app.kubernetes.io/name: tmp-nginx-deployment @@ -332,8 +353,9 @@ spec: name: http - containerPort: 443 name: https + - containerPort: 9113 + name: metrics securityContext: - allowPrivilegeEscalation: false capabilities: add: - 
NET_BIND_SERVICE @@ -347,6 +369,8 @@ spec: volumeMounts: - mountPath: /etc/nginx-agent name: nginx-agent + - mountPath: /var/log/nginx-agent + name: nginx-agent-log - mountPath: /etc/nginx/conf.d name: nginx-conf - mountPath: /etc/nginx/stream-conf.d @@ -362,17 +386,14 @@ spec: - mountPath: /etc/nginx/includes name: nginx-includes initContainers: - - command: - - /usr/bin/gateway - - sleep - - --duration=15s - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: sleep - command: - /usr/bin/gateway - initialize - --source + - /agent/nginx-agent.conf + - --destination + - /etc/nginx-agent + - --source - /includes/main.conf - --destination - /etc/nginx/main-includes @@ -390,10 +411,14 @@ spec: - ALL readOnlyRootFilesystem: true runAsGroup: 1001 - runAsUser: 102 + runAsUser: 101 seccompProfile: type: RuntimeDefault volumeMounts: + - mountPath: /agent + name: nginx-agent-config + - mountPath: /etc/nginx-agent + name: nginx-agent - mountPath: /includes name: nginx-includes-bootstrap - mountPath: /etc/nginx/main-includes @@ -404,9 +429,13 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 volumes: + - emptyDir: {} + name: nginx-agent - configMap: name: nginx-agent-config - name: nginx-agent + name: nginx-agent-config + - emptyDir: {} + name: nginx-agent-log - emptyDir: {} name: nginx-conf - emptyDir: {} diff --git a/go.mod b/go.mod index 400114b860..c541024259 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,8 @@ require ( github.com/go-kit/log v0.2.1 github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.7.0 - github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4 + github.com/google/uuid v1.6.0 + github.com/nginx/agent/v3 v3.0.0-20250120091728-0f0c0e2478aa github.com/nginx/telemetry-exporter v0.1.4 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 @@ -18,6 +19,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 go.uber.org/zap v1.27.0 google.golang.org/grpc v1.72.0 + google.golang.org/protobuf v1.36.6 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 k8s.io/apimachinery v0.32.3 @@ -50,7 +52,6 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -85,7 +86,6 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect - google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 5c0c2f008c..52014ba372 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,44 @@ buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1 h1:2IGhRovxlsOIQgx2ekZWo4wTPAYpck41+18ICxs37is= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1/go.mod h1:Tgn5bgL220vkFOI0KPStlcClPeOJzAv4uT+V8JXGUnw= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod 
h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/felixge/httpsnoop v1.0.4 
h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -32,6 +54,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= @@ -62,6 +86,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -78,30 +104,60 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4 h1:Tn0SOlxq9uaJuqc6DUGZGYszrtHAHOaLnhbBWMzK1Bs= -github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4/go.mod h1:HDi/Je5AKCe5by/hWs2jbzUqi3BN4K32hMD2/hWN5G8= +github.com/nginx/agent/v3 v3.0.0-20250120091728-0f0c0e2478aa h1:PvNHtYSv/glSxDkovCHJsDlNFHkvzoH2wAr6WtSNYcM= +github.com/nginx/agent/v3 v3.0.0-20250120091728-0f0c0e2478aa/go.mod h1:HDi/Je5AKCe5by/hWs2jbzUqi3BN4K32hMD2/hWN5G8= github.com/nginx/telemetry-exporter v0.1.4 h1:3ikgKlyz/O57oaBLkxCInMjr74AhGTKr9rHdRAkkl/w= github.com/nginx/telemetry-exporter v0.1.4/go.mod h1:bl6qmsxgk4a9D0X8R5E3sUNXN2iECPEK1JNbRLhN5C4= +github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1 h1:5VVK38bnELMDWnwfF6dSv57ResXh9AUzeDa72ENj94o= +github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1/go.mod h1:He+1izxYxVVO5/C9ZTukwOpvkAx5eS19nRQgKXDhX5I= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= @@ -115,22 +171,52 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod 
h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= +github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= @@ -158,6 +244,10 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= 
+golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= @@ -217,6 +307,8 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/internal/framework/file/file.go b/internal/framework/file/file.go index 3533bda2ee..555731ba45 100644 --- a/internal/framework/file/file.go +++ b/internal/framework/file/file.go @@ -5,15 +5,21 @@ import ( "fmt" "io" "os" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" ) //go:generate go tool counterfeiter -generate const ( - // regularFileMode defines the default file mode for regular files. - regularFileMode = 0o644 - // secretFileMode defines the default file mode for files with secrets. - secretFileMode = 0o640 + // RegularFileModeInt defines the default file mode for regular files as an integer. + RegularFileModeInt = 0o644 + // RegularFileMode defines the default file mode for regular files. + RegularFileMode = "0644" + // secretFileMode defines the default file mode for files with secrets as an integer. + secretFileModeInt = 0o640 + // SecretFileMode defines the default file mode for files with secrets. + SecretFileMode = "0640" ) // Type is the type of File. @@ -78,14 +84,14 @@ func Write(fileMgr OSFileManager, file File) error { switch file.Type { case TypeRegular: - if err := fileMgr.Chmod(f, regularFileMode); err != nil { + if err := fileMgr.Chmod(f, RegularFileModeInt); err != nil { resultErr = fmt.Errorf( - "failed to set file mode to %#o for %q: %w", regularFileMode, file.Path, err) + "failed to set file mode to %#o for %q: %w", RegularFileModeInt, file.Path, err) return resultErr } case TypeSecret: - if err := fileMgr.Chmod(f, secretFileMode); err != nil { - resultErr = fmt.Errorf("failed to set file mode to %#o for %q: %w", secretFileMode, file.Path, err) + if err := fileMgr.Chmod(f, secretFileModeInt); err != nil { + resultErr = fmt.Errorf("failed to set file mode to %#o for %q: %w", secretFileModeInt, file.Path, err) return resultErr } default: @@ -105,3 +111,24 @@ func ensureType(fileType Type) { panic(fmt.Sprintf("unknown file type %d", fileType)) } } + +// Convert an agent File to an internal File type. 
+func Convert(agentFile agent.File) File { + if agentFile.Meta == nil { + return File{} + } + + var t Type + switch agentFile.Meta.Permissions { + case RegularFileMode: + t = TypeRegular + case SecretFileMode: + t = TypeSecret + } + + return File{ + Content: agentFile.Contents, + Path: agentFile.Meta.Name, + Type: t, + } +} diff --git a/internal/framework/file/file_test.go b/internal/framework/file/file_test.go index 9c63c0ba27..c67678bbad 100644 --- a/internal/framework/file/file_test.go +++ b/internal/framework/file/file_test.go @@ -5,11 +5,13 @@ import ( "os" "path/filepath" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/file/filefakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" ) var _ = Describe("Write files", Ordered, func() { @@ -152,4 +154,35 @@ var _ = Describe("Write files", Ordered, func() { ), ) }) + + It("converts agent files to internal files", func() { + agentFile := agent.File{ + Contents: []byte("file contents"), + Meta: &pb.FileMeta{ + Name: "regular-file", + Permissions: file.RegularFileMode, + }, + } + expFile := file.File{ + Path: "regular-file", + Content: []byte("file contents"), + Type: file.TypeRegular, + } + + secretAgentFile := agent.File{ + Contents: []byte("secret contents"), + Meta: &pb.FileMeta{ + Name: "secret-file", + Permissions: file.SecretFileMode, + }, + } + expSecretFile := file.File{ + Path: "secret-file", + Content: []byte("secret contents"), + Type: file.TypeSecret, + } + + Expect(file.Convert(agentFile)).To(Equal(expFile)) + Expect(file.Convert(secretAgentFile)).To(Equal(expSecretFile)) + }) }) diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 91b904d40e..1020f11e03 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -2,6 +2,7 @@ package static import ( "context" + "errors" "fmt" "sync" "time" @@ -35,6 +36,7 @@ type handlerMetricsCollector interface { // eventHandlerConfig holds configuration parameters for eventHandlerImpl. type eventHandlerConfig struct { + ctx context.Context // nginxUpdater updates nginx configuration using the NGINX agent. nginxUpdater agent.NginxUpdater // metricsCollector collects metrics for this controller. @@ -59,6 +61,12 @@ type eventHandlerConfig struct { deployCtxCollector licensing.Collector // graphBuiltHealthChecker sets the health of the Pod to Ready once we've built our initial graph. graphBuiltHealthChecker *graphBuiltHealthChecker + // statusQueue contains updates when the handler should write statuses. + statusQueue *status.Queue + // nginxDeployments contains a map of all nginx Deployments, and data about them. + nginxDeployments *agent.DeploymentStore + // logger is the logger for the event handler. + logger logr.Logger // gatewayPodConfig contains information about this Pod. gatewayPodConfig ngfConfig.GatewayPodConfig // controlConfigNSName is the NamespacedName of the NginxGateway config for this controller. 
@@ -102,8 +110,6 @@ type eventHandlerImpl struct { // objectFilters contains all created objectFilters, with the key being a filterKey objectFilters map[filterKey]objectFilter - latestReloadResult status.NginxReloadResult - cfg eventHandlerConfig lock sync.Mutex @@ -137,6 +143,8 @@ func newEventHandlerImpl(cfg eventHandlerConfig) *eventHandlerImpl { }, } + go handler.waitForStatusUpdates(cfg.ctx) + return handler } @@ -164,11 +172,49 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.cfg.graphBuiltHealthChecker.setAsReady() } - var err error + // TODO(sberman): hardcode this deployment name until we support provisioning data planes + // If no deployments exist, we should just return without doing anything. + deploymentName := types.NamespacedName{ + Name: "tmp-nginx-deployment", + Namespace: h.cfg.gatewayPodConfig.Namespace, + } + + // TODO(sberman): if nginx Deployment is scaled down, we should remove the pod from the ConnectionsTracker + // and Deployment. + // If fully deleted, then delete the deployment from the Store and close the stopCh. + stopCh := make(chan struct{}) + deployment := h.cfg.nginxDeployments.GetOrStore(ctx, deploymentName, stopCh) + if deployment == nil { + panic("expected deployment, got nil") + } + + configApplied := h.processStateAndBuildConfig(ctx, logger, gr, changeType, deployment) + + configErr := deployment.GetLatestConfigError() + upstreamErr := deployment.GetLatestUpstreamError() + err := errors.Join(configErr, upstreamErr) + + if configApplied || err != nil { + obj := &status.QueueObject{ + Error: err, + Deployment: deploymentName, + } + h.cfg.statusQueue.Enqueue(obj) + } +} + +func (h *eventHandlerImpl) processStateAndBuildConfig( + ctx context.Context, + logger logr.Logger, + gr *graph.Graph, + changeType state.ChangeType, + deployment *agent.Deployment, +) bool { + var configApplied bool switch changeType { case state.NoChange: logger.Info("Handling events didn't result into NGINX configuration changes") - return + return false case state.EndpointsOnlyChange: h.version++ cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) @@ -180,11 +226,13 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.setLatestConfiguration(&cfg) + deployment.Lock.Lock() if h.cfg.plus { - h.cfg.nginxUpdater.UpdateUpstreamServers() + configApplied = h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, cfg) } else { - err = h.updateNginxConf(cfg) + configApplied = h.updateNginxConf(deployment, cfg) } + deployment.Lock.Unlock() case state.ClusterStateChange: h.version++ cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) @@ -196,26 +244,43 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.setLatestConfiguration(&cfg) - err = h.updateNginxConf(cfg) + deployment.Lock.Lock() + configApplied = h.updateNginxConf(deployment, cfg) + deployment.Lock.Unlock() } - var nginxReloadRes status.NginxReloadResult - if err != nil { - logger.Error(err, "Failed to update NGINX configuration") - nginxReloadRes.Error = err - } else { - logger.Info("NGINX configuration was successfully updated") - } + return configApplied +} - h.latestReloadResult = nginxReloadRes +func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) { + for { + item := h.cfg.statusQueue.Dequeue(ctx) + if item == nil { + return + } - h.updateStatuses(ctx, logger, gr) + var nginxReloadRes graph.NginxReloadResult + switch { + case item.Error != 
nil: + h.cfg.logger.Error(item.Error, "Failed to update NGINX configuration") + nginxReloadRes.Error = item.Error + default: + h.cfg.logger.Info("NGINX configuration was successfully updated") + } + + // TODO(sberman): once we support multiple Gateways, we'll have to get + // the correct Graph for the Deployment contained in the update message + gr := h.cfg.processor.GetLatestGraph() + gr.LatestReloadResult = nginxReloadRes + + h.updateStatuses(ctx, gr) + } } -func (h *eventHandlerImpl) updateStatuses(ctx context.Context, logger logr.Logger, gr *graph.Graph) { +func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph) { gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, h.cfg.gatewayPodConfig) if err != nil { - logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") + h.cfg.logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") } transitionTime := metav1.Now() @@ -228,7 +293,7 @@ func (h *eventHandlerImpl) updateStatuses(ctx context.Context, logger logr.Logge gr.L4Routes, gr.Routes, transitionTime, - h.latestReloadResult, + gr.LatestReloadResult, h.cfg.gatewayCtlrName, ) @@ -260,7 +325,7 @@ func (h *eventHandlerImpl) updateStatuses(ctx context.Context, logger logr.Logge gr.IgnoredGateways, transitionTime, gwAddresses, - h.latestReloadResult, + gr.LatestReloadResult, ) h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gwReqs...) } @@ -295,19 +360,19 @@ func (h *eventHandlerImpl) parseAndCaptureEvent(ctx context.Context, logger logr } // updateNginxConf updates nginx conf files and reloads nginx. -// -//nolint:unparam // temporarily returning only nil -func (h *eventHandlerImpl) updateNginxConf(conf dataplane.Configuration) error { +func (h *eventHandlerImpl) updateNginxConf( + deployment *agent.Deployment, + conf dataplane.Configuration, +) bool { files := h.cfg.generator.Generate(conf) - - h.cfg.nginxUpdater.UpdateConfig(len(files)) + applied := h.cfg.nginxUpdater.UpdateConfig(deployment, files) // If using NGINX Plus, update upstream servers using the API. if h.cfg.plus { - h.cfg.nginxUpdater.UpdateUpstreamServers() + h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, conf) } - return nil + return applied } // updateControlPlaneAndSetStatus updates the control plane configuration and then sets the status @@ -423,6 +488,8 @@ func (h *eventHandlerImpl) GetLatestConfiguration() *dataplane.Configuration { } // setLatestConfiguration sets the latest configuration. +// TODO(sberman): once we support multiple Gateways, this will likely have to be a map +// of all configurations. func (h *eventHandlerImpl) setLatestConfiguration(cfg *dataplane.Configuration) { h.lock.Lock() defer h.lock.Unlock() @@ -482,7 +549,7 @@ func (h *eventHandlerImpl) nginxGatewayServiceUpsert(ctx context.Context, logger gr.IgnoredGateways, transitionTime, gwAddresses, - h.latestReloadResult, + gr.LatestReloadResult, ) h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) } @@ -508,7 +575,7 @@ func (h *eventHandlerImpl) nginxGatewayServiceDelete( gr.IgnoredGateways, transitionTime, gwAddresses, - h.latestReloadResult, + gr.LatestReloadResult, ) h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) 
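Note (illustrative, not part of this patch): the handler changes above replace the old in-handler reload tracking with a status queue that a background goroutine drains. Below is a minimal sketch of that producer/consumer shape; it uses simplified stand-ins for status.Queue and status.QueueObject, and every name in it is hypothetical rather than the real API.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// result mirrors the idea of status.QueueObject: which nginx deployment the
// config apply targeted and whether it failed. Names are illustrative.
type result struct {
	deployment string
	err        error
}

// statusQueue is a simplified stand-in for the handler's status queue:
// Enqueue is called after a config is pushed to the agent, Dequeue blocks
// until an item arrives or the context is cancelled.
type statusQueue struct{ ch chan *result }

func newStatusQueue() *statusQueue { return &statusQueue{ch: make(chan *result, 16)} }

func (q *statusQueue) Enqueue(r *result) { q.ch <- r }

func (q *statusQueue) Dequeue(ctx context.Context) *result {
	select {
	case <-ctx.Done():
		return nil
	case r := <-q.ch:
		return r
	}
}

// waitForStatusUpdates mimics the handler's background loop: it stops when
// the context is cancelled, and otherwise records the latest reload result
// before writing statuses for the affected Gateway resources.
func waitForStatusUpdates(ctx context.Context, q *statusQueue) {
	for {
		item := q.Dequeue(ctx)
		if item == nil {
			return
		}
		if item.err != nil {
			fmt.Printf("deployment %s: config apply failed: %v\n", item.deployment, item.err)
		} else {
			fmt.Printf("deployment %s: config apply succeeded\n", item.deployment)
		}
		// The real handler fetches the latest graph at this point and calls
		// the status updater with the reload result.
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	q := newStatusQueue()

	done := make(chan struct{})
	go func() { waitForStatusUpdates(ctx, q); close(done) }()

	q.Enqueue(&result{deployment: "tmp-nginx-deployment"})
	q.Enqueue(&result{deployment: "tmp-nginx-deployment", err: errors.New("reload failed")})

	time.Sleep(100 * time.Millisecond) // give the loop a moment to drain (fine for a sketch)
	cancel()
	<-done
}

In the real handler, HandleEventBatch plays the producer role (it enqueues once config has been sent to the agent, or when the deployment reports an error), and waitForStatusUpdates is started once from newEventHandlerImpl with the handler's context.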
} diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index 43644981b4..8b81b364ac 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "go.uber.org/zap" @@ -19,18 +20,20 @@ import ( ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" "github.com/nginx/nginx-gateway-fabric/internal/framework/events" - "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/status/statusfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing/licensingfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics/collectors" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/agentfakes" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/configfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/statefakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" ) var _ = Describe("eventHandler", func() { @@ -42,9 +45,12 @@ var _ = Describe("eventHandler", func() { fakeStatusUpdater *statusfakes.FakeGroupUpdater fakeEventRecorder *record.FakeRecorder fakeK8sClient client.WithWatch + queue *status.Queue namespace = "nginx-gateway" configName = "nginx-gateway-config" zapLogLevelSetter zapLogLevelSetter + ctx context.Context + cancel context.CancelFunc ) const nginxGatewayServiceName = "nginx-gateway" @@ -58,17 +64,20 @@ var _ = Describe("eventHandler", func() { } } - expectReconfig := func(expectedConf dataplane.Configuration, expectedFiles []file.File) { + expectReconfig := func(expectedConf dataplane.Configuration, expectedFiles []agent.File) { Expect(fakeProcessor.ProcessCallCount()).Should(Equal(1)) Expect(fakeGenerator.GenerateCallCount()).Should(Equal(1)) Expect(fakeGenerator.GenerateArgsForCall(0)).Should(Equal(expectedConf)) Expect(fakeNginxUpdater.UpdateConfigCallCount()).Should(Equal(1)) - lenFiles := fakeNginxUpdater.UpdateConfigArgsForCall(0) - Expect(expectedFiles).To(HaveLen(lenFiles)) + _, files := fakeNginxUpdater.UpdateConfigArgsForCall(0) + Expect(expectedFiles).To(Equal(files)) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).Should(Equal(2)) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(2)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupAllExceptGateways)) Expect(reqs).To(BeEmpty()) @@ -79,19 +88,25 @@ var _ = Describe("eventHandler", func() { } BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) //nolint:fatcontext // ignore for test + fakeProcessor = &statefakes.FakeChangeProcessor{} fakeProcessor.ProcessReturns(state.NoChange, 
&graph.Graph{}) + fakeProcessor.GetLatestGraphReturns(&graph.Graph{}) fakeGenerator = &configfakes.FakeGenerator{} fakeNginxUpdater = &agentfakes.FakeNginxUpdater{} + fakeNginxUpdater.UpdateConfigReturns(true) fakeStatusUpdater = &statusfakes.FakeGroupUpdater{} fakeEventRecorder = record.NewFakeRecorder(1) zapLogLevelSetter = newZapLogLevelSetter(zap.NewAtomicLevel()) fakeK8sClient = fake.NewFakeClient() + queue = status.NewQueue() // Needed because handler checks the service from the API on every HandleEventBatch Expect(fakeK8sClient.Create(context.Background(), createService(nginxGatewayServiceName))).To(Succeed()) handler = newEventHandlerImpl(eventHandlerConfig{ + ctx: ctx, k8sClient: fakeK8sClient, processor: fakeProcessor, generator: fakeGenerator, @@ -101,6 +116,8 @@ var _ = Describe("eventHandler", func() { eventRecorder: fakeEventRecorder, deployCtxCollector: &licensingfakes.FakeCollector{}, graphBuiltHealthChecker: newGraphBuiltHealthChecker(), + statusQueue: queue, + nginxDeployments: agent.NewDeploymentStore(&agentgrpcfakes.FakeConnectionsTracker{}), controlConfigNSName: types.NamespacedName{Namespace: namespace, Name: configName}, gatewayPodConfig: config.GatewayPodConfig{ ServiceName: "nginx-gateway", @@ -112,11 +129,16 @@ var _ = Describe("eventHandler", func() { Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeFalse()) }) + AfterEach(func() { + cancel() + }) + Describe("Process the Gateway API resources events", func() { - fakeCfgFiles := []file.File{ + fakeCfgFiles := []agent.File{ { - Type: file.TypeRegular, - Path: "test.conf", + Meta: &pb.FileMeta{ + Name: "test.conf", + }, }, } @@ -211,7 +233,7 @@ var _ = Describe("eventHandler", func() { }, } - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{ + gr := &graph.Graph{ GatewayClass: &graph.GatewayClass{ Source: gc, Valid: true, @@ -219,7 +241,10 @@ var _ = Describe("eventHandler", func() { IgnoredGatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ client.ObjectKeyFromObject(ignoredGC): ignoredGC, }, - }) + } + + fakeProcessor.ProcessReturns(state.ClusterStateChange, gr) + fakeProcessor.GetLatestGraphReturns(gr) e := &events.UpsertEvent{ Resource: &gatewayv1.HTTPRoute{}, // any supported is OK @@ -234,7 +259,10 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(2)) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(2)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupAllExceptGateways)) @@ -440,6 +468,22 @@ var _ = Describe("eventHandler", func() { }) }) + It("should update status when receiving a queue event", func() { + obj := &status.QueueObject{ + Deployment: types.NamespacedName{}, + Error: errors.New("status error"), + } + queue.Enqueue(obj) + + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(2)) + + gr := handler.cfg.processor.GetLatestGraph() + Expect(gr.LatestReloadResult.Error.Error()).To(Equal("status error")) + }) + It("should set the health checker status properly", func() { e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} @@ -532,6 +576,17 @@ var _ = Describe("getDeploymentContext", func() { }) When("nginx plus is true", func() { + var ctx context.Context + var cancel context.CancelFunc + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) 
//nolint:fatcontext + }) + + AfterEach(func() { + cancel() + }) + It("returns deployment context", func() { expDepCtx := dataplane.DeploymentContext{ Integration: "ngf", @@ -541,7 +596,9 @@ var _ = Describe("getDeploymentContext", func() { } handler := newEventHandlerImpl(eventHandlerConfig{ - plus: true, + ctx: ctx, + statusQueue: status.NewQueue(), + plus: true, deployCtxCollector: &licensingfakes.FakeCollector{ CollectStub: func(_ context.Context) (dataplane.DeploymentContext, error) { return expDepCtx, nil @@ -557,7 +614,9 @@ var _ = Describe("getDeploymentContext", func() { expErr := errors.New("collect error") handler := newEventHandlerImpl(eventHandlerConfig{ - plus: true, + ctx: ctx, + statusQueue: status.NewQueue(), + plus: true, deployCtxCollector: &licensingfakes.FakeCollector{ CollectStub: func(_ context.Context) (dataplane.DeploymentContext, error) { return dataplane.DeploymentContext{}, expErr diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 70413ffe05..e6959e6609 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -45,7 +45,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/framework/runnables" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" + frameworkStatus "github.com/nginx/nginx-gateway-fabric/internal/framework/status" ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" @@ -62,6 +62,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/telemetry" ) @@ -160,7 +161,7 @@ func StartManager(cfg config.Config) error { handlerCollector, ok := handlerCollector.(prometheus.Collector) if !ok { - return fmt.Errorf("handlerCollector is not a prometheus.Collector: %w", status.ErrFailedAssert) + return fmt.Errorf("handlerCollector is not a prometheus.Collector: %w", frameworkStatus.ErrFailedAssert) } metrics.Registry.MustRegister( @@ -169,19 +170,25 @@ func StartManager(cfg config.Config) error { ) } - statusUpdater := status.NewUpdater( + statusUpdater := frameworkStatus.NewUpdater( mgr.GetClient(), cfg.Logger.WithName("statusUpdater"), ) - groupStatusUpdater := status.NewLeaderAwareGroupUpdater(statusUpdater) + groupStatusUpdater := frameworkStatus.NewLeaderAwareGroupUpdater(statusUpdater) deployCtxCollector := licensing.NewDeploymentContextCollector(licensing.DeploymentContextCollectorConfig{ K8sClientReader: mgr.GetAPIReader(), PodUID: cfg.GatewayPodConfig.UID, Logger: cfg.Logger.WithName("deployCtxCollector"), }) - nginxUpdater := agent.NewNginxUpdater(cfg.Logger.WithName("nginxUpdater"), cfg.Plus) + statusQueue := status.NewQueue() + nginxUpdater := agent.NewNginxUpdater( + cfg.Logger.WithName("nginxUpdater"), + mgr.GetAPIReader(), + statusQueue, + cfg.Plus, + ) grpcServer := agentgrpc.NewServer( cfg.Logger.WithName("agentGRPCServer"), @@ -196,8 +203,8 @@ func StartManager(cfg config.Config) error { return 
fmt.Errorf("cannot register grpc server: %w", err) } - // TODO(sberman): event handler loop should wait on a channel until the grpc server has started eventHandler := newEventHandlerImpl(eventHandlerConfig{ + ctx: ctx, nginxUpdater: nginxUpdater, metricsCollector: handlerCollector, statusUpdater: groupStatusUpdater, @@ -210,6 +217,7 @@ func StartManager(cfg config.Config) error { ), k8sClient: mgr.GetClient(), k8sReader: mgr.GetAPIReader(), + logger: cfg.Logger.WithName("eventHandler"), logLevelSetter: logLevelSetter, eventRecorder: recorder, deployCtxCollector: deployCtxCollector, @@ -219,6 +227,8 @@ func StartManager(cfg config.Config) error { gatewayCtlrName: cfg.GatewayCtlrName, updateGatewayClassStatus: cfg.UpdateGatewayClassStatus, plus: cfg.Plus, + statusQueue: statusQueue, + nginxDeployments: nginxUpdater.NginxDeployments, }) objects, objectLists := prepareFirstEventBatchPreparerArgs(cfg) @@ -507,6 +517,7 @@ func registerControllers( objectType: &ngfAPIv1alpha1.NginxGateway{}, options: []controller.Option{ controller.WithNamespacedNameFilter(filter.CreateSingleResourceFilter(controlConfigNSName)), + controller.WithK8sPredicate(k8spredicate.GenerationChangedPredicate{}), }, }) if err := setInitialConfig( diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index 1ce5d21b0b..28a20f1872 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -1,48 +1,240 @@ package agent import ( + "context" + "errors" + "fmt" + "time" + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/protobuf/types/known/structpb" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" ) +const retryUpstreamTimeout = 5 * time.Second + //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate //counterfeiter:generate . NginxUpdater // NginxUpdater is an interface for updating NGINX using the NGINX agent. type NginxUpdater interface { - UpdateConfig(int) - UpdateUpstreamServers() + UpdateConfig(deployment *Deployment, files []File) bool + UpdateUpstreamServers(deployment *Deployment, conf dataplane.Configuration) bool } // NginxUpdaterImpl implements the NginxUpdater interface. type NginxUpdaterImpl struct { - CommandService *commandService - FileService *fileService - logger logr.Logger - plus bool + CommandService *commandService + FileService *fileService + NginxDeployments *DeploymentStore + logger logr.Logger + plus bool + retryTimeout time.Duration } // NewNginxUpdater returns a new NginxUpdaterImpl instance. 
-func NewNginxUpdater(logger logr.Logger, plus bool) *NginxUpdaterImpl { +func NewNginxUpdater( + logger logr.Logger, + reader client.Reader, + statusQueue *status.Queue, + plus bool, +) *NginxUpdaterImpl { + connTracker := agentgrpc.NewConnectionsTracker() + nginxDeployments := NewDeploymentStore(connTracker) + + commandService := newCommandService( + logger.WithName("commandService"), + reader, + nginxDeployments, + connTracker, + statusQueue, + ) + fileService := newFileService(logger.WithName("fileService"), nginxDeployments, connTracker) + return &NginxUpdaterImpl{ - logger: logger, - plus: plus, - CommandService: newCommandService(logger.WithName("commandService")), - FileService: newFileService(logger.WithName("fileService")), + logger: logger, + plus: plus, + NginxDeployments: nginxDeployments, + CommandService: commandService, + FileService: fileService, + retryTimeout: retryUpstreamTimeout, } } // UpdateConfig sends the nginx configuration to the agent. -func (n *NginxUpdaterImpl) UpdateConfig(files int) { - n.logger.Info("Sending nginx configuration to agent", "numFiles", files) +// Returns whether the configuration was sent to any agents. +// +// The flow of events is as follows: +// - Set the configuration files on the deployment. +// - Broadcast the message containing file metadata to all pods (subscriptions) for the deployment. +// - Agent receives a ConfigApplyRequest with the list of file metadata. +// - Agent calls GetFile for each file in the list, which we send back to the agent. +// - Agent updates nginx, and responds with a DataPlaneResponse. +// - Subscriber responds back to the broadcaster to inform that the transaction is complete. +// - If any errors occurred, they are set on the deployment for the handler to use in the status update. +func (n *NginxUpdaterImpl) UpdateConfig( + deployment *Deployment, + files []File, +) bool { + n.logger.Info("Sending nginx configuration to agent") + + msg := deployment.SetFiles(files) + applied := deployment.GetBroadcaster().Send(msg) + + deployment.SetLatestConfigError(deployment.GetConfigurationStatus()) + + return applied } // UpdateUpstreamServers sends an APIRequest to the agent to update upstream servers using the NGINX Plus API. // Only applicable when using NGINX Plus. -func (n *NginxUpdaterImpl) UpdateUpstreamServers() { +// Returns whether the configuration was sent to any agents. +func (n *NginxUpdaterImpl) UpdateUpstreamServers( + deployment *Deployment, + conf dataplane.Configuration, +) bool { if !n.plus { - return + return false + } + + broadcaster := deployment.GetBroadcaster() + + // reset the latest error to nil now that we're applying new config + deployment.SetLatestUpstreamError(nil) + + // TODO(sberman): optimize this by only sending updates that are necessary. + // Call GetUpstreams first (will need Subscribers to send responses back), and + // then determine which upstreams actually need to be updated. 
+ + var errs []error + var applied bool + actions := make([]*pb.NGINXPlusAction, 0, len(conf.Upstreams)+len(conf.StreamUpstreams)) + for _, upstream := range conf.Upstreams { + action := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: buildHTTPUpstreamServers(upstream), + }, + } + actions = append(actions, action) + } + + for _, upstream := range conf.StreamUpstreams { + action := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: buildStreamUpstreamServers(upstream), + }, + } + actions = append(actions, action) + } + + for _, action := range actions { + msg := broadcast.NginxAgentMessage{ + Type: broadcast.APIRequest, + NGINXPlusAction: action, + } + + requestApplied, err := n.sendRequest(broadcaster, msg, deployment) + if err != nil { + errs = append(errs, fmt.Errorf( + "couldn't update upstream via the API: %w", deployment.GetConfigurationStatus())) + } + applied = applied || requestApplied + } + + if len(errs) != 0 { + deployment.SetLatestUpstreamError(errors.Join(errs...)) + } else if applied { + n.logger.Info("Updated upstream servers using NGINX Plus API") + } + + // Store the most recent actions on the deployment so any new subscribers can apply them when first connecting. + deployment.SetNGINXPlusActions(actions) + + return applied +} + +func buildHTTPUpstreamServers(upstream dataplane.Upstream) *pb.UpdateHTTPUpstreamServers { + return &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: upstream.Name, + Servers: buildUpstreamServers(upstream), + } +} + +func buildStreamUpstreamServers(upstream dataplane.Upstream) *pb.UpdateStreamServers { + return &pb.UpdateStreamServers{ + UpstreamStreamName: upstream.Name, + Servers: buildUpstreamServers(upstream), + } +} + +func buildUpstreamServers(upstream dataplane.Upstream) []*structpb.Struct { + servers := make([]*structpb.Struct, 0, len(upstream.Endpoints)) + + for _, endpoint := range upstream.Endpoints { + port, format := getPortAndIPFormat(endpoint) + value := fmt.Sprintf(format, endpoint.Address, port) + + server := &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue(value), + }, + } + + servers = append(servers, server) + } + + return servers +} + +func (n *NginxUpdaterImpl) sendRequest( + broadcaster broadcast.Broadcaster, + msg broadcast.NginxAgentMessage, + deployment *Deployment, +) (bool, error) { + // retry the API update request because sometimes nginx isn't quite ready after the config apply reload + ctx, cancel := context.WithTimeout(context.Background(), n.retryTimeout) + defer cancel() + + var applied bool + if err := wait.PollUntilContextCancel( + ctx, + 500*time.Millisecond, + true, // poll immediately + func(_ context.Context) (bool, error) { + applied = broadcaster.Send(msg) + if statusErr := deployment.GetConfigurationStatus(); statusErr != nil { + return false, nil //nolint:nilerr // will get error once done polling + } + + return true, nil + }, + ); err != nil { + return applied, err + } + + return applied, nil +} + +func getPortAndIPFormat(ep resolver.Endpoint) (string, string) { + var port string + + if ep.Port != 0 { + port = fmt.Sprintf(":%d", ep.Port) + } + + format := "%s%s" + if ep.IPv6 { + format = "[%s]%s" } - n.logger.Info("Updating upstream servers using NGINX Plus API") + return port, format } diff --git a/internal/mode/static/nginx/agent/agent_test.go b/internal/mode/static/nginx/agent/agent_test.go new file mode 100644 index 0000000000..b159d5be5c --- /dev/null 
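Note (illustrative, not part of this patch): sendRequest above retries the NGINX Plus API action because nginx may still be reloading immediately after a config apply. The sketch below shows the same polling shape with wait.PollUntilContextCancel; the broadcaster and deployment are replaced by hypothetical callback parameters, so this is a shape sketch rather than the real implementation.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// sendWithRetry keeps re-sending an action until the data plane reports no
// configuration error or the overall timeout expires. The send and
// latestConfigError callbacks stand in for the broadcaster and deployment.
func sendWithRetry(
	timeout time.Duration,
	send func() bool,
	latestConfigError func() error,
) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	var applied bool
	err := wait.PollUntilContextCancel(
		ctx,
		500*time.Millisecond,
		true, // poll immediately
		func(context.Context) (bool, error) {
			applied = send()
			if statusErr := latestConfigError(); statusErr != nil {
				return false, nil // not ready yet; keep polling until the timeout
			}
			return true, nil
		},
	)
	return applied, err
}

func main() {
	attempts := 0
	applied, err := sendWithRetry(
		2*time.Second,
		func() bool { attempts++; return true },
		func() error {
			if attempts < 3 {
				return errors.New("nginx still reloading")
			}
			return nil
		},
	)
	fmt.Println(applied, err, attempts)
}

Returning (false, nil) from the condition keeps the poll going until the timeout, which matches the nilerr-suppressed branch in sendRequest above.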
+++ b/internal/mode/static/nginx/agent/agent_test.go @@ -0,0 +1,307 @@ +package agent + +import ( + "errors" + "fmt" + "testing" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/types/known/structpb" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast/broadcastfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +func TestUpdateConfig(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + configApplied bool + expErr bool + }{ + { + name: "success", + configApplied: true, + expErr: false, + }, + { + name: "error returned from agent", + configApplied: true, + expErr: true, + }, + { + name: "configuration not applied", + configApplied: false, + expErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} + fakeBroadcaster.SendReturns(test.configApplied) + + plus := false + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, plus) + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: make(map[string]error), + } + + file := File{ + Meta: &pb.FileMeta{ + Name: "test.conf", + Hash: "12345", + }, + Contents: []byte("test content"), + } + + testErr := errors.New("test error") + if test.expErr { + deployment.SetPodErrorStatus("pod1", testErr) + } + + applied := updater.UpdateConfig(deployment, []File{file}) + + g.Expect(applied).To(Equal(test.configApplied)) + g.Expect(deployment.GetFile(file.Meta.Name, file.Meta.Hash)).To(Equal(file.Contents)) + + if test.expErr { + g.Expect(deployment.GetLatestConfigError()).To(Equal(testErr)) + // ensure that the error is cleared after the next config is applied + deployment.SetPodErrorStatus("pod1", nil) + updater.UpdateConfig(deployment, []File{file}) + g.Expect(deployment.GetLatestConfigError()).ToNot(HaveOccurred()) + } else { + g.Expect(deployment.GetLatestConfigError()).ToNot(HaveOccurred()) + } + }) + } +} + +func TestUpdateUpstreamServers(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + buildUpstreams bool + plus bool + configApplied bool + expErr bool + }{ + { + name: "success", + plus: true, + buildUpstreams: true, + configApplied: true, + expErr: false, + }, + { + name: "no upstreams to apply", + plus: true, + buildUpstreams: false, + configApplied: false, + expErr: false, + }, + { + name: "not running nginx plus", + plus: false, + configApplied: false, + expErr: false, + }, + { + name: "error returned from agent", + plus: true, + buildUpstreams: true, + configApplied: true, + expErr: true, + }, + { + name: "configuration not applied", + plus: true, + buildUpstreams: true, + configApplied: false, + expErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} + fakeBroadcaster.SendReturns(test.configApplied) + + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, test.plus) + updater.retryTimeout = 0 + + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: 
make(map[string]error), + } + + testErr := errors.New("test error") + if test.expErr { + deployment.SetPodErrorStatus("pod1", testErr) + } + + var conf dataplane.Configuration + if test.buildUpstreams { + conf = dataplane.Configuration{ + Upstreams: []dataplane.Upstream{ + { + Name: "test-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "1.2.3.4", + Port: 8080, + }, + }, + }, + }, + StreamUpstreams: []dataplane.Upstream{ + { + Name: "test-stream-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "5.6.7.8", + }, + }, + }, + }, + } + } + + applied := updater.UpdateUpstreamServers(deployment, conf) + g.Expect(applied).To(Equal(test.configApplied)) + + expActions := make([]*pb.NGINXPlusAction, 0) + if test.buildUpstreams { + expActions = []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "test-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("1.2.3.4:8080"), + }, + }, + }, + }, + }, + }, + { + Action: &pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: &pb.UpdateStreamServers{ + UpstreamStreamName: "test-stream-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("5.6.7.8"), + }, + }, + }, + }, + }, + }, + } + } + + if !test.plus { + g.Expect(deployment.GetNGINXPlusActions()).To(BeNil()) + } else { + g.Expect(deployment.GetNGINXPlusActions()).To(Equal(expActions)) + } + + if test.expErr { + expErr := errors.Join( + fmt.Errorf("couldn't update upstream via the API: %w", testErr), + fmt.Errorf("couldn't update upstream via the API: %w", testErr), + ) + + g.Expect(deployment.GetLatestUpstreamError()).To(Equal(expErr)) + // ensure that the error is cleared after the next config is applied + deployment.SetPodErrorStatus("pod1", nil) + updater.UpdateUpstreamServers(deployment, conf) + g.Expect(deployment.GetLatestUpstreamError()).ToNot(HaveOccurred()) + } else { + g.Expect(deployment.GetLatestUpstreamError()).ToNot(HaveOccurred()) + } + }) + } +} + +func TestGetPortAndIPFormat(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + expPort string + expFormat string + endpoint resolver.Endpoint + }{ + { + name: "IPv4 with port", + endpoint: resolver.Endpoint{ + Address: "1.2.3.4", + Port: 8080, + IPv6: false, + }, + expPort: ":8080", + expFormat: "%s%s", + }, + { + name: "IPv4 without port", + endpoint: resolver.Endpoint{ + Address: "1.2.3.4", + Port: 0, + IPv6: false, + }, + expPort: "", + expFormat: "%s%s", + }, + { + name: "IPv6 with port", + endpoint: resolver.Endpoint{ + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + Port: 8080, + IPv6: true, + }, + expPort: ":8080", + expFormat: "[%s]%s", + }, + { + name: "IPv6 without port", + endpoint: resolver.Endpoint{ + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + Port: 0, + IPv6: true, + }, + expPort: "", + expFormat: "[%s]%s", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + port, format := getPortAndIPFormat(test.endpoint) + g.Expect(port).To(Equal(test.expPort)) + g.Expect(format).To(Equal(test.expFormat)) + }) + } +} diff --git a/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go index 4d8858a173..6c3165e5b6 100644 --- a/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go +++ 
b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go @@ -5,33 +5,61 @@ import ( "sync" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) type FakeNginxUpdater struct { - UpdateConfigStub func(int) + UpdateConfigStub func(*agent.Deployment, []agent.File) bool updateConfigMutex sync.RWMutex updateConfigArgsForCall []struct { - arg1 int + arg1 *agent.Deployment + arg2 []agent.File } - UpdateUpstreamServersStub func() + updateConfigReturns struct { + result1 bool + } + updateConfigReturnsOnCall map[int]struct { + result1 bool + } + UpdateUpstreamServersStub func(*agent.Deployment, dataplane.Configuration) bool updateUpstreamServersMutex sync.RWMutex updateUpstreamServersArgsForCall []struct { + arg1 *agent.Deployment + arg2 dataplane.Configuration + } + updateUpstreamServersReturns struct { + result1 bool + } + updateUpstreamServersReturnsOnCall map[int]struct { + result1 bool } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *FakeNginxUpdater) UpdateConfig(arg1 int) { +func (fake *FakeNginxUpdater) UpdateConfig(arg1 *agent.Deployment, arg2 []agent.File) bool { + var arg2Copy []agent.File + if arg2 != nil { + arg2Copy = make([]agent.File, len(arg2)) + copy(arg2Copy, arg2) + } fake.updateConfigMutex.Lock() + ret, specificReturn := fake.updateConfigReturnsOnCall[len(fake.updateConfigArgsForCall)] fake.updateConfigArgsForCall = append(fake.updateConfigArgsForCall, struct { - arg1 int - }{arg1}) + arg1 *agent.Deployment + arg2 []agent.File + }{arg1, arg2Copy}) stub := fake.UpdateConfigStub - fake.recordInvocation("UpdateConfig", []interface{}{arg1}) + fakeReturns := fake.updateConfigReturns + fake.recordInvocation("UpdateConfig", []interface{}{arg1, arg2Copy}) fake.updateConfigMutex.Unlock() if stub != nil { - fake.UpdateConfigStub(arg1) + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 } + return fakeReturns.result1 } func (fake *FakeNginxUpdater) UpdateConfigCallCount() int { @@ -40,29 +68,60 @@ func (fake *FakeNginxUpdater) UpdateConfigCallCount() int { return len(fake.updateConfigArgsForCall) } -func (fake *FakeNginxUpdater) UpdateConfigCalls(stub func(int)) { +func (fake *FakeNginxUpdater) UpdateConfigCalls(stub func(*agent.Deployment, []agent.File) bool) { fake.updateConfigMutex.Lock() defer fake.updateConfigMutex.Unlock() fake.UpdateConfigStub = stub } -func (fake *FakeNginxUpdater) UpdateConfigArgsForCall(i int) int { +func (fake *FakeNginxUpdater) UpdateConfigArgsForCall(i int) (*agent.Deployment, []agent.File) { fake.updateConfigMutex.RLock() defer fake.updateConfigMutex.RUnlock() argsForCall := fake.updateConfigArgsForCall[i] - return argsForCall.arg1 + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNginxUpdater) UpdateConfigReturns(result1 bool) { + fake.updateConfigMutex.Lock() + defer fake.updateConfigMutex.Unlock() + fake.UpdateConfigStub = nil + fake.updateConfigReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakeNginxUpdater) UpdateConfigReturnsOnCall(i int, result1 bool) { + fake.updateConfigMutex.Lock() + defer fake.updateConfigMutex.Unlock() + fake.UpdateConfigStub = nil + if fake.updateConfigReturnsOnCall == nil { + fake.updateConfigReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.updateConfigReturnsOnCall[i] = struct { + result1 bool + }{result1} } -func (fake *FakeNginxUpdater) UpdateUpstreamServers() { +func (fake *FakeNginxUpdater) 
UpdateUpstreamServers(arg1 *agent.Deployment, arg2 dataplane.Configuration) bool { fake.updateUpstreamServersMutex.Lock() + ret, specificReturn := fake.updateUpstreamServersReturnsOnCall[len(fake.updateUpstreamServersArgsForCall)] fake.updateUpstreamServersArgsForCall = append(fake.updateUpstreamServersArgsForCall, struct { - }{}) + arg1 *agent.Deployment + arg2 dataplane.Configuration + }{arg1, arg2}) stub := fake.UpdateUpstreamServersStub - fake.recordInvocation("UpdateUpstreamServers", []interface{}{}) + fakeReturns := fake.updateUpstreamServersReturns + fake.recordInvocation("UpdateUpstreamServers", []interface{}{arg1, arg2}) fake.updateUpstreamServersMutex.Unlock() if stub != nil { - fake.UpdateUpstreamServersStub() + return stub(arg1, arg2) } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 } func (fake *FakeNginxUpdater) UpdateUpstreamServersCallCount() int { @@ -71,12 +130,42 @@ func (fake *FakeNginxUpdater) UpdateUpstreamServersCallCount() int { return len(fake.updateUpstreamServersArgsForCall) } -func (fake *FakeNginxUpdater) UpdateUpstreamServersCalls(stub func()) { +func (fake *FakeNginxUpdater) UpdateUpstreamServersCalls(stub func(*agent.Deployment, dataplane.Configuration) bool) { fake.updateUpstreamServersMutex.Lock() defer fake.updateUpstreamServersMutex.Unlock() fake.UpdateUpstreamServersStub = stub } +func (fake *FakeNginxUpdater) UpdateUpstreamServersArgsForCall(i int) (*agent.Deployment, dataplane.Configuration) { + fake.updateUpstreamServersMutex.RLock() + defer fake.updateUpstreamServersMutex.RUnlock() + argsForCall := fake.updateUpstreamServersArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServersReturns(result1 bool) { + fake.updateUpstreamServersMutex.Lock() + defer fake.updateUpstreamServersMutex.Unlock() + fake.UpdateUpstreamServersStub = nil + fake.updateUpstreamServersReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakeNginxUpdater) UpdateUpstreamServersReturnsOnCall(i int, result1 bool) { + fake.updateUpstreamServersMutex.Lock() + defer fake.updateUpstreamServersMutex.Unlock() + fake.UpdateUpstreamServersStub = nil + if fake.updateUpstreamServersReturnsOnCall == nil { + fake.updateUpstreamServersReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.updateUpstreamServersReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + func (fake *FakeNginxUpdater) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() diff --git a/internal/mode/static/nginx/agent/broadcast/broadcast.go b/internal/mode/static/nginx/agent/broadcast/broadcast.go new file mode 100644 index 0000000000..2b21ae1117 --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/broadcast.go @@ -0,0 +1,159 @@ +package broadcast + +import ( + "context" + "sync" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "k8s.io/apimachinery/pkg/util/uuid" +) + +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate + +//counterfeiter:generate . Broadcaster + +// Broadcaster defines an interface for consumers to subscribe to File updates. +type Broadcaster interface { + Subscribe() SubscriberChannels + Send(NginxAgentMessage) bool + CancelSubscription(string) +} + +// SubscriberChannels are the channels sent to the subscriber to listen and respond on. +// The ID is used for map lookup to delete a subscriber when it's gone. 
+type SubscriberChannels struct { + ListenCh <-chan NginxAgentMessage + ResponseCh chan<- struct{} + ID string +} + +// storedChannels are the same channels used in the SubscriberChannels, but reverse direction. +// These are used to store the channels for the broadcaster to send and listen on, +// and can be looked up in the map using the same ID. +type storedChannels struct { + listenCh chan<- NginxAgentMessage + responseCh <-chan struct{} + id string +} + +// DeploymentBroadcaster sends out a signal when an nginx Deployment has updated +// configuration files. The signal is received by any agent Subscription that cares +// about this Deployment. The agent Subscription will then send a response of whether or not +// the configuration was successfully applied. +type DeploymentBroadcaster struct { + publishCh chan NginxAgentMessage + subCh chan storedChannels + unsubCh chan string + listeners map[string]storedChannels + doneCh chan struct{} +} + +// NewDeploymentBroadcaster returns a new instance of a DeploymentBroadcaster. +func NewDeploymentBroadcaster(ctx context.Context, stopCh chan struct{}) *DeploymentBroadcaster { + broadcaster := &DeploymentBroadcaster{ + listeners: make(map[string]storedChannels), + publishCh: make(chan NginxAgentMessage), + subCh: make(chan storedChannels), + unsubCh: make(chan string), + doneCh: make(chan struct{}), + } + go broadcaster.run(ctx, stopCh) + + return broadcaster +} + +// Subscribe allows a listener to subscribe to broadcast messages. It returns the channel +// to listen on for messages, as well as a channel to respond on. +func (b *DeploymentBroadcaster) Subscribe() SubscriberChannels { + listenCh := make(chan NginxAgentMessage) + responseCh := make(chan struct{}) + id := string(uuid.NewUUID()) + + subscriberChans := SubscriberChannels{ + ID: id, + ListenCh: listenCh, + ResponseCh: responseCh, + } + storedChans := storedChannels{ + id: id, + listenCh: listenCh, + responseCh: responseCh, + } + + b.subCh <- storedChans + return subscriberChans +} + +// Send the message to all listeners. Wait for all listeners to respond. +// Returns true if there were listeners that received the message. +func (b *DeploymentBroadcaster) Send(message NginxAgentMessage) bool { + b.publishCh <- message + <-b.doneCh + + return len(b.listeners) > 0 +} + +// CancelSubscription removes a Subscriber from the channel list. +func (b *DeploymentBroadcaster) CancelSubscription(id string) { + b.unsubCh <- id +} + +// run starts the broadcaster loop. It handles the following events: +// - if stopCh is closed, return. +// - if receiving a new subscriber, add it to the subscriber list. +// - if receiving a canceled subscription, remove it from the subscriber list. +// - if receiving a message to publish, send it to all subscribers. +func (b *DeploymentBroadcaster) run(ctx context.Context, stopCh chan struct{}) { + for { + select { + case <-stopCh: + return + case <-ctx.Done(): + return + case channels := <-b.subCh: + b.listeners[channels.id] = channels + case id := <-b.unsubCh: + delete(b.listeners, id) + case msg := <-b.publishCh: + var wg sync.WaitGroup + wg.Add(len(b.listeners)) + + for _, channels := range b.listeners { + go func() { + defer wg.Done() + + // send message and wait for it to be read + channels.listenCh <- msg + // wait for response + <-channels.responseCh + }() + } + wg.Wait() + + b.doneCh <- struct{}{} + } + } +} + +// MessageType is the type of message to be sent. +type MessageType int + +const ( + // ConfigApplyRequest sends files to update nginx configuration. 
+ ConfigApplyRequest MessageType = iota + // APIRequest sends an NGINX Plus API request to update configuration. + APIRequest +) + +// NginxAgentMessage is sent to all subscribers to send to the nginx agents for either a ConfigApplyRequest +// or an APIActionRequest. +type NginxAgentMessage struct { + // ConfigVersion is the hashed configuration version of the included files. + ConfigVersion string + // NGINXPlusAction is an NGINX Plus API action to be sent. + NGINXPlusAction *pb.NGINXPlusAction + // FileOverviews contain the overviews of all files to be sent. + FileOverviews []*pb.File + // Type defines the type of message to be sent. + Type MessageType +} diff --git a/internal/mode/static/nginx/agent/broadcast/broadcast_test.go b/internal/mode/static/nginx/agent/broadcast/broadcast_test.go new file mode 100644 index 0000000000..950293c4e1 --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/broadcast_test.go @@ -0,0 +1,108 @@ +package broadcast_test + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" +) + +func TestSubscribe(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + subscriber := broadcaster.Subscribe() + g.Expect(subscriber.ID).NotTo(BeEmpty()) + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + go func() { + result := broadcaster.Send(message) + g.Expect(result).To(BeTrue()) + }() + + g.Eventually(subscriber.ListenCh).Should(Receive(Equal(message))) +} + +func TestSubscribe_MultipleListeners(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + subscriber1 := broadcaster.Subscribe() + subscriber2 := broadcaster.Subscribe() + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + go func() { + result := broadcaster.Send(message) + g.Expect(result).To(BeTrue()) + }() + + g.Eventually(subscriber1.ListenCh).Should(Receive(Equal(message))) + g.Eventually(subscriber2.ListenCh).Should(Receive(Equal(message))) + + subscriber1.ResponseCh <- struct{}{} + subscriber2.ResponseCh <- struct{}{} +} + +func TestSubscribe_NoListeners(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + result := broadcaster.Send(message) + g.Expect(result).To(BeFalse()) +} + +func TestCancelSubscription(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + stopCh := make(chan struct{}) + defer close(stopCh) + + broadcaster := broadcast.NewDeploymentBroadcaster(context.Background(), stopCh) + + subscriber := broadcaster.Subscribe() + + broadcaster.CancelSubscription(subscriber.ID) + + message := broadcast.NginxAgentMessage{ + ConfigVersion: "v1", + Type: broadcast.ConfigApplyRequest, + } + + go func() { + result := broadcaster.Send(message) + g.Expect(result).To(BeFalse()) + }() + + g.Consistently(subscriber.ListenCh).ShouldNot(Receive()) +} diff --git a/internal/mode/static/nginx/agent/broadcast/broadcastfakes/fake_broadcaster.go 
b/internal/mode/static/nginx/agent/broadcast/broadcastfakes/fake_broadcaster.go new file mode 100644 index 0000000000..0d820ef98a --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/broadcastfakes/fake_broadcaster.go @@ -0,0 +1,215 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package broadcastfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" +) + +type FakeBroadcaster struct { + CancelSubscriptionStub func(string) + cancelSubscriptionMutex sync.RWMutex + cancelSubscriptionArgsForCall []struct { + arg1 string + } + SendStub func(broadcast.NginxAgentMessage) bool + sendMutex sync.RWMutex + sendArgsForCall []struct { + arg1 broadcast.NginxAgentMessage + } + sendReturns struct { + result1 bool + } + sendReturnsOnCall map[int]struct { + result1 bool + } + SubscribeStub func() broadcast.SubscriberChannels + subscribeMutex sync.RWMutex + subscribeArgsForCall []struct { + } + subscribeReturns struct { + result1 broadcast.SubscriberChannels + } + subscribeReturnsOnCall map[int]struct { + result1 broadcast.SubscriberChannels + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeBroadcaster) CancelSubscription(arg1 string) { + fake.cancelSubscriptionMutex.Lock() + fake.cancelSubscriptionArgsForCall = append(fake.cancelSubscriptionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.CancelSubscriptionStub + fake.recordInvocation("CancelSubscription", []interface{}{arg1}) + fake.cancelSubscriptionMutex.Unlock() + if stub != nil { + fake.CancelSubscriptionStub(arg1) + } +} + +func (fake *FakeBroadcaster) CancelSubscriptionCallCount() int { + fake.cancelSubscriptionMutex.RLock() + defer fake.cancelSubscriptionMutex.RUnlock() + return len(fake.cancelSubscriptionArgsForCall) +} + +func (fake *FakeBroadcaster) CancelSubscriptionCalls(stub func(string)) { + fake.cancelSubscriptionMutex.Lock() + defer fake.cancelSubscriptionMutex.Unlock() + fake.CancelSubscriptionStub = stub +} + +func (fake *FakeBroadcaster) CancelSubscriptionArgsForCall(i int) string { + fake.cancelSubscriptionMutex.RLock() + defer fake.cancelSubscriptionMutex.RUnlock() + argsForCall := fake.cancelSubscriptionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBroadcaster) Send(arg1 broadcast.NginxAgentMessage) bool { + fake.sendMutex.Lock() + ret, specificReturn := fake.sendReturnsOnCall[len(fake.sendArgsForCall)] + fake.sendArgsForCall = append(fake.sendArgsForCall, struct { + arg1 broadcast.NginxAgentMessage + }{arg1}) + stub := fake.SendStub + fakeReturns := fake.sendReturns + fake.recordInvocation("Send", []interface{}{arg1}) + fake.sendMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBroadcaster) SendCallCount() int { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + return len(fake.sendArgsForCall) +} + +func (fake *FakeBroadcaster) SendCalls(stub func(broadcast.NginxAgentMessage) bool) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = stub +} + +func (fake *FakeBroadcaster) SendArgsForCall(i int) broadcast.NginxAgentMessage { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + argsForCall := fake.sendArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeBroadcaster) SendReturns(result1 bool) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = nil + fake.sendReturns = struct { + result1 bool + 
}{result1} +} + +func (fake *FakeBroadcaster) SendReturnsOnCall(i int, result1 bool) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = nil + if fake.sendReturnsOnCall == nil { + fake.sendReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.sendReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *FakeBroadcaster) Subscribe() broadcast.SubscriberChannels { + fake.subscribeMutex.Lock() + ret, specificReturn := fake.subscribeReturnsOnCall[len(fake.subscribeArgsForCall)] + fake.subscribeArgsForCall = append(fake.subscribeArgsForCall, struct { + }{}) + stub := fake.SubscribeStub + fakeReturns := fake.subscribeReturns + fake.recordInvocation("Subscribe", []interface{}{}) + fake.subscribeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeBroadcaster) SubscribeCallCount() int { + fake.subscribeMutex.RLock() + defer fake.subscribeMutex.RUnlock() + return len(fake.subscribeArgsForCall) +} + +func (fake *FakeBroadcaster) SubscribeCalls(stub func() broadcast.SubscriberChannels) { + fake.subscribeMutex.Lock() + defer fake.subscribeMutex.Unlock() + fake.SubscribeStub = stub +} + +func (fake *FakeBroadcaster) SubscribeReturns(result1 broadcast.SubscriberChannels) { + fake.subscribeMutex.Lock() + defer fake.subscribeMutex.Unlock() + fake.SubscribeStub = nil + fake.subscribeReturns = struct { + result1 broadcast.SubscriberChannels + }{result1} +} + +func (fake *FakeBroadcaster) SubscribeReturnsOnCall(i int, result1 broadcast.SubscriberChannels) { + fake.subscribeMutex.Lock() + defer fake.subscribeMutex.Unlock() + fake.SubscribeStub = nil + if fake.subscribeReturnsOnCall == nil { + fake.subscribeReturnsOnCall = make(map[int]struct { + result1 broadcast.SubscriberChannels + }) + } + fake.subscribeReturnsOnCall[i] = struct { + result1 broadcast.SubscriberChannels + }{result1} +} + +func (fake *FakeBroadcaster) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.cancelSubscriptionMutex.RLock() + defer fake.cancelSubscriptionMutex.RUnlock() + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + fake.subscribeMutex.RLock() + defer fake.subscribeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeBroadcaster) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ broadcast.Broadcaster = new(FakeBroadcaster) diff --git a/internal/mode/static/nginx/agent/broadcast/doc.go b/internal/mode/static/nginx/agent/broadcast/doc.go new file mode 100644 index 0000000000..3640dcfa5e --- /dev/null +++ b/internal/mode/static/nginx/agent/broadcast/doc.go @@ -0,0 +1,5 @@ +/* +Package broadcast contains the functions for creating a broadcaster to send updates to consumers. +It is used to send nginx configuration for an nginx Deployment to all pod subscribers for that Deployment. 
+*/ +package broadcast diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go index 79c863d129..04b482ffba 100644 --- a/internal/mode/static/nginx/agent/command.go +++ b/internal/mode/static/nginx/agent/command.go @@ -4,28 +4,59 @@ import ( "context" "errors" "fmt" + "io" + "strings" "time" "github.com/go-logr/logr" + "github.com/google/uuid" pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcStatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" ) +const connectionWaitTimeout = 30 * time.Second + // commandService handles the connection and subscription to the data plane agent. type commandService struct { pb.CommandServiceServer - connTracker *agentgrpc.ConnectionsTracker + nginxDeployments *DeploymentStore + statusQueue *status.Queue + connTracker agentgrpc.ConnectionsTracker + k8sReader client.Reader // TODO(sberman): all logs are at Info level right now. Adjust appropriately. - logger logr.Logger + logger logr.Logger + connectionTimeout time.Duration } -func newCommandService(logger logr.Logger) *commandService { +func newCommandService( + logger logr.Logger, + reader client.Reader, + depStore *DeploymentStore, + connTracker agentgrpc.ConnectionsTracker, + statusQueue *status.Queue, +) *commandService { return &commandService{ - logger: logger, - connTracker: agentgrpc.NewConnectionsTracker(), + connectionTimeout: connectionWaitTimeout, + k8sReader: reader, + logger: logger, + connTracker: connTracker, + nginxDeployments: depStore, + statusQueue: statusQueue, } } @@ -34,6 +65,8 @@ func (cs *commandService) Register(server *grpc.Server) { } // CreateConnection registers a data plane agent with the control plane. +// The nginx InstanceID could be empty if the agent hasn't discovered its nginx instance yet. +// Once discovered, the agent will send an UpdateDataPlaneStatus request with the nginx InstanceID set. 
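// For illustration only (not part of this patch): a minimal sketch of how the
// constructors above might be wired together by the control plane. logger,
// k8sReader, connTracker, statusQueue, and grpcServer are assumed placeholders
// for values the caller already has; only newCommandService, NewDeploymentStore,
// and Register come from this diff.
func exampleWireCommandService(
	logger logr.Logger,
	k8sReader client.Reader,
	connTracker agentgrpc.ConnectionsTracker,
	statusQueue *status.Queue,
	grpcServer *grpc.Server,
) {
	depStore := NewDeploymentStore(connTracker)
	cs := newCommandService(logger, k8sReader, depStore, connTracker, statusQueue)
	cs.Register(grpcServer)
}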
func (cs *commandService) CreateConnection( ctx context.Context, req *pb.CreateConnectionRequest, @@ -47,10 +80,29 @@ func (cs *commandService) CreateConnection( return nil, agentgrpc.ErrStatusInvalidConnection } - podName := req.GetResource().GetContainerInfo().GetHostname() - + resource := req.GetResource() + podName := resource.GetContainerInfo().GetHostname() cs.logger.Info(fmt.Sprintf("Creating connection for nginx pod: %s", podName)) - cs.connTracker.Track(gi.IPAddress, podName) + + owner, err := cs.getPodOwner(podName) + if err != nil { + response := &pb.CreateConnectionResponse{ + Response: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_ERROR, + Message: "error getting pod owner", + Error: err.Error(), + }, + } + cs.logger.Error(err, "error getting pod owner") + return response, grpcStatus.Errorf(codes.Internal, "error getting pod owner %s", err.Error()) + } + + conn := agentgrpc.Connection{ + Parent: owner, + PodName: podName, + InstanceID: getNginxInstanceID(resource.GetInstances()), + } + cs.connTracker.Track(gi.IPAddress, conn) return &pb.CreateConnectionResponse{ Response: &pb.CommandResponse{ @@ -60,6 +112,13 @@ func (cs *commandService) CreateConnection( } // Subscribe is a decoupled communication mechanism between the data plane agent and control plane. +// The series of events are as follows: +// - Wait for the agent to register its nginx instance with the control plane. +// - Grab the most recent deployment configuration for itself, and attempt to apply it. +// - Subscribe to any future updates from the NginxUpdater and start a loop to listen for those updates. +// If any connection or unrecoverable errors occur, return and agent should re-establish a subscription. +// If errors occur with applying the config, log and put those errors into the status queue to be written +// to the Gateway status. func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error { ctx := in.Context() @@ -68,73 +127,361 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error return agentgrpc.ErrStatusInvalidConnection } - cs.logger.Info(fmt.Sprintf("Received subscribe request from %q", gi.IPAddress)) - - go cs.listenForDataPlaneResponse(ctx, in) - - // wait for the agent to report itself - podName, err := cs.waitForConnection(ctx, gi) + // wait for the agent to report itself and nginx + conn, deployment, err := cs.waitForConnection(ctx, gi) if err != nil { cs.logger.Error(err, "error waiting for connection") return err } - cs.logger.Info(fmt.Sprintf("Handling subscription for %s/%s", podName, gi.IPAddress)) + cs.logger.Info(fmt.Sprintf("Successfully connected to nginx agent %s", conn.PodName)) + + msgr := messenger.New(in) + go msgr.Run(ctx) + + // apply current config before starting event loop + deployment.Lock.RLock() + if err := cs.setInitialConfig(ctx, deployment, conn, msgr); err != nil { + deployment.Lock.RUnlock() + + return err + } + deployment.Lock.RUnlock() + + // subscribe to the deployment broadcaster to get file updates + broadcaster := deployment.GetBroadcaster() + channels := broadcaster.Subscribe() + defer broadcaster.CancelSubscription(channels.ID) + for { + // When a message is received over the ListenCh, it is assumed and required that the + // deployment object is already LOCKED. This lock is acquired by the event handler before calling + // `updateNginxConfig`. 
The entire transaction (as described above in the function comment) + must be locked to prevent the deployment files from changing during the transaction. + This means that the lock is held until we receive either an error or response from the agent + (via msgr.Errors() or msgr.Messages()) and respond back, finally returning to the event handler + which releases the lock. select { case <-ctx.Done(): - return ctx.Err() - case <-time.After(1 * time.Minute): - dummyRequest := &pb.ManagementPlaneRequest{ - Request: &pb.ManagementPlaneRequest_HealthRequest{ - HealthRequest: &pb.HealthRequest{}, - }, + select { + case channels.ResponseCh <- struct{}{}: + default: } - if err := in.Send(dummyRequest); err != nil { // TODO(sberman): will likely need retry logic + return grpcStatus.Error(codes.Canceled, context.Cause(ctx).Error()) + case msg := <-channels.ListenCh: + var req *pb.ManagementPlaneRequest + switch msg.Type { + case broadcast.ConfigApplyRequest: + req = buildRequest(msg.FileOverviews, conn.InstanceID, msg.ConfigVersion) + case broadcast.APIRequest: + req = buildPlusAPIRequest(msg.NGINXPlusAction, conn.InstanceID) + default: + panic(fmt.Sprintf("unknown request type %d", msg.Type)) + } + + if err := msgr.Send(ctx, req); err != nil { cs.logger.Error(err, "error sending request to agent") + deployment.SetPodErrorStatus(conn.PodName, err) + channels.ResponseCh <- struct{}{} + + return grpcStatus.Error(codes.Internal, err.Error()) } + case err = <-msgr.Errors(): + cs.logger.Error(err, "connection error", "pod", conn.PodName) + deployment.SetPodErrorStatus(conn.PodName, err) + channels.ResponseCh <- struct{}{} + + if errors.Is(err, io.EOF) { + return grpcStatus.Error(codes.Aborted, err.Error()) + } + return grpcStatus.Error(codes.Internal, err.Error()) + case msg := <-msgr.Messages(): + res := msg.GetCommandResponse() + if res.GetStatus() != pb.CommandResponse_COMMAND_STATUS_OK { + err := fmt.Errorf("bad response from agent: msg: %s; error: %s", res.GetMessage(), res.GetError()) + deployment.SetPodErrorStatus(conn.PodName, err) + } else { + deployment.SetPodErrorStatus(conn.PodName, nil) + } + channels.ResponseCh <- struct{}{} } } } -// TODO(sberman): current issue: when control plane restarts, agent doesn't re-establish a CreateConnection call, -// so this fails.
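// Minimal subscriber sketch (illustrative, not part of this patch) of the
// contract the loop above relies on: DeploymentBroadcaster.Send blocks until
// every subscriber has read from ListenCh and replied on ResponseCh, so each
// received message must be acknowledged exactly once, even on error, or Send
// (and the event handler holding the Deployment lock) would block
// indefinitely. handleMessage is an assumed placeholder.
func exampleSubscriberLoop(deployment *Deployment, handleMessage func(broadcast.NginxAgentMessage) error) {
	channels := deployment.GetBroadcaster().Subscribe()
	defer deployment.GetBroadcaster().CancelSubscription(channels.ID)

	for msg := range channels.ListenCh {
		_ = handleMessage(msg)            // forward to the agent and wait for its CommandResponse
		channels.ResponseCh <- struct{}{} // always acknowledge, success or failure
	}
}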
-func (cs *commandService) waitForConnection(ctx context.Context, gi grpcContext.GrpcInfo) (string, error) { - var podName string +func (cs *commandService) waitForConnection( + ctx context.Context, + gi grpcContext.GrpcInfo, +) (*agentgrpc.Connection, *Deployment, error) { ticker := time.NewTicker(time.Second) defer ticker.Stop() - timer := time.NewTimer(30 * time.Second) + timer := time.NewTimer(cs.connectionTimeout) defer timer.Stop() + agentConnectErr := errors.New("timed out waiting for agent to register nginx") + deploymentStoreErr := errors.New("timed out waiting for nginx deployment to be added to store") + + var err error for { select { case <-ctx.Done(): - return "", ctx.Err() + return nil, nil, ctx.Err() case <-timer.C: - return "", errors.New("timed out waiting for agent connection") + return nil, nil, err case <-ticker.C: - if podName = cs.connTracker.GetConnection(gi.IPAddress); podName != "" { - return podName, nil + if conn := cs.connTracker.GetConnection(gi.IPAddress); conn.Ready() { + // connection has been established, now ensure that the deployment exists in the store + if deployment := cs.nginxDeployments.Get(conn.Parent); deployment != nil { + return &conn, deployment, nil + } + err = deploymentStoreErr + continue + } + err = agentConnectErr + } + } +} + +// setInitialConfig gets the initial configuration for this connection and applies it. +// The caller MUST lock the deployment before calling this. +func (cs *commandService) setInitialConfig( + ctx context.Context, + deployment *Deployment, + conn *agentgrpc.Connection, + msgr messenger.Messenger, +) error { + fileOverviews, configVersion := deployment.GetFileOverviews() + if err := msgr.Send(ctx, buildRequest(fileOverviews, conn.InstanceID, configVersion)); err != nil { + cs.logAndSendErrorStatus(deployment, conn, err) + + return grpcStatus.Error(codes.Internal, err.Error()) + } + + applyErr, connErr := cs.waitForInitialConfigApply(ctx, msgr) + if connErr != nil { + cs.logger.Error(connErr, "error setting initial configuration") + + return connErr + } + + errs := []error{applyErr} + for _, action := range deployment.GetNGINXPlusActions() { + // retry the API update request because sometimes nginx isn't quite ready after the config apply reload + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + if err := wait.PollUntilContextCancel( + timeoutCtx, + 500*time.Millisecond, + true, // poll immediately + func(ctx context.Context) (bool, error) { + if err := msgr.Send(ctx, buildPlusAPIRequest(action, conn.InstanceID)); err != nil { + cs.logAndSendErrorStatus(deployment, conn, err) + + return false, grpcStatus.Error(codes.Internal, err.Error()) + } + + upstreamApplyErr, connErr := cs.waitForInitialConfigApply(ctx, msgr) + if connErr != nil { + cs.logger.Error(connErr, "error setting initial configuration") + + return false, connErr + } + + if upstreamApplyErr != nil { + return false, nil //nolint:nilerr // this error is collected at the end + } + return true, nil + }, + ); err != nil { + if strings.Contains(err.Error(), "bad response from agent") { + errs = append(errs, err) + } else { + cancel() + return err } } + cancel() } + // send the status (error or nil) to the status queue + cs.logAndSendErrorStatus(deployment, conn, errors.Join(errs...)) + + return nil } -func (cs *commandService) listenForDataPlaneResponse(ctx context.Context, in pb.CommandService_SubscribeServer) { +// waitForInitialConfigApply waits for the nginx agent to respond after a Subscriber attempts +// to apply its initial config. 
+// Two errors are returned +// - applyErr is an error applying the configuration +// - connectionErr is an error with the connection or sending the configuration +// The caller treats a connectionErr as unrecoverable, while the applyErr is used +// to set the status on the Gateway resources. +func (cs *commandService) waitForInitialConfigApply( + ctx context.Context, + msgr messenger.Messenger, +) (applyErr error, connectionErr error) { for { select { case <-ctx.Done(): - return - default: - dataPlaneResponse, err := in.Recv() - cs.logger.Info(fmt.Sprintf("Received data plane response: %v", dataPlaneResponse)) - if err != nil { - cs.logger.Error(err, "failed to receive data plane response") - return + return nil, grpcStatus.Error(codes.Canceled, context.Cause(ctx).Error()) + case err := <-msgr.Errors(): + if errors.Is(err, io.EOF) { + return nil, grpcStatus.Error(codes.Aborted, err.Error()) } + return nil, grpcStatus.Error(codes.Internal, err.Error()) + case msg := <-msgr.Messages(): + res := msg.GetCommandResponse() + if res.GetStatus() != pb.CommandResponse_COMMAND_STATUS_OK { + applyErr := fmt.Errorf("bad response from agent: msg: %s; error: %s", res.GetMessage(), res.GetError()) + return applyErr, nil + } + + return applyErr, connectionErr + } + } +} + +// logAndSendErrorStatus logs an error, sets it on the Deployment object for that Pod, and then sends +// the full Deployment error status to the status queue. This ensures that any other Pod errors that already +// exist on the Deployment are not overwritten. +// If the error is nil, then we just enqueue the nil value and don't log it, which indicates success. +func (cs *commandService) logAndSendErrorStatus(deployment *Deployment, conn *agentgrpc.Connection, err error) { + if err != nil { + cs.logger.Error(err, "error sending request to agent") + } else { + cs.logger.Info("Successfully configured nginx for new subscription", "pod", conn.PodName) + } + deployment.SetPodErrorStatus(conn.PodName, err) + + queueObj := &status.QueueObject{ + Deployment: conn.Parent, + Error: deployment.GetConfigurationStatus(), + } + cs.statusQueue.Enqueue(queueObj) +} + +func buildRequest(fileOverviews []*pb.File, instanceID, version string) *pb.ManagementPlaneRequest { + return &pb.ManagementPlaneRequest{ + MessageMeta: &pb.MessageMeta{ + MessageId: uuid.NewString(), + CorrelationId: uuid.NewString(), + Timestamp: timestamppb.Now(), + }, + Request: &pb.ManagementPlaneRequest_ConfigApplyRequest{ + ConfigApplyRequest: &pb.ConfigApplyRequest{ + Overview: &pb.FileOverview{ + Files: fileOverviews, + ConfigVersion: &pb.ConfigVersion{ + InstanceId: instanceID, + Version: version, + }, + }, + }, + }, + } +} + +func buildPlusAPIRequest(action *pb.NGINXPlusAction, instanceID string) *pb.ManagementPlaneRequest { + return &pb.ManagementPlaneRequest{ + MessageMeta: &pb.MessageMeta{ + MessageId: uuid.NewString(), + CorrelationId: uuid.NewString(), + Timestamp: timestamppb.Now(), + }, + Request: &pb.ManagementPlaneRequest_ActionRequest{ + ActionRequest: &pb.APIActionRequest{ + InstanceId: instanceID, + Action: &pb.APIActionRequest_NginxPlusAction{ + NginxPlusAction: action, + }, + }, + }, + } +} + +func (cs *commandService) getPodOwner(podName string) (types.NamespacedName, error) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + var pods v1.PodList + listOpts := &client.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{"metadata.name": podName}), + } + if err := cs.k8sReader.List(ctx, &pods, listOpts); err 
!= nil { + return types.NamespacedName{}, fmt.Errorf("error listing pods: %w", err) + } + + if len(pods.Items) == 0 { + return types.NamespacedName{}, fmt.Errorf("no pods found with name %q", podName) + } + + if len(pods.Items) > 1 { + return types.NamespacedName{}, fmt.Errorf("should only be one pod with name %q", podName) + } + pod := pods.Items[0] + + podOwnerRefs := pod.GetOwnerReferences() + if len(podOwnerRefs) != 1 { + return types.NamespacedName{}, fmt.Errorf("expected one owner reference of the nginx Pod, got %d", len(podOwnerRefs)) + } + + if podOwnerRefs[0].Kind != "ReplicaSet" { + err := fmt.Errorf("expected pod owner reference to be ReplicaSet, got %s", podOwnerRefs[0].Kind) + return types.NamespacedName{}, err + } + + var replicaSet appsv1.ReplicaSet + if err := cs.k8sReader.Get( + ctx, + types.NamespacedName{Namespace: pod.Namespace, Name: podOwnerRefs[0].Name}, + &replicaSet, + ); err != nil { + return types.NamespacedName{}, fmt.Errorf("failed to get nginx Pod's ReplicaSet: %w", err) + } + + replicaOwnerRefs := replicaSet.GetOwnerReferences() + if len(replicaOwnerRefs) != 1 { + err := fmt.Errorf("expected one owner reference of the nginx ReplicaSet, got %d", len(replicaOwnerRefs)) + return types.NamespacedName{}, err + } + + return types.NamespacedName{Namespace: pod.Namespace, Name: replicaOwnerRefs[0].Name}, nil +} + +// UpdateDataPlaneStatus is called by agent on startup and upon any change in agent metadata, +// instance metadata, or configurations. InstanceID may not be set on an initial CreateConnection, +// and will instead be set on a call to UpdateDataPlaneStatus once the agent discovers its nginx instance. +func (cs *commandService) UpdateDataPlaneStatus( + ctx context.Context, + req *pb.UpdateDataPlaneStatusRequest, +) (*pb.UpdateDataPlaneStatusResponse, error) { + if req == nil { + return nil, errors.New("empty UpdateDataPlaneStatus request") + } + + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return nil, agentgrpc.ErrStatusInvalidConnection + } + + instanceID := getNginxInstanceID(req.GetResource().GetInstances()) + if instanceID == "" { + return nil, grpcStatus.Errorf(codes.InvalidArgument, "request does not contain nginx instanceID") + } + + cs.connTracker.SetInstanceID(gi.IPAddress, instanceID) + + return &pb.UpdateDataPlaneStatusResponse{}, nil +} + +func getNginxInstanceID(instances []*pb.Instance) string { + for _, instance := range instances { + instanceType := instance.GetInstanceMeta().GetInstanceType() + if instanceType == pb.InstanceMeta_INSTANCE_TYPE_NGINX || + instanceType == pb.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS { + return instance.GetInstanceMeta().GetInstanceId() } } + + return "" } // UpdateDataPlaneHealth includes full health information about the data plane as reported by the agent. @@ -145,13 +492,3 @@ func (cs *commandService) UpdateDataPlaneHealth( ) (*pb.UpdateDataPlaneHealthResponse, error) { return &pb.UpdateDataPlaneHealthResponse{}, nil } - -// UpdateDataPlaneStatus is called by agent on startup and upon any change in agent metadata, -// instance metadata, or configurations. Since directly changing nginx configuration on the instance -// is not supported, this is a no-op for NGF. 
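// Illustrative sketch (not part of this patch) of the two-step registration
// described above, using only the tracker calls from this file; ip, owner,
// podName, and instanceID are assumed placeholders.
func exampleRegistrationFlow(cs *commandService, ip, podName, instanceID string, owner types.NamespacedName) {
	// CreateConnection: record the pod and the Deployment that owns it.
	cs.connTracker.Track(ip, agentgrpc.Connection{Parent: owner, PodName: podName})

	// UpdateDataPlaneStatus: fill in the nginx instance ID once the agent reports it.
	cs.connTracker.SetInstanceID(ip, instanceID)

	// Subscribe's waitForConnection polls until the tracked connection is complete.
	conn := cs.connTracker.GetConnection(ip)
	_ = conn.Ready()
}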
-func (cs *commandService) UpdateDataPlaneStatus( - _ context.Context, - _ *pb.UpdateDataPlaneStatusRequest, -) (*pb.UpdateDataPlaneStatusResponse, error) { - return &pb.UpdateDataPlaneStatusResponse{}, nil -} diff --git a/internal/mode/static/nginx/agent/command_test.go b/internal/mode/static/nginx/agent/command_test.go new file mode 100644 index 0000000000..340c4deda9 --- /dev/null +++ b/internal/mode/static/nginx/agent/command_test.go @@ -0,0 +1,905 @@ +package agent + +import ( + "context" + "errors" + "io" + "testing" + "time" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "google.golang.org/grpc" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast/broadcastfakes" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +type mockSubscribeServer struct { + grpc.ServerStream + ctx context.Context + recvChan chan *pb.DataPlaneResponse + sendChan chan *pb.ManagementPlaneRequest +} + +func newMockSubscribeServer(ctx context.Context) *mockSubscribeServer { + return &mockSubscribeServer{ + ctx: ctx, + recvChan: make(chan *pb.DataPlaneResponse, 1), + sendChan: make(chan *pb.ManagementPlaneRequest, 1), + } +} + +func (m *mockSubscribeServer) Send(msg *pb.ManagementPlaneRequest) error { + m.sendChan <- msg + return nil +} + +func (m *mockSubscribeServer) Recv() (*pb.DataPlaneResponse, error) { + req, ok := <-m.recvChan + if !ok { + return nil, io.EOF + } + return req, nil +} + +func (m *mockSubscribeServer) Context() context.Context { + return m.ctx +} + +func createFakeK8sClient(initObjs ...runtime.Object) (client.Client, error) { + fakeClient := fake.NewFakeClient(initObjs...) 
+ if err := fake.AddIndex(fakeClient, &v1.Pod{}, "metadata.name", func(obj client.Object) []string { + return []string{obj.GetName()} + }); err != nil { + return nil, err + } + + return fakeClient, nil +} + +func createGrpcContext() context.Context { + return grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) +} + +func createGrpcContextWithCancel() (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + return grpcContext.NewGrpcContext(ctx, grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }), cancel +} + +func TestCreateConnection(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + request *pb.CreateConnectionRequest + response *pb.CreateConnectionResponse + ctx context.Context + errString string + }{ + { + name: "successfully tracks a connection", + ctx: createGrpcContext(), + request: &pb.CreateConnectionRequest{ + Resource: &pb.Resource{ + Info: &pb.Resource_ContainerInfo{ + ContainerInfo: &pb.ContainerInfo{ + Hostname: "nginx-pod", + }, + }, + Instances: []*pb.Instance{ + { + InstanceMeta: &pb.InstanceMeta{ + InstanceId: "nginx-id", + InstanceType: pb.InstanceMeta_INSTANCE_TYPE_NGINX, + }, + }, + }, + }, + }, + response: &pb.CreateConnectionResponse{ + Response: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + }, + }, + { + name: "request is nil", + request: nil, + response: nil, + errString: "empty connection request", + }, + { + name: "context is missing data", + ctx: context.Background(), + request: &pb.CreateConnectionRequest{}, + response: nil, + errString: agentgrpc.ErrStatusInvalidConnection.Error(), + }, + { + name: "error getting pod owner", + ctx: createGrpcContext(), + request: &pb.CreateConnectionRequest{ + Resource: &pb.Resource{ + Info: &pb.Resource_ContainerInfo{ + ContainerInfo: &pb.ContainerInfo{ + Hostname: "nginx-pod", + }, + }, + }, + }, + response: &pb.CreateConnectionResponse{ + Response: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_ERROR, + Message: "error getting pod owner", + Error: "no pods found with name \"nginx-pod\"", + }, + }, + errString: "error getting pod owner", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + var objs []runtime.Object + if test.errString == "" { + pod := &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + }, + }, + }, + }, + } + + replicaSet := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-replicaset", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "nginx-deployment", + }, + }, + }, + } + + objs = []runtime.Object{pod, replicaSet} + } + + fakeClient, err := createFakeK8sClient(objs...) 
+ g.Expect(err).ToNot(HaveOccurred()) + + cs := newCommandService( + logr.Discard(), + fakeClient, + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + ) + + resp, err := cs.CreateConnection(test.ctx, test.request) + g.Expect(resp).To(Equal(test.response)) + + if test.errString != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(connTracker.TrackCallCount()).To(Equal(1)) + + expConn := agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + + key, conn := connTracker.TrackArgsForCall(0) + g.Expect(key).To(Equal("127.0.0.1")) + g.Expect(conn).To(Equal(expConn)) + }) + } +} + +func ensureFileWasSent( + g *WithT, + server *mockSubscribeServer, + expFile *pb.File, +) { + var req *pb.ManagementPlaneRequest + g.Eventually(func() *pb.ManagementPlaneRequest { + req = <-server.sendChan + return req + }).ShouldNot(BeNil()) + + g.Expect(req.GetConfigApplyRequest()).ToNot(BeNil()) + overview := req.GetConfigApplyRequest().GetOverview() + g.Expect(overview).ToNot(BeNil()) + g.Expect(overview.Files).To(ContainElement(expFile)) +} + +func ensureAPIRequestWasSent( + g *WithT, + server *mockSubscribeServer, + expAction *pb.NGINXPlusAction, +) { + var req *pb.ManagementPlaneRequest + g.Eventually(func() *pb.ManagementPlaneRequest { + req = <-server.sendChan + return req + }).ShouldNot(BeNil()) + + g.Expect(req.GetActionRequest()).ToNot(BeNil()) + action := req.GetActionRequest().GetNginxPlusAction() + g.Expect(action).To(Equal(expAction)) +} + +func verifyResponse( + g *WithT, + server *mockSubscribeServer, + responseCh chan struct{}, +) { + server.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + g.Eventually(func() struct{} { + return <-responseCh + }).Should(Equal(struct{}{})) +} + +func TestSubscribe(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + connTracker.GetConnectionReturns(conn) + + store := NewDeploymentStore(&connTracker) + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + store, + &connTracker, + status.NewQueue(), + ) + + broadcaster := &broadcastfakes.FakeBroadcaster{} + responseCh := make(chan struct{}) + listenCh := make(chan broadcast.NginxAgentMessage, 2) + subChannels := broadcast.SubscriberChannels{ + ListenCh: listenCh, + ResponseCh: responseCh, + } + broadcaster.SubscribeReturns(subChannels) + + // set the initial files and actions to be applied by the Subscription + deployment := store.StoreWithBroadcaster(conn.Parent, broadcaster) + files := []File{ + { + Meta: &pb.FileMeta{ + Name: "nginx.conf", + Hash: "12345", + }, + Contents: []byte("file contents"), + }, + } + deployment.SetFiles(files) + + initialAction := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + } + deployment.SetNGINXPlusActions([]*pb.NGINXPlusAction{initialAction}) + + ctx, cancel := createGrpcContextWithCancel() + defer cancel() + + mockServer := newMockSubscribeServer(ctx) + + // put the requests on the listenCh for the Subscription loop to pick up + loopFile := &pb.File{ + FileMeta: &pb.FileMeta{ + Name: 
"some-other.conf", + Hash: "56789", + }, + } + listenCh <- broadcast.NginxAgentMessage{ + Type: broadcast.ConfigApplyRequest, + FileOverviews: []*pb.File{loopFile}, + } + + loopAction := &pb.NGINXPlusAction{ + Action: &pb.NGINXPlusAction_UpdateStreamServers{}, + } + listenCh <- broadcast.NginxAgentMessage{ + Type: broadcast.APIRequest, + NGINXPlusAction: loopAction, + } + + // start the Subscriber + errCh := make(chan error) + go func() { + errCh <- cs.Subscribe(mockServer) + }() + + // ensure that the initial config file was sent when the Subscription connected + expFile := &pb.File{ + FileMeta: &pb.FileMeta{ + Name: "nginx.conf", + Hash: "12345", + }, + } + ensureFileWasSent(g, mockServer, expFile) + mockServer.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + // ensure that the initial API request was sent when the Subscription connected + ensureAPIRequestWasSent(g, mockServer, initialAction) + mockServer.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + g.Eventually(func() string { + obj := cs.statusQueue.Dequeue(ctx) + return obj.Deployment.Name + }).Should(Equal("nginx-deployment")) + + // ensure the second file was sent in the loop + ensureFileWasSent(g, mockServer, loopFile) + verifyResponse(g, mockServer, responseCh) + + // ensure the second action was sent in the loop + ensureAPIRequestWasSent(g, mockServer, loopAction) + verifyResponse(g, mockServer, responseCh) + + cancel() + + g.Eventually(func() error { + return <-errCh + }).Should(MatchError(ContainSubstring("context canceled"))) +} + +func TestSubscribe_Errors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setup func( + cs *commandService, + ct *agentgrpcfakes.FakeConnectionsTracker, + ) + ctx context.Context + errString string + }{ + { + name: "context is missing data", + ctx: context.Background(), + errString: agentgrpc.ErrStatusInvalidConnection.Error(), + }, + { + name: "error waiting for connection; not connected", + setup: func( + cs *commandService, + _ *agentgrpcfakes.FakeConnectionsTracker, + ) { + cs.connectionTimeout = 1100 * time.Millisecond + }, + errString: "timed out waiting for agent to register nginx", + }, + { + name: "error waiting for connection; deployment not tracked", + setup: func( + cs *commandService, + ct *agentgrpcfakes.FakeConnectionsTracker, + ) { + ct.GetConnectionReturns(agentgrpc.Connection{InstanceID: "nginx-id"}) + cs.connectionTimeout = 1100 * time.Millisecond + }, + errString: "timed out waiting for nginx deployment to be added to store", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + ) + + if test.setup != nil { + test.setup(cs, &connTracker) + } + + var ctx context.Context + var cancel context.CancelFunc + + if test.ctx != nil { + ctx = test.ctx + } else { + ctx, cancel = createGrpcContextWithCancel() + defer cancel() + } + + mockServer := newMockSubscribeServer(ctx) + + // start the Subscriber + errCh := make(chan error) + go func() { + errCh <- cs.Subscribe(mockServer) + }() + + g.Eventually(func() error { + err := <-errCh + g.Expect(err).To(HaveOccurred()) + return err + 
}).Should(MatchError(ContainSubstring(test.errString))) + }) + } +} + +func TestSetInitialConfig_Errors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setup func(msgr *messengerfakes.FakeMessenger, deployment *Deployment) + errString string + }{ + { + name: "error sending initial config", + setup: func(msgr *messengerfakes.FakeMessenger, _ *Deployment) { + msgr.SendReturns(errors.New("send error")) + }, + errString: "send error", + }, + { + name: "error waiting for initial config apply", + setup: func(msgr *messengerfakes.FakeMessenger, _ *Deployment) { + errCh := make(chan error, 1) + msgr.ErrorsReturns(errCh) + errCh <- errors.New("apply error") + }, + errString: "apply error", + }, + { + name: "error sending initial API request", + setup: func(msgr *messengerfakes.FakeMessenger, deployment *Deployment) { + deployment.SetNGINXPlusActions([]*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + }, + }) + msgCh := make(chan *pb.DataPlaneResponse, 1) + msgr.MessagesReturns(msgCh) + msgCh <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + msgr.SendReturnsOnCall(1, errors.New("api send error")) + }, + errString: "api send error", + }, + { + name: "error waiting for initial API request apply", + setup: func(msgr *messengerfakes.FakeMessenger, deployment *Deployment) { + deployment.SetNGINXPlusActions([]*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + }, + }) + msgCh := make(chan *pb.DataPlaneResponse, 1) + msgr.MessagesReturns(msgCh) + msgCh <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + errCh := make(chan error, 1) + msgr.ErrorsReturns(errCh) + errCh <- errors.New("api apply error") + }, + errString: "api apply error", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + msgr := &messengerfakes.FakeMessenger{} + + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + ) + + conn := &agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + if test.setup != nil { + test.setup(msgr, deployment) + } + + err := cs.setInitialConfig(context.Background(), deployment, conn, msgr) + + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + }) + } +} + +func TestGetPodOwner(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + podName string + podList *v1.PodList + replicaSet *appsv1.ReplicaSet + errString string + expected types.NamespacedName + }{ + { + name: "successfully gets pod owner", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-replicaset", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "nginx-deployment", + }, + }, + }, + }, + expected: types.NamespacedName{ + 
Namespace: "test", + Name: "nginx-deployment", + }, + }, + { + name: "error listing pods", + podName: "nginx-pod", + podList: &v1.PodList{}, + replicaSet: &appsv1.ReplicaSet{}, + errString: "no pods found", + }, + { + name: "multiple pods with same name", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "nginx-pod"}}, + {ObjectMeta: metav1.ObjectMeta{Namespace: "test2", Name: "nginx-pod"}}, + }, + }, + replicaSet: &appsv1.ReplicaSet{}, + errString: "should only be one pod with name", + }, + { + name: "pod owner reference is not ReplicaSet", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Owner", + Name: "nginx-owner", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{}, + errString: "expected pod owner reference to be ReplicaSet", + }, + { + name: "pod has multiple owners", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + { + Kind: "ReplicaSet", + Name: "nginx-replicaset2", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{}, + errString: "expected one owner reference of the nginx Pod", + }, + { + name: "replicaSet has multiple owners", + podName: "nginx-pod", + podList: &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "nginx-replicaset", + }, + }, + }, + }, + }, + }, + replicaSet: &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-replicaset", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "nginx-deployment", + }, + { + Kind: "Deployment", + Name: "nginx-deployment2", + }, + }, + }, + }, + errString: "expected one owner reference of the nginx ReplicaSet", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeClient, err := createFakeK8sClient(test.podList, test.replicaSet) + g.Expect(err).ToNot(HaveOccurred()) + + cs := newCommandService( + logr.Discard(), + fakeClient, + NewDeploymentStore(nil), + nil, + status.NewQueue(), + ) + + owner, err := cs.getPodOwner(test.podName) + + if test.errString != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + g.Expect(owner).To(Equal(types.NamespacedName{})) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(owner).To(Equal(test.expected)) + }) + } +} + +func TestUpdateDataPlaneStatus(t *testing.T) { + t.Parallel() + + tests := []struct { + request *pb.UpdateDataPlaneStatusRequest + response *pb.UpdateDataPlaneStatusResponse + ctx context.Context + errString string + expID string + name string + }{ + { + name: "successfully sets the status", + ctx: createGrpcContext(), + request: &pb.UpdateDataPlaneStatusRequest{ + Resource: &pb.Resource{ + Instances: []*pb.Instance{ + { + InstanceMeta: &pb.InstanceMeta{ + InstanceId: "nginx-id", + InstanceType: pb.InstanceMeta_INSTANCE_TYPE_NGINX, + }, + }, + }, + }, + }, + expID: "nginx-id", + response: &pb.UpdateDataPlaneStatusResponse{}, + }, + { + name: "successfully sets the status using plus", + ctx: createGrpcContext(), + request: &pb.UpdateDataPlaneStatusRequest{ + Resource: &pb.Resource{ + Instances: 
[]*pb.Instance{ + { + InstanceMeta: &pb.InstanceMeta{ + InstanceId: "nginx-plus-id", + InstanceType: pb.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS, + }, + }, + }, + }, + }, + expID: "nginx-plus-id", + response: &pb.UpdateDataPlaneStatusResponse{}, + }, + { + name: "request is nil", + request: nil, + response: nil, + errString: "empty UpdateDataPlaneStatus request", + }, + { + name: "context is missing data", + ctx: context.Background(), + request: &pb.UpdateDataPlaneStatusRequest{}, + response: nil, + errString: agentgrpc.ErrStatusInvalidConnection.Error(), + }, + { + name: "request does not contain ID", + ctx: createGrpcContext(), + request: &pb.UpdateDataPlaneStatusRequest{}, + response: nil, + errString: "request does not contain nginx instanceID", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + ) + + resp, err := cs.UpdateDataPlaneStatus(test.ctx, test.request) + + if test.errString != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(test.errString)) + g.Expect(resp).To(BeNil()) + + g.Expect(connTracker.SetInstanceIDCallCount()).To(Equal(0)) + + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(test.response)) + + g.Expect(connTracker.SetInstanceIDCallCount()).To(Equal(1)) + + key, id := connTracker.SetInstanceIDArgsForCall(0) + g.Expect(key).To(Equal("127.0.0.1")) + g.Expect(id).To(Equal(test.expID)) + }) + } +} + +func TestUpdateDataPlaneHealth(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + NewDeploymentStore(&connTracker), + &connTracker, + status.NewQueue(), + ) + + resp, err := cs.UpdateDataPlaneHealth(context.Background(), &pb.UpdateDataPlaneHealthRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.UpdateDataPlaneHealthResponse{})) +} diff --git a/internal/mode/static/nginx/agent/deployment.go b/internal/mode/static/nginx/agent/deployment.go new file mode 100644 index 0000000000..c0bd2bca1d --- /dev/null +++ b/internal/mode/static/nginx/agent/deployment.go @@ -0,0 +1,263 @@ +package agent + +import ( + "context" + "errors" + "fmt" + "sync" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" + "k8s.io/apimachinery/pkg/types" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" +) + +// ignoreFiles is a list of static or base files that live in the +// nginx container that should not be touched by the agent. Any files +// that we add directly into the container should be added here. +var ignoreFiles = []string{ + "/etc/nginx/nginx.conf", + "/etc/nginx/mime.types", + "/etc/nginx/grpc-error-locations.conf", + "/etc/nginx/grpc-error-pages.conf", + "/usr/share/nginx/html/50x.html", + "/usr/share/nginx/html/dashboard.html", + "/usr/share/nginx/html/index.html", + "/usr/share/nginx/html/nginx-modules-reference.pdf", +} + +const fileMode = "0644" + +// Deployment represents an nginx Deployment. 
It contains its own nginx configuration files, +// a broadcaster for sending those files to all of its pods that are subscribed, and errors +// that may have occurred while applying configuration. +type Deployment struct { + // podStatuses is a map of all Pods for this Deployment and the most recent error + // (or nil if successful) that occurred on a config call to the nginx agent. + podStatuses map[string]error + + broadcaster broadcast.Broadcaster + + configVersion string + // error that is set if a ConfigApply call failed for a Pod. This is needed + // because if subsequent upstream API calls are made within the same update event, + // and are successful, the previous error would be lost in the podStatuses map. + // It's used to preserve the error for when we write status after fully updating nginx. + latestConfigError error + // error that is set when at least one upstream API call failed for a Pod. + // This is needed because subsequent API calls within the same update event could succeed, + // and therefore the previous error would be lost in the podStatuses map. It's used to preserve + // the error for when we write status after fully updating nginx. + latestUpstreamError error + + nginxPlusActions []*pb.NGINXPlusAction + fileOverviews []*pb.File + files []File + + Lock sync.RWMutex +} + +// newDeployment returns a new Deployment object. +func newDeployment(broadcaster broadcast.Broadcaster) *Deployment { + return &Deployment{ + broadcaster: broadcaster, + podStatuses: make(map[string]error), + } +} + +// GetBroadcaster returns the deployment's broadcaster. +func (d *Deployment) GetBroadcaster() broadcast.Broadcaster { + return d.broadcaster +} + +// GetFileOverviews returns the current list of fileOverviews and configVersion for the deployment. +func (d *Deployment) GetFileOverviews() ([]*pb.File, string) { + d.Lock.RLock() + defer d.Lock.RUnlock() + + return d.fileOverviews, d.configVersion +} + +// GetNGINXPlusActions returns the current NGINX Plus API Actions for the deployment. +func (d *Deployment) GetNGINXPlusActions() []*pb.NGINXPlusAction { + d.Lock.RLock() + defer d.Lock.RUnlock() + + return d.nginxPlusActions +} + +// GetLatestConfigError gets the latest config apply error for the deployment. +func (d *Deployment) GetLatestConfigError() error { + d.Lock.RLock() + defer d.Lock.RUnlock() + + return d.latestConfigError +} + +// GetLatestUpstreamError gets the latest upstream update error for the deployment. +func (d *Deployment) GetLatestUpstreamError() error { + d.Lock.RLock() + defer d.Lock.RUnlock() + + return d.latestUpstreamError +} + +/* +The following functions for the Deployment object are UNLOCKED, meaning that they are unsafe. +Callers of these functions MUST ensure the lock is set before calling. + +These functions are called as part of the ConfigApply or APIRequest processes. These entire processes +are locked by the caller, hence why the functions themselves do not set the locks. +*/ + +// GetFile gets the requested file for the deployment and returns its contents. +// The deployment MUST already be locked before calling this function. +func (d *Deployment) GetFile(name, hash string) []byte { + for _, file := range d.files { + if name == file.Meta.GetName() && hash == file.Meta.GetHash() { + return file.Contents + } + } + + return nil +} + +// SetFiles updates the nginx files and fileOverviews for the deployment and returns the message to send. +// The deployment MUST already be locked before calling this function. 
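+//
+// Illustrative sketch of the call pattern (simplified; in practice the caller holds
+// the lock for the entire ConfigApply transaction, not just this call):
+//
+//	d.Lock.Lock()
+//	msg := d.SetFiles(files) // msg.Type is broadcast.ConfigApplyRequest
+//	d.Lock.Unlock()
+//	// msg is then sent to the subscribed agent pods via this Deployment's broadcaster.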
+func (d *Deployment) SetFiles(files []File) broadcast.NginxAgentMessage { + d.files = files + + fileOverviews := make([]*pb.File, 0, len(files)) + for _, file := range files { + fileOverviews = append(fileOverviews, &pb.File{FileMeta: file.Meta}) + } + + // add ignored files to the overview as 'unmanaged' so agent doesn't touch them + for _, f := range ignoreFiles { + meta := &pb.FileMeta{ + Name: f, + Permissions: fileMode, + } + + fileOverviews = append(fileOverviews, &pb.File{ + FileMeta: meta, + Unmanaged: true, + }) + } + + d.configVersion = filesHelper.GenerateConfigVersion(fileOverviews) + d.fileOverviews = fileOverviews + + return broadcast.NginxAgentMessage{ + Type: broadcast.ConfigApplyRequest, + FileOverviews: fileOverviews, + ConfigVersion: d.configVersion, + } +} + +// SetNGINXPlusActions updates the deployment's latest NGINX Plus Actions to perform if using NGINX Plus. +// Used by a Subscriber when it first connects. +// The deployment MUST already be locked before calling this function. +func (d *Deployment) SetNGINXPlusActions(actions []*pb.NGINXPlusAction) { + d.nginxPlusActions = actions +} + +// SetPodErrorStatus sets the error status of a Pod in this Deployment if applying the config failed. +// The deployment MUST already be locked before calling this function. +func (d *Deployment) SetPodErrorStatus(pod string, err error) { + d.podStatuses[pod] = err +} + +// SetLatestConfigError sets the latest config apply error for the deployment. +// The deployment MUST already be locked before calling this function. +func (d *Deployment) SetLatestConfigError(err error) { + d.latestConfigError = err +} + +// SetLatestUpstreamError sets the latest upstream update error for the deployment. +// The deployment MUST already be locked before calling this function. +func (d *Deployment) SetLatestUpstreamError(err error) { + d.latestUpstreamError = err +} + +// GetConfigurationStatus returns the current config status for this Deployment. It combines +// the most recent errors (if they exist) for all Pods in the Deployment into a single error. +// The deployment MUST already be locked before calling this function. +func (d *Deployment) GetConfigurationStatus() error { + errs := make([]error, 0, len(d.podStatuses)) + for _, err := range d.podStatuses { + errs = append(errs, err) + } + + if len(errs) == 1 { + return errs[0] + } + + return errors.Join(errs...) +} + +// DeploymentStore holds a map of all Deployments. +type DeploymentStore struct { + connTracker agentgrpc.ConnectionsTracker + deployments sync.Map +} + +// NewDeploymentStore returns a new instance of a DeploymentStore. +func NewDeploymentStore(connTracker agentgrpc.ConnectionsTracker) *DeploymentStore { + return &DeploymentStore{ + connTracker: connTracker, + } +} + +// Get returns the desired deployment from the store. +func (d *DeploymentStore) Get(nsName types.NamespacedName) *Deployment { + val, ok := d.deployments.Load(nsName) + if !ok { + return nil + } + + deployment, ok := val.(*Deployment) + if !ok { + panic(fmt.Sprintf("expected Deployment, got type %T", val)) + } + + return deployment +} + +// GetOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. 
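+//
+// Minimal usage sketch (the namespace/name and stop channel shown are illustrative only):
+//
+//	nsName := types.NamespacedName{Namespace: "default", Name: "gateway-nginx"}
+//	deployment := store.GetOrStore(ctx, nsName, stopCh)
+//	broadcaster := deployment.GetBroadcaster()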
+func (d *DeploymentStore) GetOrStore( + ctx context.Context, + nsName types.NamespacedName, + stopCh chan struct{}, +) *Deployment { + if deployment := d.Get(nsName); deployment != nil { + return deployment + } + + deployment := newDeployment(broadcast.NewDeploymentBroadcaster(ctx, stopCh)) + d.deployments.Store(nsName, deployment) + + return deployment +} + +// StoreWithBroadcaster creates a new Deployment with the supplied broadcaster and stores it. +// Used in unit tests to provide a mock broadcaster. +func (d *DeploymentStore) StoreWithBroadcaster( + nsName types.NamespacedName, + broadcaster broadcast.Broadcaster, +) *Deployment { + deployment := newDeployment(broadcaster) + d.deployments.Store(nsName, deployment) + + return deployment +} + +// Remove cleans up any connections that are tracked for this deployment, and then removes +// the deployment from the store. +func (d *DeploymentStore) Remove(nsName types.NamespacedName) { + d.connTracker.UntrackConnectionsForParent(nsName) + d.deployments.Delete(nsName) +} diff --git a/internal/mode/static/nginx/agent/deployment_test.go b/internal/mode/static/nginx/agent/deployment_test.go new file mode 100644 index 0000000000..e4881b9934 --- /dev/null +++ b/internal/mode/static/nginx/agent/deployment_test.go @@ -0,0 +1,137 @@ +package agent + +import ( + "context" + "errors" + "testing" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/broadcast/broadcastfakes" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" +) + +func TestNewDeployment(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + g.Expect(deployment).ToNot(BeNil()) + + g.Expect(deployment.GetBroadcaster()).ToNot(BeNil()) + g.Expect(deployment.GetFileOverviews()).To(BeEmpty()) + g.Expect(deployment.GetNGINXPlusActions()).To(BeEmpty()) + g.Expect(deployment.GetLatestConfigError()).ToNot(HaveOccurred()) + g.Expect(deployment.GetLatestUpstreamError()).ToNot(HaveOccurred()) +} + +func TestSetAndGetFiles(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + files := []File{ + { + Meta: &pb.FileMeta{ + Name: "test.conf", + Hash: "12345", + }, + Contents: []byte("test content"), + }, + } + + msg := deployment.SetFiles(files) + fileOverviews, configVersion := deployment.GetFileOverviews() + + g.Expect(msg.Type).To(Equal(broadcast.ConfigApplyRequest)) + g.Expect(msg.ConfigVersion).To(Equal(configVersion)) + g.Expect(msg.FileOverviews).To(HaveLen(9)) // 1 file + 8 ignored files + g.Expect(fileOverviews).To(Equal(msg.FileOverviews)) + + file := deployment.GetFile("test.conf", "12345") + g.Expect(file).To(Equal([]byte("test content"))) + + g.Expect(deployment.GetFile("invalid", "12345")).To(BeNil()) + g.Expect(deployment.GetFile("test.conf", "invalid")).To(BeNil()) +} + +func TestSetNGINXPlusActions(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + actions := []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{}, + }, + { + Action: &pb.NGINXPlusAction_UpdateStreamServers{}, + }, + } + + deployment.SetNGINXPlusActions(actions) + 
g.Expect(deployment.GetNGINXPlusActions()).To(Equal(actions)) +} + +func TestSetPodErrorStatus(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + err := errors.New("test error") + err2 := errors.New("test error 2") + deployment.SetPodErrorStatus("test-pod", err) + deployment.SetPodErrorStatus("test-pod2", err2) + + g.Expect(deployment.GetConfigurationStatus()).To(MatchError(ContainSubstring("test error"))) + g.Expect(deployment.GetConfigurationStatus()).To(MatchError(ContainSubstring("test error 2"))) +} + +func TestSetLatestConfigError(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + err := errors.New("test error") + deployment.SetLatestConfigError(err) + g.Expect(deployment.GetLatestConfigError()).To(MatchError(err)) +} + +func TestSetLatestUpstreamError(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deployment := newDeployment(&broadcastfakes.FakeBroadcaster{}) + + err := errors.New("test error") + deployment.SetLatestUpstreamError(err) + g.Expect(deployment.GetLatestUpstreamError()).To(MatchError(err)) +} + +func TestDeploymentStore(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := NewDeploymentStore(&agentgrpcfakes.FakeConnectionsTracker{}) + + nsName := types.NamespacedName{Namespace: "default", Name: "test-deployment"} + + deployment := store.GetOrStore(context.Background(), nsName, nil) + g.Expect(deployment).ToNot(BeNil()) + + fetchedDeployment := store.Get(nsName) + g.Expect(fetchedDeployment).To(Equal(deployment)) + + deployment = store.GetOrStore(context.Background(), nsName, nil) + g.Expect(fetchedDeployment).To(Equal(deployment)) + + store.Remove(nsName) + g.Expect(store.Get(nsName)).To(BeNil()) +} diff --git a/internal/mode/static/nginx/agent/file.go b/internal/mode/static/nginx/agent/file.go index 296e1705ee..a4163ea187 100644 --- a/internal/mode/static/nginx/agent/file.go +++ b/internal/mode/static/nginx/agent/file.go @@ -2,51 +2,93 @@ package agent import ( "context" - "fmt" "github.com/go-logr/logr" pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" ) +// File is an nginx configuration file that the nginx agent gets from the control plane +// after a ConfigApplyRequest. +type File struct { + Meta *pb.FileMeta + Contents []byte +} + // fileService handles file management between the control plane and the agent. type fileService struct { pb.FileServiceServer + nginxDeployments *DeploymentStore + connTracker agentgrpc.ConnectionsTracker // TODO(sberman): all logs are at Info level right now. Adjust appropriately. logger logr.Logger } -func newFileService(logger logr.Logger) *fileService { - return &fileService{logger: logger} +func newFileService( + logger logr.Logger, + depStore *DeploymentStore, + connTracker agentgrpc.ConnectionsTracker, +) *fileService { + return &fileService{ + logger: logger, + nginxDeployments: depStore, + connTracker: connTracker, + } } func (fs *fileService) Register(server *grpc.Server) { pb.RegisterFileServiceServer(server, fs) } -// GetOverview gets the overview of files for a particular configuration version of an instance. 
-// Agent calls this if it's missing an overview when a ConfigApplyRequest is called by the control plane. -func (fs *fileService) GetOverview( - _ context.Context, - _ *pb.GetOverviewRequest, -) (*pb.GetOverviewResponse, error) { - fs.logger.Info("Get overview request") - - return &pb.GetOverviewResponse{ - Overview: &pb.FileOverview{}, - }, nil -} - // GetFile is called by the agent when it needs to download a file for a ConfigApplyRequest. +// The deployment object used to get the files is already LOCKED when this function is called, +// before the ConfigApply transaction is started. func (fs *fileService) GetFile( - _ context.Context, + ctx context.Context, req *pb.GetFileRequest, ) (*pb.GetFileResponse, error) { filename := req.GetFileMeta().GetName() hash := req.GetFileMeta().GetHash() - fs.logger.Info(fmt.Sprintf("Getting file: %s, %s", filename, hash)) - return &pb.GetFileResponse{}, nil + gi, ok := grpcContext.GrpcInfoFromContext(ctx) + if !ok { + return nil, agentgrpc.ErrStatusInvalidConnection + } + + conn := fs.connTracker.GetConnection(gi.IPAddress) + if conn.PodName == "" { + return nil, status.Errorf(codes.NotFound, "connection not found") + } + + deployment := fs.nginxDeployments.Get(conn.Parent) + if deployment == nil { + return nil, status.Errorf(codes.NotFound, "deployment not found in store") + } + + contents := deployment.GetFile(filename, hash) + if len(contents) == 0 { + return nil, status.Errorf(codes.NotFound, "file not found") + } + + return &pb.GetFileResponse{ + Contents: &pb.FileContents{ + Contents: contents, + }, + }, nil +} + +// GetOverview gets the overview of files for a particular configuration version of an instance. +// At the moment it doesn't appear to be used by the agent. +func (fs *fileService) GetOverview( + _ context.Context, + _ *pb.GetOverviewRequest, +) (*pb.GetOverviewResponse, error) { + return &pb.GetOverviewResponse{}, nil } // UpdateOverview is called by agent on startup and whenever any files change on the instance. diff --git a/internal/mode/static/nginx/agent/file_test.go b/internal/mode/static/nginx/agent/file_test.go new file mode 100644 index 0000000000..1e683eb214 --- /dev/null +++ b/internal/mode/static/nginx/agent/file_test.go @@ -0,0 +1,209 @@ +package agent + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . 
"github.com/onsi/gomega" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/apimachinery/pkg/types" + + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" + agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" +) + +func TestGetFile(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deploymentName := types.NamespacedName{Name: "nginx-deployment", Namespace: "default"} + + connTracker := &agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + PodName: "nginx-pod", + InstanceID: "12345", + Parent: deploymentName, + } + connTracker.GetConnectionReturns(conn) + + depStore := NewDeploymentStore(connTracker) + dep := depStore.GetOrStore(context.Background(), deploymentName, nil) + + fileMeta := &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + } + contents := []byte("test contents") + + dep.files = []File{ + { + Meta: fileMeta, + Contents: contents, + }, + } + + fs := newFileService(logr.Discard(), depStore, connTracker) + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + req := &pb.GetFileRequest{ + FileMeta: fileMeta, + } + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).ToNot(BeNil()) + g.Expect(resp.GetContents()).ToNot(BeNil()) + g.Expect(resp.GetContents().GetContents()).To(Equal(contents)) +} + +func TestGetFile_InvalidConnection(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + resp, err := fs.GetFile(context.Background(), req) + + g.Expect(err).To(Equal(agentgrpc.ErrStatusInvalidConnection)) + g.Expect(resp).To(BeNil()) +} + +func TestGetFile_ConnectionNotFound(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, &agentgrpcfakes.FakeConnectionsTracker{}) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).To(Equal(status.Errorf(codes.NotFound, "connection not found"))) + g.Expect(resp).To(BeNil()) +} + +func TestGetFile_DeploymentNotFound(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deploymentName := types.NamespacedName{Name: "nginx-deployment", Namespace: "default"} + + connTracker := &agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + PodName: "nginx-pod", + InstanceID: "12345", + Parent: deploymentName, + } + connTracker.GetConnectionReturns(conn) + + fs := newFileService(logr.Discard(), NewDeploymentStore(connTracker), connTracker) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).To(Equal(status.Errorf(codes.NotFound, "deployment not found in store"))) + g.Expect(resp).To(BeNil()) +} + +func TestGetFile_FileNotFound(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + deploymentName := types.NamespacedName{Name: "nginx-deployment", 
Namespace: "default"} + + connTracker := &agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + PodName: "nginx-pod", + InstanceID: "12345", + Parent: deploymentName, + } + connTracker.GetConnectionReturns(conn) + + depStore := NewDeploymentStore(connTracker) + depStore.GetOrStore(context.Background(), deploymentName, nil) + + fs := newFileService(logr.Discard(), depStore, connTracker) + + req := &pb.GetFileRequest{ + FileMeta: &pb.FileMeta{ + Name: "test.conf", + Hash: "some-hash", + }, + } + + ctx := grpcContext.NewGrpcContext(context.Background(), grpcContext.GrpcInfo{ + IPAddress: "127.0.0.1", + }) + + resp, err := fs.GetFile(ctx, req) + + g.Expect(err).To(Equal(status.Errorf(codes.NotFound, "file not found"))) + g.Expect(resp).To(BeNil()) +} + +func TestGetOverview(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + resp, err := fs.GetOverview(context.Background(), &pb.GetOverviewRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.GetOverviewResponse{})) +} + +func TestUpdateOverview(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + resp, err := fs.UpdateOverview(context.Background(), &pb.UpdateOverviewRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.UpdateOverviewResponse{})) +} + +func TestUpdateFile(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fs := newFileService(logr.Discard(), nil, nil) + resp, err := fs.UpdateFile(context.Background(), &pb.UpdateFileRequest{}) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resp).To(Equal(&pb.UpdateFileResponse{})) +} diff --git a/internal/mode/static/nginx/agent/grpc/connections.go b/internal/mode/static/nginx/agent/grpc/connections.go index af99b84002..8f1adc2c75 100644 --- a/internal/mode/static/nginx/agent/grpc/connections.go +++ b/internal/mode/static/nginx/agent/grpc/connections.go @@ -2,23 +2,48 @@ package grpc import ( "sync" + + "k8s.io/apimachinery/pkg/types" ) -// ConnectionsTracker keeps track of all connections between the control plane and nginx agents. -type ConnectionsTracker struct { - // connections contains a map of all IP addresses that have connected and their associated pod names. - // TODO(sberman): we'll likely need to create a channel for each connection that can be stored in this map. - // Then the Subscription listens on the channel for its connection, while the nginxUpdater sends the config - // for the pod over that channel. - connections map[string]string +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate + +//counterfeiter:generate . ConnectionsTracker + +// ConnectionsTracker defines an interface to track all connections between the control plane +// and nginx agents. +type ConnectionsTracker interface { + Track(key string, conn Connection) + GetConnection(key string) Connection + SetInstanceID(key, id string) + UntrackConnectionsForParent(parent types.NamespacedName) +} + +// Connection contains the data about a single nginx agent connection. +type Connection struct { + PodName string + InstanceID string + Parent types.NamespacedName +} + +// Ready returns if the connection is ready to be used. In other words, agent +// has registered itself and an nginx instance with the control plane. +func (c *Connection) Ready() bool { + return c.InstanceID != "" +} + +// AgentConnectionsTracker keeps track of all connections between the control plane and nginx agents. 
+type AgentConnectionsTracker struct { + // connections contains a map of all IP addresses that have connected and their connection info. + connections map[string]Connection - lock sync.Mutex + lock sync.RWMutex } -// NewConnectionsTracker returns a new ConnectionsTracker instance. -func NewConnectionsTracker() *ConnectionsTracker { - return &ConnectionsTracker{ - connections: make(map[string]string), +// NewConnectionsTracker returns a new AgentConnectionsTracker instance. +func NewConnectionsTracker() ConnectionsTracker { + return &AgentConnectionsTracker{ + connections: make(map[string]Connection), } } @@ -26,25 +51,40 @@ func NewConnectionsTracker() *ConnectionsTracker { // TODO(sberman): we need to handle the case when the token expires (once we support the token). // This likely involves setting a callback to cancel a context when the token expires, which triggers // the connection to be removed from the tracking list. -func (c *ConnectionsTracker) Track(address, hostname string) { +func (c *AgentConnectionsTracker) Track(key string, conn Connection) { c.lock.Lock() defer c.lock.Unlock() - c.connections[address] = hostname + c.connections[key] = conn } -// GetConnections returns all connections that are currently tracked. -func (c *ConnectionsTracker) GetConnections() map[string]string { +// GetConnection returns the requested connection. +func (c *AgentConnectionsTracker) GetConnection(key string) Connection { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.connections[key] +} + +// SetInstanceID sets the nginx instanceID for a connection. +func (c *AgentConnectionsTracker) SetInstanceID(key, id string) { c.lock.Lock() defer c.lock.Unlock() - return c.connections + if conn, ok := c.connections[key]; ok { + conn.InstanceID = id + c.connections[key] = conn + } } -// GetConnection returns the hostname of the requested connection. -func (c *ConnectionsTracker) GetConnection(address string) string { +// UntrackConnectionsForParent removes all Connections that reference the specified parent. +func (c *AgentConnectionsTracker) UntrackConnectionsForParent(parent types.NamespacedName) { c.lock.Lock() defer c.lock.Unlock() - return c.connections[address] + for key, conn := range c.connections { + if conn.Parent == parent { + delete(c.connections, key) + } + } } diff --git a/internal/mode/static/nginx/agent/grpc/connections_test.go b/internal/mode/static/nginx/agent/grpc/connections_test.go new file mode 100644 index 0000000000..be0ca18a8b --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/connections_test.go @@ -0,0 +1,99 @@ +package grpc_test + +import ( + "testing" + + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + + agentgrpc "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" +) + +func TestGetConnection(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + tracker := agentgrpc.NewConnectionsTracker() + + conn := agentgrpc.Connection{ + PodName: "pod1", + InstanceID: "instance1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + tracker.Track("key1", conn) + + trackedConn := tracker.GetConnection("key1") + g.Expect(trackedConn).To(Equal(conn)) + + nonExistent := tracker.GetConnection("nonexistent") + g.Expect(nonExistent).To(Equal(agentgrpc.Connection{})) +} + +func TestConnectionIsReady(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conn := agentgrpc.Connection{ + PodName: "pod1", + InstanceID: "instance1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + + g.Expect(conn.Ready()).To(BeTrue()) +} + +func TestConnectionIsNotReady(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conn := agentgrpc.Connection{ + PodName: "pod1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + + g.Expect(conn.Ready()).To(BeFalse()) +} + +func TestSetInstanceID(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + tracker := agentgrpc.NewConnectionsTracker() + conn := agentgrpc.Connection{ + PodName: "pod1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + tracker.Track("key1", conn) + + trackedConn := tracker.GetConnection("key1") + g.Expect(trackedConn.Ready()).To(BeFalse()) + + tracker.SetInstanceID("key1", "instance1") + + trackedConn = tracker.GetConnection("key1") + g.Expect(trackedConn.Ready()).To(BeTrue()) + g.Expect(trackedConn.InstanceID).To(Equal("instance1")) +} + +func TestUntrackConnectionsForParent(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + tracker := agentgrpc.NewConnectionsTracker() + + parent1 := types.NamespacedName{Namespace: "default", Name: "parent1"} + conn1 := agentgrpc.Connection{PodName: "pod1", InstanceID: "instance1", Parent: parent1} + conn2 := agentgrpc.Connection{PodName: "pod2", InstanceID: "instance2", Parent: parent1} + + parent2 := types.NamespacedName{Namespace: "default", Name: "parent2"} + conn3 := agentgrpc.Connection{PodName: "pod3", InstanceID: "instance3", Parent: parent2} + + tracker.Track("key1", conn1) + tracker.Track("key2", conn2) + tracker.Track("key3", conn3) + + tracker.UntrackConnectionsForParent(parent1) + g.Expect(tracker.GetConnection("key1")).To(Equal(agentgrpc.Connection{})) + g.Expect(tracker.GetConnection("key2")).To(Equal(agentgrpc.Connection{})) + g.Expect(tracker.GetConnection("key3")).To(Equal(conn3)) +} diff --git a/internal/mode/static/nginx/agent/grpc/context/context_test.go b/internal/mode/static/nginx/agent/grpc/context/context_test.go new file mode 100644 index 0000000000..57acf9152f --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/context/context_test.go @@ -0,0 +1,31 @@ +package context_test + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + + grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" +) + +func TestGrpcInfoInContext(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + grpcInfo := grpcContext.GrpcInfo{IPAddress: "192.168.1.1"} + + newCtx := grpcContext.NewGrpcContext(context.Background(), grpcInfo) + info, ok := grpcContext.GrpcInfoFromContext(newCtx) + g.Expect(ok).To(BeTrue()) + g.Expect(info).To(Equal(grpcInfo)) +} + +func TestGrpcInfoNotInContext(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + info, ok := grpcContext.GrpcInfoFromContext(context.Background()) + g.Expect(ok).To(BeFalse()) + g.Expect(info).To(Equal(grpcContext.GrpcInfo{})) +} diff --git a/internal/mode/static/nginx/agent/grpc/grpc.go b/internal/mode/static/nginx/agent/grpc/grpc.go index 11c57ec3e9..a4f2a31268 100644 --- a/internal/mode/static/nginx/agent/grpc/grpc.go +++ b/internal/mode/static/nginx/agent/grpc/grpc.go @@ -17,7 +17,7 @@ import ( ) const ( - keepAliveTime = 10 * time.Second + keepAliveTime = 15 * time.Second keepAliveTimeout = 10 * time.Second ) @@ -82,7 +82,8 @@ func (g *Server) Start(ctx context.Context) error { go func() { <-ctx.Done() g.logger.Info("Shutting down GRPC Server") - server.GracefulStop() + // Since we use a long-lived stream, GracefulStop does not terminate. Therefore we use Stop. + server.Stop() }() return server.Serve(listener) diff --git a/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go b/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go new file mode 100644 index 0000000000..a82da0a5a2 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go @@ -0,0 +1,233 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package grpcfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" + "k8s.io/apimachinery/pkg/types" +) + +type FakeConnectionsTracker struct { + GetConnectionStub func(string) grpc.Connection + getConnectionMutex sync.RWMutex + getConnectionArgsForCall []struct { + arg1 string + } + getConnectionReturns struct { + result1 grpc.Connection + } + getConnectionReturnsOnCall map[int]struct { + result1 grpc.Connection + } + SetInstanceIDStub func(string, string) + setInstanceIDMutex sync.RWMutex + setInstanceIDArgsForCall []struct { + arg1 string + arg2 string + } + TrackStub func(string, grpc.Connection) + trackMutex sync.RWMutex + trackArgsForCall []struct { + arg1 string + arg2 grpc.Connection + } + UntrackConnectionsForParentStub func(types.NamespacedName) + untrackConnectionsForParentMutex sync.RWMutex + untrackConnectionsForParentArgsForCall []struct { + arg1 types.NamespacedName + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeConnectionsTracker) GetConnection(arg1 string) grpc.Connection { + fake.getConnectionMutex.Lock() + ret, specificReturn := fake.getConnectionReturnsOnCall[len(fake.getConnectionArgsForCall)] + fake.getConnectionArgsForCall = append(fake.getConnectionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetConnectionStub + fakeReturns := fake.getConnectionReturns + fake.recordInvocation("GetConnection", []interface{}{arg1}) + fake.getConnectionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeConnectionsTracker) GetConnectionCallCount() int { + fake.getConnectionMutex.RLock() + defer fake.getConnectionMutex.RUnlock() + return len(fake.getConnectionArgsForCall) +} + +func (fake *FakeConnectionsTracker) GetConnectionCalls(stub func(string) grpc.Connection) { + fake.getConnectionMutex.Lock() + defer fake.getConnectionMutex.Unlock() + fake.GetConnectionStub = stub +} + +func (fake *FakeConnectionsTracker) GetConnectionArgsForCall(i int) string { + fake.getConnectionMutex.RLock() + defer fake.getConnectionMutex.RUnlock() + argsForCall := fake.getConnectionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeConnectionsTracker) GetConnectionReturns(result1 grpc.Connection) { + fake.getConnectionMutex.Lock() + defer fake.getConnectionMutex.Unlock() + fake.GetConnectionStub = nil + fake.getConnectionReturns = struct { + result1 grpc.Connection + }{result1} +} + +func (fake *FakeConnectionsTracker) GetConnectionReturnsOnCall(i int, result1 grpc.Connection) { + fake.getConnectionMutex.Lock() + defer fake.getConnectionMutex.Unlock() + fake.GetConnectionStub = nil + if fake.getConnectionReturnsOnCall == nil { + fake.getConnectionReturnsOnCall = make(map[int]struct { + result1 grpc.Connection + }) + } + fake.getConnectionReturnsOnCall[i] = struct { + result1 grpc.Connection + }{result1} +} + +func (fake *FakeConnectionsTracker) SetInstanceID(arg1 string, arg2 string) { + fake.setInstanceIDMutex.Lock() + fake.setInstanceIDArgsForCall = append(fake.setInstanceIDArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.SetInstanceIDStub + fake.recordInvocation("SetInstanceID", []interface{}{arg1, arg2}) + fake.setInstanceIDMutex.Unlock() + if stub != nil { + fake.SetInstanceIDStub(arg1, arg2) + } +} + +func (fake *FakeConnectionsTracker) SetInstanceIDCallCount() int { + fake.setInstanceIDMutex.RLock() + defer 
fake.setInstanceIDMutex.RUnlock() + return len(fake.setInstanceIDArgsForCall) +} + +func (fake *FakeConnectionsTracker) SetInstanceIDCalls(stub func(string, string)) { + fake.setInstanceIDMutex.Lock() + defer fake.setInstanceIDMutex.Unlock() + fake.SetInstanceIDStub = stub +} + +func (fake *FakeConnectionsTracker) SetInstanceIDArgsForCall(i int) (string, string) { + fake.setInstanceIDMutex.RLock() + defer fake.setInstanceIDMutex.RUnlock() + argsForCall := fake.setInstanceIDArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeConnectionsTracker) Track(arg1 string, arg2 grpc.Connection) { + fake.trackMutex.Lock() + fake.trackArgsForCall = append(fake.trackArgsForCall, struct { + arg1 string + arg2 grpc.Connection + }{arg1, arg2}) + stub := fake.TrackStub + fake.recordInvocation("Track", []interface{}{arg1, arg2}) + fake.trackMutex.Unlock() + if stub != nil { + fake.TrackStub(arg1, arg2) + } +} + +func (fake *FakeConnectionsTracker) TrackCallCount() int { + fake.trackMutex.RLock() + defer fake.trackMutex.RUnlock() + return len(fake.trackArgsForCall) +} + +func (fake *FakeConnectionsTracker) TrackCalls(stub func(string, grpc.Connection)) { + fake.trackMutex.Lock() + defer fake.trackMutex.Unlock() + fake.TrackStub = stub +} + +func (fake *FakeConnectionsTracker) TrackArgsForCall(i int) (string, grpc.Connection) { + fake.trackMutex.RLock() + defer fake.trackMutex.RUnlock() + argsForCall := fake.trackArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeConnectionsTracker) UntrackConnectionsForParent(arg1 types.NamespacedName) { + fake.untrackConnectionsForParentMutex.Lock() + fake.untrackConnectionsForParentArgsForCall = append(fake.untrackConnectionsForParentArgsForCall, struct { + arg1 types.NamespacedName + }{arg1}) + stub := fake.UntrackConnectionsForParentStub + fake.recordInvocation("UntrackConnectionsForParent", []interface{}{arg1}) + fake.untrackConnectionsForParentMutex.Unlock() + if stub != nil { + fake.UntrackConnectionsForParentStub(arg1) + } +} + +func (fake *FakeConnectionsTracker) UntrackConnectionsForParentCallCount() int { + fake.untrackConnectionsForParentMutex.RLock() + defer fake.untrackConnectionsForParentMutex.RUnlock() + return len(fake.untrackConnectionsForParentArgsForCall) +} + +func (fake *FakeConnectionsTracker) UntrackConnectionsForParentCalls(stub func(types.NamespacedName)) { + fake.untrackConnectionsForParentMutex.Lock() + defer fake.untrackConnectionsForParentMutex.Unlock() + fake.UntrackConnectionsForParentStub = stub +} + +func (fake *FakeConnectionsTracker) UntrackConnectionsForParentArgsForCall(i int) types.NamespacedName { + fake.untrackConnectionsForParentMutex.RLock() + defer fake.untrackConnectionsForParentMutex.RUnlock() + argsForCall := fake.untrackConnectionsForParentArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeConnectionsTracker) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getConnectionMutex.RLock() + defer fake.getConnectionMutex.RUnlock() + fake.setInstanceIDMutex.RLock() + defer fake.setInstanceIDMutex.RUnlock() + fake.trackMutex.RLock() + defer fake.trackMutex.RUnlock() + fake.untrackConnectionsForParentMutex.RLock() + defer fake.untrackConnectionsForParentMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeConnectionsTracker) recordInvocation(key string, 
args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ grpc.ConnectionsTracker = new(FakeConnectionsTracker) diff --git a/internal/mode/static/nginx/agent/grpc/messenger/doc.go b/internal/mode/static/nginx/agent/grpc/messenger/doc.go new file mode 100644 index 0000000000..60150e4ad8 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/doc.go @@ -0,0 +1,4 @@ +/* +Package messenger provides a wrapper around a gRPC stream with the nginx agent. +*/ +package messenger diff --git a/internal/mode/static/nginx/agent/grpc/messenger/messenger.go b/internal/mode/static/nginx/agent/grpc/messenger/messenger.go new file mode 100644 index 0000000000..dde16c74f3 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/messenger.go @@ -0,0 +1,111 @@ +package messenger + +import ( + "context" + "errors" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" +) + +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate + +//counterfeiter:generate . Messenger + +// Messenger is a wrapper around a gRPC stream with the nginx agent. +type Messenger interface { + Run(context.Context) + Send(context.Context, *pb.ManagementPlaneRequest) error + Messages() <-chan *pb.DataPlaneResponse + Errors() <-chan error +} + +// NginxAgentMessenger is the implementation of the Messenger interface. +type NginxAgentMessenger struct { + incoming chan *pb.ManagementPlaneRequest + outgoing chan *pb.DataPlaneResponse + errorCh chan error + server pb.CommandService_SubscribeServer +} + +// New returns a new Messenger instance. +func New(server pb.CommandService_SubscribeServer) Messenger { + return &NginxAgentMessenger{ + incoming: make(chan *pb.ManagementPlaneRequest), + outgoing: make(chan *pb.DataPlaneResponse), + errorCh: make(chan error), + server: server, + } +} + +// Run starts the Messenger to listen for any Send() or Recv() events over the stream. +func (m *NginxAgentMessenger) Run(ctx context.Context) { + go m.handleRecv(ctx) + m.handleSend(ctx) +} + +// Send a message, will return error if the context is Done. +func (m *NginxAgentMessenger) Send(ctx context.Context, msg *pb.ManagementPlaneRequest) error { + select { + case <-ctx.Done(): + return ctx.Err() + case m.incoming <- msg: + } + return nil +} + +func (m *NginxAgentMessenger) handleSend(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case msg := <-m.incoming: + err := m.server.Send(msg) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(ctx.Err(), context.Canceled) { + return + } + m.errorCh <- err + + return + } + } + } +} + +// Messages returns the data plane response channel. +func (m *NginxAgentMessenger) Messages() <-chan *pb.DataPlaneResponse { + return m.outgoing +} + +// Errors returns the error channel. +func (m *NginxAgentMessenger) Errors() <-chan error { + return m.errorCh +} + +// handleRecv handles an incoming message from the nginx agent. +// It blocks until Recv returns. The result from the Recv is either going to Error or Messages channel. 
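+// A nil message with a nil error from Recv is treated as end-of-stream: the outgoing
+// Messages channel is closed and the goroutine exits.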
+func (m *NginxAgentMessenger) handleRecv(ctx context.Context) { + for { + msg, err := m.server.Recv() + if err != nil { + select { + case <-ctx.Done(): + return + case m.errorCh <- err: + } + return + } + + if msg == nil { + // close the outgoing channel to signal no more messages to be sent + close(m.outgoing) + return + } + + select { + case <-ctx.Done(): + return + case m.outgoing <- msg: + } + } +} diff --git a/internal/mode/static/nginx/agent/grpc/messenger/messenger_test.go b/internal/mode/static/nginx/agent/grpc/messenger/messenger_test.go new file mode 100644 index 0000000000..275f2ed875 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/messenger_test.go @@ -0,0 +1,125 @@ +package messenger_test + +import ( + "context" + "errors" + "testing" + + v1 "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . "github.com/onsi/gomega" + "google.golang.org/grpc" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger" +) + +type mockServer struct { + grpc.ServerStream + sendChan chan *v1.ManagementPlaneRequest + recvChan chan *v1.DataPlaneResponse +} + +func (m *mockServer) Send(msg *v1.ManagementPlaneRequest) error { + m.sendChan <- msg + return nil +} + +func (m *mockServer) Recv() (*v1.DataPlaneResponse, error) { + msg, ok := <-m.recvChan + if !ok { + return nil, errors.New("channel closed") + } + return msg, nil +} + +type mockErrorServer struct { + grpc.ServerStream + sendChan chan *v1.ManagementPlaneRequest + recvChan chan *v1.DataPlaneResponse +} + +func (m *mockErrorServer) Send(_ *v1.ManagementPlaneRequest) error { + return errors.New("error sending to server") +} + +func (m *mockErrorServer) Recv() (*v1.DataPlaneResponse, error) { + <-m.recvChan + return nil, errors.New("error received from server") +} + +func createServer() *mockServer { + return &mockServer{ + sendChan: make(chan *v1.ManagementPlaneRequest, 1), + recvChan: make(chan *v1.DataPlaneResponse, 1), + } +} + +func createErrorServer() *mockErrorServer { + return &mockErrorServer{ + sendChan: make(chan *v1.ManagementPlaneRequest, 1), + recvChan: make(chan *v1.DataPlaneResponse, 1), + } +} + +func TestSend(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + server := createServer() + msgr := messenger.New(server) + + go msgr.Run(ctx) + + msg := &v1.ManagementPlaneRequest{ + MessageMeta: &v1.MessageMeta{ + MessageId: "test", + }, + } + g.Expect(msgr.Send(ctx, msg)).To(Succeed()) + + g.Eventually(server.sendChan).Should(Receive(Equal(msg))) + + cancel() + + g.Expect(msgr.Send(ctx, &v1.ManagementPlaneRequest{})).ToNot(Succeed()) +} + +func TestMessages(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + server := createServer() + msgr := messenger.New(server) + + go msgr.Run(ctx) + + msg := &v1.DataPlaneResponse{InstanceId: "test"} + server.recvChan <- msg + + g.Eventually(msgr.Messages()).Should(Receive(Equal(msg))) +} + +func TestErrors(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + server := createErrorServer() + msgr := messenger.New(server) + + go msgr.Run(ctx) + + g.Expect(msgr.Send(ctx, &v1.ManagementPlaneRequest{})).To(Succeed()) + g.Eventually(msgr.Errors()).Should(Receive(MatchError("error sending to server"))) + + server.recvChan <- &v1.DataPlaneResponse{} + + 
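+	// unblocking Recv on the error server should surface its error on the Errors channel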
g.Eventually(msgr.Errors()).Should(Receive(MatchError("error received from server"))) +} diff --git a/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes/fake_messenger.go b/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes/fake_messenger.go new file mode 100644 index 0000000000..6b6a97bef9 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/messenger/messengerfakes/fake_messenger.go @@ -0,0 +1,284 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package messengerfakes + +import ( + "context" + "sync" + + v1 "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/messenger" +) + +type FakeMessenger struct { + ErrorsStub func() <-chan error + errorsMutex sync.RWMutex + errorsArgsForCall []struct { + } + errorsReturns struct { + result1 <-chan error + } + errorsReturnsOnCall map[int]struct { + result1 <-chan error + } + MessagesStub func() <-chan *v1.DataPlaneResponse + messagesMutex sync.RWMutex + messagesArgsForCall []struct { + } + messagesReturns struct { + result1 <-chan *v1.DataPlaneResponse + } + messagesReturnsOnCall map[int]struct { + result1 <-chan *v1.DataPlaneResponse + } + RunStub func(context.Context) + runMutex sync.RWMutex + runArgsForCall []struct { + arg1 context.Context + } + SendStub func(context.Context, *v1.ManagementPlaneRequest) error + sendMutex sync.RWMutex + sendArgsForCall []struct { + arg1 context.Context + arg2 *v1.ManagementPlaneRequest + } + sendReturns struct { + result1 error + } + sendReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeMessenger) Errors() <-chan error { + fake.errorsMutex.Lock() + ret, specificReturn := fake.errorsReturnsOnCall[len(fake.errorsArgsForCall)] + fake.errorsArgsForCall = append(fake.errorsArgsForCall, struct { + }{}) + stub := fake.ErrorsStub + fakeReturns := fake.errorsReturns + fake.recordInvocation("Errors", []interface{}{}) + fake.errorsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeMessenger) ErrorsCallCount() int { + fake.errorsMutex.RLock() + defer fake.errorsMutex.RUnlock() + return len(fake.errorsArgsForCall) +} + +func (fake *FakeMessenger) ErrorsCalls(stub func() <-chan error) { + fake.errorsMutex.Lock() + defer fake.errorsMutex.Unlock() + fake.ErrorsStub = stub +} + +func (fake *FakeMessenger) ErrorsReturns(result1 <-chan error) { + fake.errorsMutex.Lock() + defer fake.errorsMutex.Unlock() + fake.ErrorsStub = nil + fake.errorsReturns = struct { + result1 <-chan error + }{result1} +} + +func (fake *FakeMessenger) ErrorsReturnsOnCall(i int, result1 <-chan error) { + fake.errorsMutex.Lock() + defer fake.errorsMutex.Unlock() + fake.ErrorsStub = nil + if fake.errorsReturnsOnCall == nil { + fake.errorsReturnsOnCall = make(map[int]struct { + result1 <-chan error + }) + } + fake.errorsReturnsOnCall[i] = struct { + result1 <-chan error + }{result1} +} + +func (fake *FakeMessenger) Messages() <-chan *v1.DataPlaneResponse { + fake.messagesMutex.Lock() + ret, specificReturn := fake.messagesReturnsOnCall[len(fake.messagesArgsForCall)] + fake.messagesArgsForCall = append(fake.messagesArgsForCall, struct { + }{}) + stub := fake.MessagesStub + fakeReturns := fake.messagesReturns + fake.recordInvocation("Messages", []interface{}{}) + fake.messagesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + 
return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeMessenger) MessagesCallCount() int { + fake.messagesMutex.RLock() + defer fake.messagesMutex.RUnlock() + return len(fake.messagesArgsForCall) +} + +func (fake *FakeMessenger) MessagesCalls(stub func() <-chan *v1.DataPlaneResponse) { + fake.messagesMutex.Lock() + defer fake.messagesMutex.Unlock() + fake.MessagesStub = stub +} + +func (fake *FakeMessenger) MessagesReturns(result1 <-chan *v1.DataPlaneResponse) { + fake.messagesMutex.Lock() + defer fake.messagesMutex.Unlock() + fake.MessagesStub = nil + fake.messagesReturns = struct { + result1 <-chan *v1.DataPlaneResponse + }{result1} +} + +func (fake *FakeMessenger) MessagesReturnsOnCall(i int, result1 <-chan *v1.DataPlaneResponse) { + fake.messagesMutex.Lock() + defer fake.messagesMutex.Unlock() + fake.MessagesStub = nil + if fake.messagesReturnsOnCall == nil { + fake.messagesReturnsOnCall = make(map[int]struct { + result1 <-chan *v1.DataPlaneResponse + }) + } + fake.messagesReturnsOnCall[i] = struct { + result1 <-chan *v1.DataPlaneResponse + }{result1} +} + +func (fake *FakeMessenger) Run(arg1 context.Context) { + fake.runMutex.Lock() + fake.runArgsForCall = append(fake.runArgsForCall, struct { + arg1 context.Context + }{arg1}) + stub := fake.RunStub + fake.recordInvocation("Run", []interface{}{arg1}) + fake.runMutex.Unlock() + if stub != nil { + fake.RunStub(arg1) + } +} + +func (fake *FakeMessenger) RunCallCount() int { + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + return len(fake.runArgsForCall) +} + +func (fake *FakeMessenger) RunCalls(stub func(context.Context)) { + fake.runMutex.Lock() + defer fake.runMutex.Unlock() + fake.RunStub = stub +} + +func (fake *FakeMessenger) RunArgsForCall(i int) context.Context { + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + argsForCall := fake.runArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeMessenger) Send(arg1 context.Context, arg2 *v1.ManagementPlaneRequest) error { + fake.sendMutex.Lock() + ret, specificReturn := fake.sendReturnsOnCall[len(fake.sendArgsForCall)] + fake.sendArgsForCall = append(fake.sendArgsForCall, struct { + arg1 context.Context + arg2 *v1.ManagementPlaneRequest + }{arg1, arg2}) + stub := fake.SendStub + fakeReturns := fake.sendReturns + fake.recordInvocation("Send", []interface{}{arg1, arg2}) + fake.sendMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeMessenger) SendCallCount() int { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + return len(fake.sendArgsForCall) +} + +func (fake *FakeMessenger) SendCalls(stub func(context.Context, *v1.ManagementPlaneRequest) error) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = stub +} + +func (fake *FakeMessenger) SendArgsForCall(i int) (context.Context, *v1.ManagementPlaneRequest) { + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + argsForCall := fake.sendArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeMessenger) SendReturns(result1 error) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = nil + fake.sendReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeMessenger) SendReturnsOnCall(i int, result1 error) { + fake.sendMutex.Lock() + defer fake.sendMutex.Unlock() + fake.SendStub = nil + if fake.sendReturnsOnCall == nil { + fake.sendReturnsOnCall = make(map[int]struct { + result1 error + }) + } + 
fake.sendReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeMessenger) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.errorsMutex.RLock() + defer fake.errorsMutex.RUnlock() + fake.messagesMutex.RLock() + defer fake.messagesMutex.RUnlock() + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + fake.sendMutex.RLock() + defer fake.sendMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeMessenger) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ messenger.Messenger = new(FakeMessenger) diff --git a/internal/mode/static/nginx/config/configfakes/fake_generator.go b/internal/mode/static/nginx/config/configfakes/fake_generator.go index fab0755398..0dc5ac408f 100644 --- a/internal/mode/static/nginx/config/configfakes/fake_generator.go +++ b/internal/mode/static/nginx/config/configfakes/fake_generator.go @@ -4,41 +4,41 @@ package configfakes import ( "sync" - "github.com/nginx/nginx-gateway-fabric/internal/framework/file" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" ) type FakeGenerator struct { - GenerateStub func(dataplane.Configuration) []file.File + GenerateStub func(dataplane.Configuration) []agent.File generateMutex sync.RWMutex generateArgsForCall []struct { arg1 dataplane.Configuration } generateReturns struct { - result1 []file.File + result1 []agent.File } generateReturnsOnCall map[int]struct { - result1 []file.File + result1 []agent.File } - GenerateDeploymentContextStub func(dataplane.DeploymentContext) (file.File, error) + GenerateDeploymentContextStub func(dataplane.DeploymentContext) (agent.File, error) generateDeploymentContextMutex sync.RWMutex generateDeploymentContextArgsForCall []struct { arg1 dataplane.DeploymentContext } generateDeploymentContextReturns struct { - result1 file.File + result1 agent.File result2 error } generateDeploymentContextReturnsOnCall map[int]struct { - result1 file.File + result1 agent.File result2 error } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *FakeGenerator) Generate(arg1 dataplane.Configuration) []file.File { +func (fake *FakeGenerator) Generate(arg1 dataplane.Configuration) []agent.File { fake.generateMutex.Lock() ret, specificReturn := fake.generateReturnsOnCall[len(fake.generateArgsForCall)] fake.generateArgsForCall = append(fake.generateArgsForCall, struct { @@ -63,7 +63,7 @@ func (fake *FakeGenerator) GenerateCallCount() int { return len(fake.generateArgsForCall) } -func (fake *FakeGenerator) GenerateCalls(stub func(dataplane.Configuration) []file.File) { +func (fake *FakeGenerator) GenerateCalls(stub func(dataplane.Configuration) []agent.File) { fake.generateMutex.Lock() defer fake.generateMutex.Unlock() fake.GenerateStub = stub @@ -76,30 +76,30 @@ func (fake *FakeGenerator) GenerateArgsForCall(i int) dataplane.Configuration { return 
argsForCall.arg1 } -func (fake *FakeGenerator) GenerateReturns(result1 []file.File) { +func (fake *FakeGenerator) GenerateReturns(result1 []agent.File) { fake.generateMutex.Lock() defer fake.generateMutex.Unlock() fake.GenerateStub = nil fake.generateReturns = struct { - result1 []file.File + result1 []agent.File }{result1} } -func (fake *FakeGenerator) GenerateReturnsOnCall(i int, result1 []file.File) { +func (fake *FakeGenerator) GenerateReturnsOnCall(i int, result1 []agent.File) { fake.generateMutex.Lock() defer fake.generateMutex.Unlock() fake.GenerateStub = nil if fake.generateReturnsOnCall == nil { fake.generateReturnsOnCall = make(map[int]struct { - result1 []file.File + result1 []agent.File }) } fake.generateReturnsOnCall[i] = struct { - result1 []file.File + result1 []agent.File }{result1} } -func (fake *FakeGenerator) GenerateDeploymentContext(arg1 dataplane.DeploymentContext) (file.File, error) { +func (fake *FakeGenerator) GenerateDeploymentContext(arg1 dataplane.DeploymentContext) (agent.File, error) { fake.generateDeploymentContextMutex.Lock() ret, specificReturn := fake.generateDeploymentContextReturnsOnCall[len(fake.generateDeploymentContextArgsForCall)] fake.generateDeploymentContextArgsForCall = append(fake.generateDeploymentContextArgsForCall, struct { @@ -124,7 +124,7 @@ func (fake *FakeGenerator) GenerateDeploymentContextCallCount() int { return len(fake.generateDeploymentContextArgsForCall) } -func (fake *FakeGenerator) GenerateDeploymentContextCalls(stub func(dataplane.DeploymentContext) (file.File, error)) { +func (fake *FakeGenerator) GenerateDeploymentContextCalls(stub func(dataplane.DeploymentContext) (agent.File, error)) { fake.generateDeploymentContextMutex.Lock() defer fake.generateDeploymentContextMutex.Unlock() fake.GenerateDeploymentContextStub = stub @@ -137,28 +137,28 @@ func (fake *FakeGenerator) GenerateDeploymentContextArgsForCall(i int) dataplane return argsForCall.arg1 } -func (fake *FakeGenerator) GenerateDeploymentContextReturns(result1 file.File, result2 error) { +func (fake *FakeGenerator) GenerateDeploymentContextReturns(result1 agent.File, result2 error) { fake.generateDeploymentContextMutex.Lock() defer fake.generateDeploymentContextMutex.Unlock() fake.GenerateDeploymentContextStub = nil fake.generateDeploymentContextReturns = struct { - result1 file.File + result1 agent.File result2 error }{result1, result2} } -func (fake *FakeGenerator) GenerateDeploymentContextReturnsOnCall(i int, result1 file.File, result2 error) { +func (fake *FakeGenerator) GenerateDeploymentContextReturnsOnCall(i int, result1 agent.File, result2 error) { fake.generateDeploymentContextMutex.Lock() defer fake.generateDeploymentContextMutex.Unlock() fake.GenerateDeploymentContextStub = nil if fake.generateDeploymentContextReturnsOnCall == nil { fake.generateDeploymentContextReturnsOnCall = make(map[int]struct { - result1 file.File + result1 agent.File result2 error }) } fake.generateDeploymentContextReturnsOnCall[i] = struct { - result1 file.File + result1 agent.File result2 error }{result1, result2} } diff --git a/internal/mode/static/nginx/config/generator.go b/internal/mode/static/nginx/config/generator.go index 3fd5eac10a..5e92544c2d 100644 --- a/internal/mode/static/nginx/config/generator.go +++ b/internal/mode/static/nginx/config/generator.go @@ -6,9 +6,12 @@ import ( "path/filepath" "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" 
"github.com/nginx/nginx-gateway-fabric/internal/framework/file" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/http" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/clientsettings" @@ -64,9 +67,9 @@ const ( // This interface is used for testing purposes only. type Generator interface { // Generate generates NGINX configuration files from internal representation. - Generate(configuration dataplane.Configuration) []file.File + Generate(configuration dataplane.Configuration) []agent.File // GenerateDeploymentContext generates the deployment context used for N+ licensing. - GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (file.File, error) + GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (agent.File, error) } // GeneratorImpl is an implementation of Generator. @@ -106,8 +109,8 @@ type executeFunc func(configuration dataplane.Configuration) []executeResult // It is the responsibility of the caller to validate the configuration before calling this function. // In case of invalid configuration, NGINX will fail to reload or could be configured with malicious configuration. // To validate, use the validators from the validation package. -func (g GeneratorImpl) Generate(conf dataplane.Configuration) []file.File { - files := make([]file.File, 0) +func (g GeneratorImpl) Generate(conf dataplane.Configuration) []agent.File { + files := make([]agent.File, 0) for id, pair := range conf.SSLKeyPairs { files = append(files, generatePEM(id, pair.Cert, pair.Key)) @@ -129,16 +132,19 @@ func (g GeneratorImpl) Generate(conf dataplane.Configuration) []file.File { // GenerateDeploymentContext generates the deployment_ctx.json file needed for N+ licensing. // It's exported since it's used by the init container process. 
-func (g GeneratorImpl) GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (file.File, error) { +func (g GeneratorImpl) GenerateDeploymentContext(depCtx dataplane.DeploymentContext) (agent.File, error) { depCtxBytes, err := json.Marshal(depCtx) if err != nil { - return file.File{}, fmt.Errorf("error building deployment context for mgmt block: %w", err) + return agent.File{}, fmt.Errorf("error building deployment context for mgmt block: %w", err) } - deploymentCtxFile := file.File{ - Content: depCtxBytes, - Path: mainIncludesFolder + "/deployment_ctx.json", - Type: file.TypeRegular, + deploymentCtxFile := agent.File{ + Meta: &pb.FileMeta{ + Name: mainIncludesFolder + "/deployment_ctx.json", + Hash: filesHelper.GenerateHash(depCtxBytes), + Permissions: file.RegularFileMode, + }, + Contents: depCtxBytes, } return deploymentCtxFile, nil @@ -147,7 +153,7 @@ func (g GeneratorImpl) GenerateDeploymentContext(depCtx dataplane.DeploymentCont func (g GeneratorImpl) executeConfigTemplates( conf dataplane.Configuration, generator policies.Generator, -) []file.File { +) []agent.File { fileBytes := make(map[string][]byte) httpUpstreams := g.createUpstreams(conf.Upstreams, upstreamsettings.NewProcessor()) @@ -160,17 +166,20 @@ func (g GeneratorImpl) executeConfigTemplates( } } - var mgmtFiles []file.File + var mgmtFiles []agent.File if g.plus { mgmtFiles = g.generateMgmtFiles(conf) } - files := make([]file.File, 0, len(fileBytes)+len(mgmtFiles)) + files := make([]agent.File, 0, len(fileBytes)+len(mgmtFiles)) for fp, bytes := range fileBytes { - files = append(files, file.File{ - Path: fp, - Content: bytes, - Type: file.TypeRegular, + files = append(files, agent.File{ + Meta: &pb.FileMeta{ + Name: fp, + Hash: filesHelper.GenerateHash(bytes), + Permissions: file.RegularFileMode, + }, + Contents: bytes, }) } files = append(files, mgmtFiles...) @@ -198,16 +207,19 @@ func (g GeneratorImpl) getExecuteFuncs( } } -func generatePEM(id dataplane.SSLKeyPairID, cert []byte, key []byte) file.File { +func generatePEM(id dataplane.SSLKeyPairID, cert []byte, key []byte) agent.File { c := make([]byte, 0, len(cert)+len(key)+1) c = append(c, cert...) c = append(c, '\n') c = append(c, key...) - return file.File{ - Content: c, - Path: generatePEMFileName(id), - Type: file.TypeSecret, + return agent.File{ + Meta: &pb.FileMeta{ + Name: generatePEMFileName(id), + Hash: filesHelper.GenerateHash(c), + Permissions: file.SecretFileMode, + }, + Contents: c, } } @@ -215,11 +227,14 @@ func generatePEMFileName(id dataplane.SSLKeyPairID) string { return filepath.Join(secretsFolder, string(id)+".pem") } -func generateCertBundle(id dataplane.CertBundleID, cert []byte) file.File { - return file.File{ - Content: cert, - Path: generateCertBundleFileName(id), - Type: file.TypeRegular, +func generateCertBundle(id dataplane.CertBundleID, cert []byte) agent.File { + return agent.File{ + Meta: &pb.FileMeta{ + Name: generateCertBundleFileName(id), + Hash: filesHelper.GenerateHash(cert), + Permissions: file.SecretFileMode, + }, + Contents: cert, } } diff --git a/internal/mode/static/nginx/config/generator_test.go b/internal/mode/static/nginx/config/generator_test.go index 92fb02f403..9e0fe140fa 100644 --- a/internal/mode/static/nginx/config/generator_test.go +++ b/internal/mode/static/nginx/config/generator_test.go @@ -5,12 +5,15 @@ import ( "testing" "github.com/go-logr/logr" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" ngfConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -146,7 +149,7 @@ func TestGenerate(t *testing.T) { g.Expect(files).To(HaveLen(17)) arrange := func(i, j int) bool { - return files[i].Path < files[j].Path + return files[i].Meta.Name < files[j].Meta.Name } sort.Slice(files, arrange) @@ -171,9 +174,9 @@ func TestGenerate(t *testing.T) { /etc/nginx/stream-conf.d/stream.conf */ - g.Expect(files[0].Type).To(Equal(file.TypeRegular)) - g.Expect(files[0].Path).To(Equal("/etc/nginx/conf.d/http.conf")) - httpCfg := string(files[0].Content) // converting to string so that on failure gomega prints strings not byte arrays + g.Expect(files[0].Meta.Permissions).To(Equal(file.RegularFileMode)) + g.Expect(files[0].Meta.Name).To(Equal("/etc/nginx/conf.d/http.conf")) + httpCfg := string(files[0].Contents) // converting to string so that on failure gomega prints strings not byte arrays // Note: this only verifies that Generate() returns a byte array with upstream, server, and split_client blocks. // It does not test the correctness of those blocks. That functionality is covered by other tests in this package. g.Expect(httpCfg).To(ContainSubstring("listen 80")) @@ -190,14 +193,14 @@ func TestGenerate(t *testing.T) { g.Expect(httpCfg).To(ContainSubstring("include /etc/nginx/includes/http_snippet1.conf;")) g.Expect(httpCfg).To(ContainSubstring("include /etc/nginx/includes/http_snippet2.conf;")) - g.Expect(files[1].Path).To(Equal("/etc/nginx/conf.d/matches.json")) - g.Expect(files[1].Type).To(Equal(file.TypeRegular)) + g.Expect(files[1].Meta.Name).To(Equal("/etc/nginx/conf.d/matches.json")) + g.Expect(files[1].Meta.Permissions).To(Equal(file.RegularFileMode)) expString := "{}" - g.Expect(string(files[1].Content)).To(Equal(expString)) + g.Expect(string(files[1].Contents)).To(Equal(expString)) - g.Expect(files[2].Path).To(Equal("/etc/nginx/conf.d/plus-api.conf")) - g.Expect(files[2].Type).To(Equal(file.TypeRegular)) - httpCfg = string(files[2].Content) + g.Expect(files[2].Meta.Name).To(Equal("/etc/nginx/conf.d/plus-api.conf")) + g.Expect(files[2].Meta.Permissions).To(Equal(file.RegularFileMode)) + httpCfg = string(files[2].Contents) g.Expect(httpCfg).To(ContainSubstring("listen unix:/var/run/nginx/nginx-plus-api.sock;")) g.Expect(httpCfg).To(ContainSubstring("access_log off;")) g.Expect(httpCfg).To(ContainSubstring("listen 8765;")) @@ -210,26 +213,26 @@ func TestGenerate(t *testing.T) { // snippet include files // content is not checked in this test. 
- g.Expect(files[3].Path).To(Equal("/etc/nginx/includes/http_snippet1.conf")) - g.Expect(files[4].Path).To(Equal("/etc/nginx/includes/http_snippet2.conf")) - g.Expect(files[5].Path).To(Equal("/etc/nginx/includes/main_snippet1.conf")) - g.Expect(files[6].Path).To(Equal("/etc/nginx/includes/main_snippet2.conf")) + g.Expect(files[3].Meta.Name).To(Equal("/etc/nginx/includes/http_snippet1.conf")) + g.Expect(files[4].Meta.Name).To(Equal("/etc/nginx/includes/http_snippet2.conf")) + g.Expect(files[5].Meta.Name).To(Equal("/etc/nginx/includes/main_snippet1.conf")) + g.Expect(files[6].Meta.Name).To(Equal("/etc/nginx/includes/main_snippet2.conf")) - g.Expect(files[7].Path).To(Equal("/etc/nginx/main-includes/deployment_ctx.json")) - deploymentCtx := string(files[7].Content) + g.Expect(files[7].Meta.Name).To(Equal("/etc/nginx/main-includes/deployment_ctx.json")) + deploymentCtx := string(files[7].Contents) g.Expect(deploymentCtx).To(ContainSubstring("\"integration\":\"ngf\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"cluster_id\":\"test-uid\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"installation_id\":\"test-uid-replicaSet\"")) g.Expect(deploymentCtx).To(ContainSubstring("\"cluster_node_count\":1")) - g.Expect(files[8].Path).To(Equal("/etc/nginx/main-includes/main.conf")) - mainConfStr := string(files[8].Content) + g.Expect(files[8].Meta.Name).To(Equal("/etc/nginx/main-includes/main.conf")) + mainConfStr := string(files[8].Contents) g.Expect(mainConfStr).To(ContainSubstring("load_module modules/ngx_otel_module.so;")) g.Expect(mainConfStr).To(ContainSubstring("include /etc/nginx/includes/main_snippet1.conf;")) g.Expect(mainConfStr).To(ContainSubstring("include /etc/nginx/includes/main_snippet2.conf;")) - g.Expect(files[9].Path).To(Equal("/etc/nginx/main-includes/mgmt.conf")) - mgmtConf := string(files[9].Content) + g.Expect(files[9].Meta.Name).To(Equal("/etc/nginx/main-includes/mgmt.conf")) + mgmtConf := string(files[9].Contents) g.Expect(mgmtConf).To(ContainSubstring("usage_report endpoint=test-endpoint")) g.Expect(mgmtConf).To(ContainSubstring("license_token /etc/nginx/secrets/license.jwt")) g.Expect(mgmtConf).To(ContainSubstring("deployment_context /etc/nginx/main-includes/deployment_ctx.json")) @@ -237,31 +240,34 @@ func TestGenerate(t *testing.T) { g.Expect(mgmtConf).To(ContainSubstring("ssl_certificate /etc/nginx/secrets/mgmt-tls.crt")) g.Expect(mgmtConf).To(ContainSubstring("ssl_certificate_key /etc/nginx/secrets/mgmt-tls.key")) - g.Expect(files[10].Path).To(Equal("/etc/nginx/secrets/license.jwt")) - g.Expect(string(files[10].Content)).To(Equal("license")) + g.Expect(files[10].Meta.Name).To(Equal("/etc/nginx/secrets/license.jwt")) + g.Expect(string(files[10].Contents)).To(Equal("license")) - g.Expect(files[11].Path).To(Equal("/etc/nginx/secrets/mgmt-ca.crt")) - g.Expect(string(files[11].Content)).To(Equal("ca")) + g.Expect(files[11].Meta.Name).To(Equal("/etc/nginx/secrets/mgmt-ca.crt")) + g.Expect(string(files[11].Contents)).To(Equal("ca")) - g.Expect(files[12].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.crt")) - g.Expect(string(files[12].Content)).To(Equal("cert")) + g.Expect(files[12].Meta.Name).To(Equal("/etc/nginx/secrets/mgmt-tls.crt")) + g.Expect(string(files[12].Contents)).To(Equal("cert")) - g.Expect(files[13].Path).To(Equal("/etc/nginx/secrets/mgmt-tls.key")) - g.Expect(string(files[13].Content)).To(Equal("key")) + g.Expect(files[13].Meta.Name).To(Equal("/etc/nginx/secrets/mgmt-tls.key")) + g.Expect(string(files[13].Contents)).To(Equal("key")) - 
g.Expect(files[14].Path).To(Equal("/etc/nginx/secrets/test-certbundle.crt")) - certBundle := string(files[14].Content) + g.Expect(files[14].Meta.Name).To(Equal("/etc/nginx/secrets/test-certbundle.crt")) + certBundle := string(files[14].Contents) g.Expect(certBundle).To(Equal("test-cert")) - g.Expect(files[15]).To(Equal(file.File{ - Type: file.TypeSecret, - Path: "/etc/nginx/secrets/test-keypair.pem", - Content: []byte("test-cert\ntest-key"), + g.Expect(files[15]).To(Equal(agent.File{ + Meta: &pb.FileMeta{ + Name: "/etc/nginx/secrets/test-keypair.pem", + Hash: filesHelper.GenerateHash([]byte("test-cert\ntest-key")), + Permissions: file.SecretFileMode, + }, + Contents: []byte("test-cert\ntest-key"), })) - g.Expect(files[16].Path).To(Equal("/etc/nginx/stream-conf.d/stream.conf")) - g.Expect(files[16].Type).To(Equal(file.TypeRegular)) - streamCfg := string(files[16].Content) + g.Expect(files[16].Meta.Name).To(Equal("/etc/nginx/stream-conf.d/stream.conf")) + g.Expect(files[16].Meta.Permissions).To(Equal(file.RegularFileMode)) + streamCfg := string(files[16].Contents) g.Expect(streamCfg).To(ContainSubstring("listen unix:/var/run/nginx/app.example.com-443.sock")) g.Expect(streamCfg).To(ContainSubstring("listen 443")) g.Expect(streamCfg).To(ContainSubstring("app.example.com unix:/var/run/nginx/app.example.com-443.sock")) diff --git a/internal/mode/static/nginx/config/main_config.go b/internal/mode/static/nginx/config/main_config.go index 1b27a52e74..edb837d2bc 100644 --- a/internal/mode/static/nginx/config/main_config.go +++ b/internal/mode/static/nginx/config/main_config.go @@ -3,8 +3,12 @@ package config import ( gotemplate "text/template" + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + filesHelper "github.com/nginx/agent/v3/pkg/files" + "github.com/nginx/nginx-gateway-fabric/internal/framework/file" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/shared" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -50,7 +54,7 @@ type mgmtConf struct { // generateMgmtFiles generates the NGINX Plus configuration file for the mgmt block. As part of this, // it writes the secret and deployment context files that are referenced in the mgmt block. 
-func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []file.File { +func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []agent.File { if !g.plus { return nil } @@ -60,47 +64,59 @@ func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []file.Fi panic("nginx plus token not set in expected map") } - tokenFile := file.File{ - Content: tokenContent, - Path: secretsFolder + "/license.jwt", - Type: file.TypeSecret, + tokenFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/license.jwt", + Hash: filesHelper.GenerateHash(tokenContent), + Permissions: file.SecretFileMode, + }, + Contents: tokenContent, } - files := []file.File{tokenFile} + files := []agent.File{tokenFile} cfg := mgmtConf{ Endpoint: g.usageReportConfig.Endpoint, Resolver: g.usageReportConfig.Resolver, - LicenseTokenFile: tokenFile.Path, + LicenseTokenFile: tokenFile.Meta.Name, SkipVerify: g.usageReportConfig.SkipVerify, } if content, ok := conf.AuxiliarySecrets[graph.PlusReportCACertificate]; ok { - caFile := file.File{ - Content: content, - Path: secretsFolder + "/mgmt-ca.crt", - Type: file.TypeSecret, + caFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/mgmt-ca.crt", + Hash: filesHelper.GenerateHash(content), + Permissions: file.SecretFileMode, + }, + Contents: content, } - cfg.CACertFile = caFile.Path + cfg.CACertFile = caFile.Meta.Name files = append(files, caFile) } if content, ok := conf.AuxiliarySecrets[graph.PlusReportClientSSLCertificate]; ok { - certFile := file.File{ - Content: content, - Path: secretsFolder + "/mgmt-tls.crt", - Type: file.TypeSecret, + certFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/mgmt-tls.crt", + Hash: filesHelper.GenerateHash(content), + Permissions: file.SecretFileMode, + }, + Contents: content, } - cfg.ClientSSLCertFile = certFile.Path + cfg.ClientSSLCertFile = certFile.Meta.Name files = append(files, certFile) } if content, ok := conf.AuxiliarySecrets[graph.PlusReportClientSSLKey]; ok { - keyFile := file.File{ - Content: content, - Path: secretsFolder + "/mgmt-tls.key", - Type: file.TypeSecret, + keyFile := agent.File{ + Meta: &pb.FileMeta{ + Name: secretsFolder + "/mgmt-tls.key", + Hash: filesHelper.GenerateHash(content), + Permissions: file.SecretFileMode, + }, + Contents: content, } - cfg.ClientSSLKeyFile = keyFile.Path + cfg.ClientSSLKeyFile = keyFile.Meta.Name files = append(files, keyFile) } @@ -111,10 +127,14 @@ func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []file.Fi files = append(files, deploymentCtxFile) } - mgmtBlockFile := file.File{ - Content: helpers.MustExecuteTemplate(mgmtConfigTemplate, cfg), - Path: mgmtIncludesFile, - Type: file.TypeRegular, + mgmtContents := helpers.MustExecuteTemplate(mgmtConfigTemplate, cfg) + mgmtBlockFile := agent.File{ + Meta: &pb.FileMeta{ + Name: mgmtIncludesFile, + Hash: filesHelper.GenerateHash(mgmtContents), + Permissions: file.RegularFileMode, + }, + Contents: mgmtContents, } return append(files, mgmtBlockFile) diff --git a/internal/mode/static/state/conditions/conditions.go b/internal/mode/static/state/conditions/conditions.go index 00feac778a..b82ccead99 100644 --- a/internal/mode/static/state/conditions/conditions.go +++ b/internal/mode/static/state/conditions/conditions.go @@ -19,7 +19,7 @@ const ( // ListenerMessageFailedNginxReload is a message used with ListenerConditionProgrammed (false) // when nginx fails to reload. 
ListenerMessageFailedNginxReload = "The Listener is not programmed due to a failure to " + - "reload nginx with the configuration. Please see the nginx container logs for any possible configuration issues." + "reload nginx with the configuration" // RouteReasonBackendRefUnsupportedValue is used with the "ResolvedRefs" condition when one of the // Route rules has a backendRef with an unsupported value. @@ -68,7 +68,7 @@ const ( // GatewayMessageFailedNginxReload is a message used with GatewayConditionProgrammed (false) // when nginx fails to reload. GatewayMessageFailedNginxReload = "The Gateway is not programmed due to a failure to " + - "reload nginx with the configuration. Please see the nginx container logs for any possible configuration issues" + "reload nginx with the configuration" // RouteMessageFailedNginxReload is a message used with RouteReasonGatewayNotProgrammed // when nginx fails to reload. diff --git a/internal/mode/static/state/graph/graph.go b/internal/mode/static/state/graph/graph.go index b05e0c35e7..4e98bf8402 100644 --- a/internal/mode/static/state/graph/graph.go +++ b/internal/mode/static/state/graph/graph.go @@ -84,6 +84,14 @@ type Graph struct { SnippetsFilters map[types.NamespacedName]*SnippetsFilter // PlusSecrets holds the secrets related to NGINX Plus licensing. PlusSecrets map[types.NamespacedName][]PlusSecretFile + + LatestReloadResult NginxReloadResult +} + +// NginxReloadResult describes the result of an NGINX reload. +type NginxReloadResult struct { + // Error is the error that occurred during the reload. + Error error } // ProtectedPorts are the ports that may not be configured by a listener with a descriptive name of each port. diff --git a/internal/mode/static/status/prepare_requests.go b/internal/mode/static/status/prepare_requests.go index 46e150a8cb..f3bd39c2a3 100644 --- a/internal/mode/static/status/prepare_requests.go +++ b/internal/mode/static/status/prepare_requests.go @@ -19,18 +19,12 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" ) -// NginxReloadResult describes the result of an NGINX reload. -type NginxReloadResult struct { - // Error is the error that occurred during the reload. - Error error -} - // PrepareRouteRequests prepares status UpdateRequests for the given Routes. 
func PrepareRouteRequests( l4routes map[graph.L4RouteKey]*graph.L4Route, routes map[graph.RouteKey]*graph.L7Route, transitionTime metav1.Time, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, gatewayCtlrName string, ) []frameworkStatus.UpdateRequest { reqs := make([]frameworkStatus.UpdateRequest, 0, len(routes)) @@ -107,7 +101,7 @@ func prepareRouteStatus( gatewayCtlrName string, parentRefs []graph.ParentRef, conds []conditions.Condition, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, transitionTime metav1.Time, srcGeneration int64, ) v1.RouteStatus { @@ -214,7 +208,7 @@ func PrepareGatewayRequests( ignoredGateways map[types.NamespacedName]*v1.Gateway, transitionTime metav1.Time, gwAddresses []v1.GatewayStatusAddress, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, ) []frameworkStatus.UpdateRequest { reqs := make([]frameworkStatus.UpdateRequest, 0, 1+len(ignoredGateways)) @@ -240,7 +234,7 @@ func prepareGatewayRequest( gateway *graph.Gateway, transitionTime metav1.Time, gwAddresses []v1.GatewayStatusAddress, - nginxReloadRes NginxReloadResult, + nginxReloadRes graph.NginxReloadResult, ) frameworkStatus.UpdateRequest { if !gateway.Valid { conds := conditions.ConvertConditions( @@ -272,9 +266,10 @@ func prepareGatewayRequest( } if nginxReloadRes.Error != nil { + msg := fmt.Sprintf("%s: %s", staticConds.ListenerMessageFailedNginxReload, nginxReloadRes.Error.Error()) conds = append( conds, - staticConds.NewListenerNotProgrammedInvalid(staticConds.ListenerMessageFailedNginxReload), + staticConds.NewListenerNotProgrammedInvalid(msg), ) } @@ -302,9 +297,10 @@ func prepareGatewayRequest( } if nginxReloadRes.Error != nil { + msg := fmt.Sprintf("%s: %s", staticConds.GatewayMessageFailedNginxReload, nginxReloadRes.Error.Error()) gwConds = append( gwConds, - staticConds.NewGatewayNotProgrammedInvalid(staticConds.GatewayMessageFailedNginxReload), + staticConds.NewGatewayNotProgrammedInvalid(msg), ) } diff --git a/internal/mode/static/status/prepare_requests_test.go b/internal/mode/static/status/prepare_requests_test.go index 5c0a9df34c..8bb8ca34f7 100644 --- a/internal/mode/static/status/prepare_requests_test.go +++ b/internal/mode/static/status/prepare_requests_test.go @@ -3,6 +3,7 @@ package status import ( "context" "errors" + "fmt" "testing" "github.com/go-logr/logr" @@ -274,7 +275,7 @@ func TestBuildHTTPRouteStatuses(t *testing.T) { map[graph.L4RouteKey]*graph.L4Route{}, routes, transitionTime, - NginxReloadResult{}, + graph.NginxReloadResult{}, gatewayCtlrName, ) @@ -353,7 +354,7 @@ func TestBuildGRPCRouteStatuses(t *testing.T) { map[graph.L4RouteKey]*graph.L4Route{}, routes, transitionTime, - NginxReloadResult{}, + graph.NginxReloadResult{}, gatewayCtlrName, ) @@ -430,7 +431,7 @@ func TestBuildTLSRouteStatuses(t *testing.T) { routes, map[graph.RouteKey]*graph.L7Route{}, transitionTime, - NginxReloadResult{}, + graph.NginxReloadResult{}, gatewayCtlrName, ) @@ -534,7 +535,7 @@ func TestBuildRouteStatusesNginxErr(t *testing.T) { map[graph.L4RouteKey]*graph.L4Route{}, routes, transitionTime, - NginxReloadResult{Error: errors.New("test error")}, + graph.NginxReloadResult{Error: errors.New("test error")}, gatewayCtlrName, ) @@ -740,7 +741,7 @@ func TestBuildGatewayStatuses(t *testing.T) { routeKey := graph.RouteKey{NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-1"}} tests := []struct { - nginxReloadRes NginxReloadResult + nginxReloadRes graph.NginxReloadResult gateway *graph.Gateway 
ignoredGateways map[types.NamespacedName]*v1.Gateway expected map[types.NamespacedName]v1.GatewayStatus @@ -1087,7 +1088,7 @@ func TestBuildGatewayStatuses(t *testing.T) { ObservedGeneration: 2, LastTransitionTime: transitionTime, Reason: string(v1.GatewayReasonInvalid), - Message: staticConds.GatewayMessageFailedNginxReload, + Message: fmt.Sprintf("%s: test error", staticConds.GatewayMessageFailedNginxReload), }, }, Listeners: []v1.ListenerStatus{ @@ -1125,14 +1126,14 @@ func TestBuildGatewayStatuses(t *testing.T) { ObservedGeneration: 2, LastTransitionTime: transitionTime, Reason: string(v1.ListenerReasonInvalid), - Message: staticConds.ListenerMessageFailedNginxReload, + Message: fmt.Sprintf("%s: test error", staticConds.ListenerMessageFailedNginxReload), }, }, }, }, }, }, - nginxReloadRes: NginxReloadResult{Error: errors.New("test error")}, + nginxReloadRes: graph.NginxReloadResult{Error: errors.New("test error")}, }, { name: "valid gateway with valid parametersRef; all valid listeners", diff --git a/internal/mode/static/status/queue.go b/internal/mode/static/status/queue.go new file mode 100644 index 0000000000..5f31bbec6d --- /dev/null +++ b/internal/mode/static/status/queue.go @@ -0,0 +1,66 @@ +package status + +import ( + "context" + "sync" + + "k8s.io/apimachinery/pkg/types" +) + +// QueueObject is the object to be passed to the queue for status updates. +type QueueObject struct { + Error error + Deployment types.NamespacedName +} + +// Queue represents a queue with unlimited size. +type Queue struct { + notifyCh chan struct{} + items []*QueueObject + + lock sync.Mutex +} + +// NewQueue returns a new Queue object. +func NewQueue() *Queue { + return &Queue{ + items: []*QueueObject{}, + notifyCh: make(chan struct{}, 1), + } +} + +// Enqueue adds an item to the queue and notifies any blocked readers. +func (q *Queue) Enqueue(item *QueueObject) { + q.lock.Lock() + defer q.lock.Unlock() + + q.items = append(q.items, item) + + select { + case q.notifyCh <- struct{}{}: + default: + } +} + +// Dequeue removes and returns the front item from the queue. +// It blocks if the queue is empty or when the context is canceled. +func (q *Queue) Dequeue(ctx context.Context) *QueueObject { + q.lock.Lock() + defer q.lock.Unlock() + + for len(q.items) == 0 { + q.lock.Unlock() + select { + case <-ctx.Done(): + q.lock.Lock() + return nil + case <-q.notifyCh: + q.lock.Lock() + } + } + + front := q.items[0] + q.items = q.items[1:] + + return front +} diff --git a/internal/mode/static/status/queue_test.go b/internal/mode/static/status/queue_test.go new file mode 100644 index 0000000000..0bed3cee62 --- /dev/null +++ b/internal/mode/static/status/queue_test.go @@ -0,0 +1,94 @@ +package status + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" +) + +func TestNewQueue(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + + g.Expect(q).ToNot(BeNil()) + g.Expect(q.items).To(BeEmpty()) +} + +func TestEnqueue(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + item := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object"}, + } + q.Enqueue(item) + + g.Expect(q.items).To(HaveLen(1)) + g.Expect(q.items[0]).To(Equal(item)) +} + +func TestDequeue(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + item := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object"}, + } + q.Enqueue(item) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dequeuedItem := q.Dequeue(ctx) + g.Expect(dequeuedItem).To(Equal(item)) + g.Expect(q.items).To(BeEmpty()) +} + +func TestDequeueEmptyQueue(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + dequeuedItem := q.Dequeue(ctx) + g.Expect(dequeuedItem).To(BeNil()) +} + +func TestDequeueWithMultipleItems(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + q := NewQueue() + item1 := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object-1"}, + } + item2 := &QueueObject{ + Error: nil, + Deployment: types.NamespacedName{Namespace: "default", Name: "test-object-2"}, + } + q.Enqueue(item1) + q.Enqueue(item2) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dequeuedItem1 := q.Dequeue(ctx) + g.Expect(dequeuedItem1).To(Equal(item1)) + + dequeuedItem2 := q.Dequeue(ctx) + + g.Expect(dequeuedItem2).To(Equal(item2)) + g.Expect(q.items).To(BeEmpty()) +} From 94b295f0391ae1fb3b7cd40b83f28b8907f6f08f Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Fri, 31 Jan 2025 13:37:38 -0700 Subject: [PATCH 06/32] CP/DP Split: Fix empty plus file, blocking calls (#3078) Problem: The NGINX Plus API conf file was empty when sending using OSS, which caused an error applying config. This also revealed an issue where we received multiple messages from agent, causing some channel blocking. Solution: Don't send the empty NGINX conf file if not running N+. Ignore responses from agent about rollbacks, so we only ever process a single response as expected. 
--- internal/mode/static/nginx/agent/agent.go | 5 +-- internal/mode/static/nginx/agent/command.go | 31 +++++++++++++++---- internal/mode/static/nginx/agent/file.go | 2 ++ internal/mode/static/nginx/config/plus_api.go | 6 ++-- .../mode/static/nginx/config/plus_api_test.go | 18 ++--------- 5 files changed, 35 insertions(+), 27 deletions(-) diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index 28a20f1872..58fad509db 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -85,10 +85,11 @@ func (n *NginxUpdaterImpl) UpdateConfig( deployment *Deployment, files []File, ) bool { - n.logger.Info("Sending nginx configuration to agent") - msg := deployment.SetFiles(files) applied := deployment.GetBroadcaster().Send(msg) + if applied { + n.logger.Info("Sent nginx configuration to agent") + } deployment.SetLatestConfigError(deployment.GetConfigurationStatus()) diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go index 04b482ffba..236a34f57d 100644 --- a/internal/mode/static/nginx/agent/command.go +++ b/internal/mode/static/nginx/agent/command.go @@ -119,6 +119,8 @@ func (cs *commandService) CreateConnection( // If any connection or unrecoverable errors occur, return and agent should re-establish a subscription. // If errors occur with applying the config, log and put those errors into the status queue to be written // to the Gateway status. +// +//nolint:gocyclo // could be room for improvement here func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error { ctx := in.Context() @@ -179,6 +181,7 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error panic(fmt.Sprintf("unknown request type %d", msg.Type)) } + cs.logger.V(1).Info("Sending configuration to agent", "requestType", msg.Type) if err := msgr.Send(ctx, req); err != nil { cs.logger.Error(err, "error sending request to agent") deployment.SetPodErrorStatus(conn.PodName, err) @@ -189,7 +192,10 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error case err = <-msgr.Errors(): cs.logger.Error(err, "connection error", "pod", conn.PodName) deployment.SetPodErrorStatus(conn.PodName, err) - channels.ResponseCh <- struct{}{} + select { + case channels.ResponseCh <- struct{}{}: + default: + } if errors.Is(err, io.EOF) { return grpcStatus.Error(codes.Aborted, err.Error()) @@ -198,7 +204,11 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error case msg := <-msgr.Messages(): res := msg.GetCommandResponse() if res.GetStatus() != pb.CommandResponse_COMMAND_STATUS_OK { - err := fmt.Errorf("bad response from agent: msg: %s; error: %s", res.GetMessage(), res.GetError()) + if isRollbackMessage(res.GetMessage()) { + // we don't care about these messages, so ignore them + continue + } + err := fmt.Errorf("msg: %s; error: %s", res.GetMessage(), res.GetError()) deployment.SetPodErrorStatus(conn.PodName, err) } else { deployment.SetPodErrorStatus(conn.PodName, nil) @@ -268,6 +278,8 @@ func (cs *commandService) setInitialConfig( for _, action := range deployment.GetNGINXPlusActions() { // retry the API update request because sometimes nginx isn't quite ready after the config apply reload timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + var overallUpstreamApplyErr error + if err := wait.PollUntilContextCancel( timeoutCtx, 500*time.Millisecond, @@ -287,13 +299,14 @@ func (cs *commandService) 
setInitialConfig( } if upstreamApplyErr != nil { - return false, nil //nolint:nilerr // this error is collected at the end + overallUpstreamApplyErr = errors.Join(overallUpstreamApplyErr, upstreamApplyErr) + return false, nil } return true, nil }, ); err != nil { - if strings.Contains(err.Error(), "bad response from agent") { - errs = append(errs, err) + if overallUpstreamApplyErr != nil { + errs = append(errs, overallUpstreamApplyErr) } else { cancel() return err @@ -330,7 +343,7 @@ func (cs *commandService) waitForInitialConfigApply( case msg := <-msgr.Messages(): res := msg.GetCommandResponse() if res.GetStatus() != pb.CommandResponse_COMMAND_STATUS_OK { - applyErr := fmt.Errorf("bad response from agent: msg: %s; error: %s", res.GetMessage(), res.GetError()) + applyErr := fmt.Errorf("msg: %s; error: %s", res.GetMessage(), res.GetError()) return applyErr, nil } @@ -379,6 +392,12 @@ func buildRequest(fileOverviews []*pb.File, instanceID, version string) *pb.Mana } } +func isRollbackMessage(msg string) bool { + msgToLower := strings.ToLower(msg) + return strings.Contains(msgToLower, "rollback successful") || + strings.Contains(msgToLower, "rollback failed") +} + func buildPlusAPIRequest(action *pb.NGINXPlusAction, instanceID string) *pb.ManagementPlaneRequest { return &pb.ManagementPlaneRequest{ MessageMeta: &pb.MessageMeta{ diff --git a/internal/mode/static/nginx/agent/file.go b/internal/mode/static/nginx/agent/file.go index a4163ea187..35f26b628c 100644 --- a/internal/mode/static/nginx/agent/file.go +++ b/internal/mode/static/nginx/agent/file.go @@ -75,6 +75,8 @@ func (fs *fileService) GetFile( return nil, status.Errorf(codes.NotFound, "file not found") } + fs.logger.V(1).Info("Getting file for agent", "file", filename) + return &pb.GetFileResponse{ Contents: &pb.FileContents{ Contents: contents, diff --git a/internal/mode/static/nginx/config/plus_api.go b/internal/mode/static/nginx/config/plus_api.go index 9b1894fe30..d4988bb838 100644 --- a/internal/mode/static/nginx/config/plus_api.go +++ b/internal/mode/static/nginx/config/plus_api.go @@ -10,15 +10,15 @@ import ( var plusAPITemplate = gotemplate.Must(gotemplate.New("plusAPI").Parse(plusAPITemplateText)) func executePlusAPI(conf dataplane.Configuration) []executeResult { - result := executeResult{ - dest: nginxPlusConfigFile, - } + var result executeResult // if AllowedAddresses is empty, it means that we are not running on nginx plus, and we don't want this generated if conf.NginxPlus.AllowedAddresses != nil { result = executeResult{ dest: nginxPlusConfigFile, data: helpers.MustExecuteTemplate(plusAPITemplate, conf.NginxPlus), } + } else { + return nil } return []executeResult{result} diff --git a/internal/mode/static/nginx/config/plus_api_test.go b/internal/mode/static/nginx/config/plus_api_test.go index 6afb79142a..f664143402 100644 --- a/internal/mode/static/nginx/config/plus_api_test.go +++ b/internal/mode/static/nginx/config/plus_api_test.go @@ -43,21 +43,7 @@ func TestExecutePlusAPI_EmptyNginxPlus(t *testing.T) { } g := NewWithT(t) - expSubStrings := map[string]int{ - "listen unix:/var/run/nginx/nginx-plus-api.sock;": 0, - "access_log off;": 0, - "api write=on;": 0, - "listen 8765;": 0, - "root /usr/share/nginx/html;": 0, - "allow 127.0.0.1;": 0, - "deny all;": 0, - "location = /dashboard.html {}": 0, - "api write=off;": 0, - } - for expSubStr, expCount := range expSubStrings { - res := executePlusAPI(conf) - g.Expect(res).To(HaveLen(1)) - g.Expect(expCount).To(Equal(strings.Count(string(res[0].data), expSubStr))) - } + res := 
executePlusAPI(conf) + g.Expect(res).To(BeNil()) } From 31649433acb859ab29f9a1511e866c9039b695e6 Mon Sep 17 00:00:00 2001 From: bjee19 <139261241+bjee19@users.noreply.github.com> Date: Mon, 10 Feb 2025 15:18:19 -0800 Subject: [PATCH 07/32] CP/DP split: Add leader election (#3092) Add leader election to allow data plane pods to only connect to the lead NGF pod. If control plane is scaled, only the leader is marked as ready and the backups are Unready so the data plane doesn't connect to them. Problem: We want the NGF control plane to fail-over to another pod when the control plane pod goes down. Solution: Only the leader pod is marked as ready by Kubernetes, and all connections from data plane pods are connected to the leader pod. --- .../control-data-plane-split/graph-conns.png | Bin 22038 -> 23782 bytes internal/framework/runnables/runnables.go | 28 +++--- .../framework/runnables/runnables_test.go | 22 +++-- internal/mode/static/handler.go | 30 +++++- internal/mode/static/handler_test.go | 28 ++++-- internal/mode/static/health.go | 87 +++++++++++++++--- internal/mode/static/health_test.go | 86 ++++++++++++++++- internal/mode/static/manager.go | 24 +++-- 8 files changed, 253 insertions(+), 52 deletions(-) diff --git a/docs/proposals/control-data-plane-split/graph-conns.png b/docs/proposals/control-data-plane-split/graph-conns.png index bb41cd488e53fa54f625eef29454e2f44d8c5a5b..b383363917f44f6cd478d8d2fdb5175008fa693c 100644 GIT binary patch literal 23782 zcmeFZXH-<(vMAaFL8Ji%$p{$8k~0VhiqH*B4nh+pHlbIZ0|V*b0(! zY*K^dq(sTD`TX`i``mZly=T1pe%v?4W(>w!tLB_Ft7=x&tXZp8ztd1tBD=wG0{{S! zfs~(W0RRLD0D#c-Ispzsj8Eu`yRcd)Ye4`2FLnSR;57hnf&&Gt0sw9T0KmE#03ekB z0MH^*>NOwX8jdwobrdfzFMs@4iit~lB=?N-p|H6ntg@ zzqPgfIwVvVYEe*JuB@h?oSv-&(MwFp2#t&h3Hx|(a2OUDtNGm2+TPjd1uXnyoR!^6 znWqqaV@prpU^^#wcdtNuxQmmshn=G{!r7yvv!}AUZsEsLZb6Ap;9K9Ix0N+@1A`+i zZC^_(stwI-O3SNNG!4Ie?T}Vbv2k?W+uKi0&k7C+|JL3a_$Jgl0BL6BsHS5qD*c3u zSEQuuGsvr76ek3{7b;3m0dKF}QN}%xAe0SU006q=tA9L@TW|~xNbCxNC=##XU!x+Y zL}T#v0RY1a&{KIG&#A2@lJp-EH8UMu?+c4d?~x$z#7BmNhtKPqY)@eApC1~SJecf{ z`=PjdbScn&js9a>&%{mAmw0KHWzsw}M4o`zCjqPgb`sni|6l*j2YaaRh(A33`73qj zGv55H&;C}C_M>CIJAm!(Ve7rKwL40FUS6{fvL}2jfbGtP-sL^oxqA`2uPWt>D8GK~ z=&f`j;rP89_KfZAdy2;UC&SB4P9%WokG;zrbChc4hK)XqjQ|Ps*}XMV7heFXP3BYG z20<&yHeRnW3qTD)VY__-Km>U|b{QuQpqeH~YLEq_%rg&6AK=Xs9tA9yQUEwm#3CNP z0EvGLT?tUDH2R*|2zP>+$QZH!QeKj+8Wud`9Nm^KjitSi&;~zx&H^ZY30yUN_vLJu z3#xq({L5G}AFjMQx5Y8cvL`miUd+%D0M~K4td6sb(RwA6(89eYU6NE> z9Q(_n)Na>O%QnZ=O4V+bU5l*UPk`yUI2_No@Ng+yBeN{5jfkz&(s72{Kx;Hl*QPiz zKD^K?<3wvXGO7K72uV=X&ikpPR`toC*=)k4H}qQ~MW<~64G6s$q{(G`PvWe zRwh2nDsPI@j{clnGoUs4ZWYIvZBin&uR)Q+$sHi^Sh&UGJn)iYdUbBDN~{|3-aSaC z!z*nLt(6gJ7(=91QQJf@uYHN(aLq^|y*~vy&~1@nfMAAH1H6BzReC48!USHrN>pDy z%r$CFX1tM@n^%!IsivVQF6$p$G_W}VIs;iHy2=f`(6>h9>F|VT`UWTd+{znOdp|7?vBJhwtRSNU~-^x zjJWm1{~PW3pDLai^MGf&_*>FtymPn8;{_-I;AruDIoXLM&vVM>kQXmNdbjG8wb2`& zS2#N$3Wgp4OHZ6XyUFlu!^f(3}Ow+`|j@KJp0Vwe~ zxpU7Q%yZ&b7s$g00FDZZI{?5#LLhN#P%8rP@cRGXZ|?0AWUQ+1KuH0E(S>&koOl6W z`}?6S=bKOx0MEQS6n)-VO9{|>Tp?-KWQ)lmXX{ z-<%zWI#C0PUz>I!T2A!vTEUzHnS^HYe)72HrQg)NU_e}!Ug05b4MsJ%bx+0stzN%F z=6rYnW{M-Wn)qhN7hA8W2zLB^PG&p|@DUMZ&Ed}KX+H7nfV|_H$wzYM`+WGa=bkN- zJ_p8WfRu$(>pg6!)2*Y0?vUQ)RHs`wC$zXM$1cHc7Mo-tZPJu`ut;IS;#s24^|x;dsK!t@2Cct z9HJeG1qyStAtAzw;@n#eiC;koF7$;s^NI4?-vY-U;X6UMzd+K&G#0U!m;9YhoD8 zotFe+%M=~!>g{(huC_r1iN!Aaj_-pe6BxS2v>ffp#uL(qopM0pFk6n?XTycHZYDX} zCyFpXb4$7vGFz>nP-Qmh7%3|W=WxNq0TSIu(O6_BD0f+Kj)MK8R5i^&mqf)wokG>h zGUXa%Q7=tXK~4J+#A0^Y!ca$8D`>G_S~seJ)Itmd#ejEv`FrwgfO$UgYg^ORbMDnY z3`K(&^T;@;>?I%{`Aiv0^%uV-!G489Yr0016>VzDa=bvTn@`B#QP-1V 
ztY&Hu(~c9}R7j&267|^(N%}}(`gITSFV%HI(OnW*S-qB7pW_L0s|vHT{XX{i%z3iSOk?EznasCw8`pB6h;qTvw- zUfW=*nl5H&6rKKw^LEFfhD5ee_s-LR!^4G{2)+XQ;cV5l16J=7cHRfouYOpfvU`wG z4jNOvTOHcOtzy*~!YwSDp@pBepBaF`X!eAW;|T!g=F&84hcEjg|Ija5DBH-fE5z@M zf=z8+#S0-{Zw3l%6yT1t0Xav9W>=${^ODHVjh}B4G{j9Bo?1v~&ZM=!{|HC>#V$n|P*5SI*mPd*y}8 z!xIF|pgx0ywhqVUjDqH^HNP`S1}1jVVlTb~IQ~+k`c$zL;`sG79%dnw{mOg)dxC$9 z=zmj-l>8#u|3e|AcHMAc0#@yBP3%v0YG#{`&g-?AqueI^qisiydlw#a4BH*?N6G5V zqSGXQG`rtu-{JUhJO1yBXQ$=Z z$(QGP*k7V<`VGzj&oO*?v31MkzCC}lLcy0>WchRNuhZYVLdzW9*lK>OE8l%v7wbkl zHy{F=5(##?bGKsqZCzq{Xj$i~mp#uX#)otmYL(3m>>mJj*RqTkI8tXYNMneciiyJ>G|+0!v+ z>SMJ_xjkOCrx*1+&8=}@pLftX@U*f={pC8e%gwR@NZ34<&hl9`*sZC=2oj8#l^HMCOTZ&x zWzsQOXnWy^WZTKjCDiYSdgi`XbrsPG6YlSC%%}O1t&4lW(;Vk3nb$&F!g} zjA~8~(WkbM_q5N>TTbF`abMEeo_+#WD%O?Uy%?+w+c;hcd1!k}UMP9;c0D*&nc(dbe1R`({(E`lF^1SGfw!vy za$M4E6%!x$lety3^WSyX+WK{bgAyyQK-*$1uV~9A ztprs(K^p!P|9OsCY4|la!%Yq@YPzEw7j)y<3Z$ywsv{B z(?BenF3Kc)-jZGtE%&*(n8H}j-?s<=o~XLn=Kw z0w9W8*{4DHEL0EG9X!?1x?ez-*RDBHGDXBTHpucn>Op7~2t$EgaYjY;S;?LPMpN9c z_~gy2sCqFm=k%pxE2JsT?9i-6nYb!BbnJppTQbvB5bf?NNQxOPq^C zxN8LsyBb-@#tRJZ$V8i(OWe;cmTIW1-|(h{Oht>7dF?rYPiMEwcCtHd)LFYiqdJ+yIMMTIfgh8$5>uSo$KRIfmTVtC53f-UFCJ3R zGRhfNBe}eczc=B>g9OJSo3hg}79t;34VMlR*6&yMlOZeHG*wSCJ*unNCfs0AT`w4! z7O`cjr z7k5Me41$1rc98*h#*Glw;5)h-=QR9R~$CicX>2M;=E zvU?-5g2(fbY9zU>Yp-*+IEE}gNpS5Dp&f(K?N5{zxG0wCY)3g1l7;6Edkn)u zP()w$&17qht`L;&6e+A|u&iS!IjPAmrek;`cFKW&wBoQ$7bzSfCb|GBlRS*ZZYJK( zY2|hAs20RCN>=F|V|t=kI&K`*LpHvDD3cSbLFB3?cNa}!Pz~O@B_;wYBp)@T;higVFB<-V>y%-hyvhWQ9-P}Xg2&4*^&7z9SG_)|- zUUZ=Iiw?IXs(s`U`1U_I6(jJnM@5Nn{E(WejaAGmdfms^4$_5c9(GyUOJG867%AA* zEodO$c-L&lVmw`}9;J-7kdil)IoPA#-%yL9uJ`Z` zgG;qMl#X#~B9kVXH)i=80$oAc96%QT96gzp>_Em*GpFz5TL4*Z+)K|T4(Z%mwqmFn| zt8_~!2iY;bn|JAbleDtXCQ=2iQgqIbB2B0$Yz}sUESyZWI}&F=C(}^vYC4r~m};OC!l{x=TQ{uiQGzik`@iOku?`m5K8~n!2EKGVi9P=euicrI@sXMs)w?%jVh!@b=2VSeX8R&Sv754m<48;qcF2*;Cc+hyIM9oUCp@&5kdOT= zV=?i?Q*iouw2@vKw2uJ}>{yP)b~Q1;;~BG*shb#dd+jCG@wE)3QfVnS2B%EW;qj>l zA1PacvF@iy^rf>!Sa6p9M|~vhpN==cvl!lR);rPK29cc;i?W3(oG^#xaF z=P%u&B^7*a!{Z6o!uB|0n6Sns(8ezHA|-7+MKopW33!wTJ!y|J=i$kscyREM(L9`v zf7^N-AzGl)A*`5;QL#FWHW9UGsFRt_M+IXyn|Jk*mE#CZnr1yKPr-Z+ocl)UuLZdK zFNX`UY)nm>Hk5N|o1xFB-L>z;6ETKpr_Hoas>8OMbQPRwl}Nb^kkm@m#dVABs$*!uw&2|iSWf2@x+sd__yvWMan=WWbQ%rQRnkc&suQ7sd@%CY+ zDOh_ATnAA}uH2c}EpZ^q7pP*EhV;Z8HS!9;ia~lgwW3D$Es0)S$%7AaxNOXws8(FR ztaD0v!@$qsPM!(RylgNHsdmB`mwX*IhdW$re!kvSxBM%y42%?x4Wdld?yJ>XIXyK7 zj)h1m6)n5@qZ-EQxTvnGIU1k)95c?zAMG}C9I|@~jMs}JiIqqv$f?S+RMy3K!m+E> zcd9g(az5>;u;;e&Hn=NuCt1wyT(^) zBx8rohk<4)8DrPPaWR$x&f*`MGMXY$&T;#$?|B^^h1S#5*p| zSWxKq&^<2`7ZFzW@DZBZu?0zvYPne-TwRZfC*o|lFkfnD?73?dzFSgO-O;fq#b(!t z-wZLroIdx>tNeN~d%KBpvVy0M5hzZiWVJ*#5+SgVm!(N@IQJ;sLFg^Kk@D7UfTY`@ zo(0iR!Ti#|)K6}!;VH&srQ+FlEUb^t9RX`+jO>!mZ(8YCh!W!+4>BC$QO z=6UMhiV7~0wsjg$M439f)l&r=$dk3L8tl;Y9pSW7N?dCZBL%b8{K)$q1!q4TO)JWoFw6@Kv8)M)71PbEw2*h{?uh~i~VqoXGhwn7LJ2~** zz<&$mzo`s_HYol;Vu#g};|&X%@-k2IUWA;Zn5mjmn>4G!c=`i5F*zR)Yg;O$xP0epFvFwaXnuKd#8Pml2o7tFFhL?TKPSVBa`W(y|9K-J4b3MPR;lSk_ z)n2GQ$E6&Z7W0l)l8GB}r&lq#RLnAQr5-}{BzmX6QLU9&!0HTpzk~8@4P(7`VH6q_ zeZg-k=fH<*J>K=(yXeqI=3xg87#n=Ouim4UEHEvHeT!Y$^RvjkjG8(;^`7Hi9Bp~k zzcB3`#c!IP=1V&)pVngR>ei=x8QI-e+Twq*35BJci1(|P8m5ifYN*aK@SIOh}ZO!(L3ryXOs@V-f%zCnfYbe%HKv~ zn*Zg$3+BBms61h6CY>RWb%^>wlKu0ravj zgGEO6oj*Vwq|0ZUs?nA1I}e&RJa!^3_D!pCslrO)p!>_Yib?hdl>$2Zr8LmucZD-h z`{WsEbN@PTkNv#MMumgr!1+bd-#{jg(R)ws+@^2qu>VOf0WN^{l*)kf6{&n0iP8hJ zETu+183t)Ydgs?KcD8Cx_0{6{{e#qq6<{g@$MpkLNTGBZkg0#N<*+0`ID`2}!>uDa za8Z)fePWdIUpgbD!OG!qkl^b@A*wS@_=iB*hhi4 znl}iMdxSUqQvWIKpfyRse~9M^ztw9(ma;aRzr_Au3-CXa`N!T6k8A{{NfczZ|FYQ* 
zm%li;YPu54y^&Tj-J=@;Shc{`YaY+Co2d~v4KQ>?Fyh3~;Mr9cM4rU|8#-WsgV39%LF|9PE8Tn(u2@l zjZ7qBZUgyJgyj(a4|DuI!YV5H=_8jXFNnXIDprizdQsC23N%VOX1gd zG%&z<4UoQ)yOr==B6Wk?t1%UjA;3RC!aDxR> zgaw#T%s2{)ag0Ejtt9}h&{6>=A`m`;H-Q)MH*aYHH|T<*PnhXo_{?O$T4Esa|8P;I zFT)DR5dX}IZKi$%RPD#x5Cg zV}4G4WJh0elhxQRk$a7^c-wQF0UK)Fi4V@Uy4CwR9z9~kPO)Jx9KXabH_5X8;v=Ep zBa!>0akY!;)}~K5!G}$8$KtO0+F?CW}f>;PSb-7+? zLW29L<5hVlHUAOTOrl)xj}s#>qc<4?#;jSjOhAJX67fGKi%rpKkq~M|4Q`* z#4V|7uU=}quko*!K>cLuCd)9r>$P*|TTxP!0oNz1x%iutxcObWOChi3`AiQx*xVif zZtYfDggTGsK3a)0ji}}{or-B;2NZ|3NYrq8ICiNZVoQ>> zx0RqV?UnwY6iM{D(zy;VDdJ7f+!LXGl4F_EUSc&gOAUEPjL1iFzUK=KwymTixo7AM zMafb1l^F%cF1dB|+&PVT=x&yi&7GC=on}(v%`42>=dQE7-v()xRwut3#;E7=9q&in zWs!3dW6|jN)h2R&v3wXh$LG?`-(eV&WwU6f*^F_JbqQZDOYWtZV`z;qy#tM@@vHyJ z+rsRqY1rRBnt6u8mFr<8(#04{IfALRa2##JCSr8<3i_kD8a<9 zP>WC@&;^bQwFgGe#xhmKlGAYkc3ViV(i~-L#FaRrA2c|O{3>{TX<)<)wm^9_v!%=n z13TWfz0x!EpNxX$QAYGQ2{1{JCNoyE$U2jcbq8SK?1!F~Zdq|JBhgl^h+0bTN4s^J zr^Ca0>;VB1WI&Jm6&?4>7^~A?b!=Sw*$41PE3!m3!bkjGkvD0Qpl6=#|N6f9Y%8zV z8~1XS0ND_}y7+N2e99w(LN*8szklLsNyBQIu=aA9{ABp9w69WC+`$`A6FVWgLh9lu zQpuX$GOr`pFK1*8?^+$@J0^~y6*vL0nZ%A)X1^KYgmkfXm_jIuAL+t`AVAAOp<0YC+FMdX@J$B6V1J$owy+Se?#uO;n<~=-%4ZSxzwYH46A&GyXJa zQ&+H5%)dp1F#%h2fj@Ha6C?LZIZlAPV@_loC3T1PD4pw?;pv0;)UiX>m}k08#$QgB z7~p5$mRb?B2T#-GXHPtk(O`-uR$7B(1HyycAGzczoq7aK_NqDKMdxZ%d8hcAjHOKP zhY9dcW}@Zzeb!eUy=cG!LNXL%Ey7Hq_jLEB5J|FK1@rm$|c*Z%1NjzUe=oF$v zYUQ4g(PRV%x359l)hIoG({7=)WndEHDM9rWAXrkw z^6;-Iv68~UH^IjNK%1%S#_7NlRmau*m2G+>Alxf%$f>lw%5(JbZCavlOSqz~0tTAw<8e^YqbIR=idtO8v?GHeVHVd%1~AXgW)3`6aq^Kp$U`qR!QyX4 zW3{|Poj0#*TGS4ky?3DNYMXtOA)VYi!I86d>HhrpygSP^<#Pho6(71 z#}FgV53~;sD@RJd$xYlUe6g9cc}FSz3!(>GBFBejYhbX~N9!uQbv-NAHHImDN$g%6 za#xz{uf9_qZMkK&5r+P&rz|@0Qkub@J3797;c*ieS2d4IY<$k~liUp|3`{}a|A|3X zd{jXiYG27#!tUu+`n0|xI?e7h^`!|oHJ0HB`+MV^ioX$(QCf99SBTxqN@5tfX-hXg z0CU{=exT4f1lJr=j@C1gxCeiUPF>?*B$vrY`*>ys^ZM>ZpFi~^-hAb`5!sI52Yyy= z7o6c|x#Row0N)D^{s3oByEt~$e{w@}^U_Y@B`K(}?$*6SqUS%!C-#1hv1DLXF`BE# zH{s)@mksN6Ty1?iO?m#Z45cEHaF6tNTMp6aM$zdDlJ_31X(6>2*U82)@@&GvZ_H7C zpALUMr%paJlgnQER7>@~+nh7{g>3eL<%%i?^YX_VC>Rg1CIJOSvCQ*lWev+Y!}HD3V*2^eitN0^SqK;_uFm5g~lX zr@ZlO%}d0nw9sAO2~@aQj1@{EHh-wE6|X5g5mWsD9jhyx7T2@W%ZH?G*d>1+vTNBX z1?({FZ^pWot~p1KmBL_~9N{f-hbp@!9QjK&581h8)Pfy)NOu!oC0aLt!4C0jpLjP* zBKt}qql31L5VdHr*tj0mI5?~uTb=e8Tf%~~b({E4cLK!1YAJCNlM zbEsT7&_lBE9kRGvQ1oE9qU3};tj?~xVh}bvZ_h8AF^^(R|;}CZlT~lJctN?U>o&1 zdr7K2Ft>|AZ9x2>#*V7to!Rt_v-Mp|5}Y&bhoad=d>)PlvRmy9Wjk!zY3X-8{eFwhRq{ZRj#JvT?2vw5*!!F`DM&s=?!;`zlf2Hq#~RHQO#fH@(bK+ui1k6&h4k!wG8DeK1q`t)&O!8Oc=?;H@%D|sDF)8`<>jZocp!s6!viG zuwA^V0I}#zeSfhDIPM!R6Re6))bl=T`z9l&B06zo^RDrqj(w*(0@7jwzvE7;*p(iu#Fq+4VZzc*El z6XBAlkYL}Z7CobC8w*9?2=z}IZri_o3ibbNC2b$}X+=mhF@tXRc!U>gg3hc|jCyB04ZQCf*qflZ6^E|i30ZJCj_Vc1QVR(`0^Z(Ih$LuSxM;K6wi zveJ4Cu`fpTrSzrwcu0YcTo}(%_l5`H5A9Ee)JF|olwoaGA%#=sw$?~SGt>ZXKf@8B zZxJn|LTFGt$ObVILf$`}WM>0flZpRx8OC#HOP)7_KesEhRS?-;P*{4R$q?Sf0)_HSQYqemjb2DtD299L#aa z^K4D5S!t1#7{7adz2Q-tvsc{Je$l;N#-%e$*v=dNU9y6qbku;movm?6L#%M!Wt0&u zT3x{;%COs<5I1e`v_FdESuT#CwTyQm5yPbFH;b_OvKu8>XFJ!t#oqvs1tF0xBO3DL zdfZT@WzfepoDa{&`S7T!B$F7C+oO$9hsU9XDugmqAfr*r5NrY(>R*M-!otHpkJ9r# zPa$)*mRQG~0fGjo;xClR^dlf(nOTX93@MNmnZ7y%OvgtjL%LD6^kh`g4wXIVrmm8q zhO}4liZkTam7bWS7BG_Vq8Zzvp|)C@2&6B<5@jSQs3Nmgel7oy(?-+JbrK>;l__k_ zVOi!VM}wG=Cz8CCPeXx6!3Ef;A=%;ai2KU-n<+YAoV{OAYw=ICBGuiP>t~yzLHwst zaPs}AxP{&y?$Eta)=Twz#Q}AHOAH$E4jmB4vo3+#u;C}V_YF%vXNy4Y@pimJ1Q%9) zT3Ue4U^-vMWoSA^7s-<{$jkS;=v6h60CBH?esR&b{5W{x3?IRXiV)~=x9XD*j8^@F zWql`tOosTOiWe`qi5Ije+Hm2PXPX;w9E}+*G)H~>9CwgRLA(a(pP?#d9K_6jXg7kX z+0RW-<8vEsw$qi;);HLF*y;k>*wk}rJFmeDj~;g?Ohjy}YO;wBeb_S;9kEu4r9Xl- 
zB-{kjCZ%mq_UQ6eH&vD?T4!dLz-qE}>Z3>KDC;{5H{#+K^yVW+%W}6XQyApXV+f(d zlo7SaWYOfwq2dvZ!eR|yp5b5SJOW0xUZqS8alCRQ`Yl_#-<~gt(c$|?jPt&EO+smS5*_BVBXh=L(4kuso?#e*M*ov*Y+4W6X3amlEb<#pt8RL6iN zWX3rS(UN_^wf4d?TK%b1Rki+LD4OgxeoavlP(RH2HI~oP9+kc0dR_=J2#bG-fk(@~ zZO*(mI&$WaAFmiD9?#UbHOSBx4qht<#;@kqdIaNkWEc54@j~95E){5R8zN$Y%cw+3 zQ-83qyrBNVz5BW}dUUktS~VX}OJLg5^g&$*F5%M%BC?Ur0cxFJ@gfxnZ|3!0K1<{8 zmJ9`U7x|kDP*+u8JZ#i;%Ti*v8Rj;VZ&auze*3mS%6;FWr*1=P)iBqZ!e)tY=yKr{ zopqL?^SvIcB^$LZhzhCJxM8)UtQhIjWbu8}hJr2#)U+I@COUPScLoG+gtShkWS!T&l4iu{J-kzdNXp5R zb5YEzZ7uY>Arav4(+PR}Xly>&teyAKQ#j*P0n9D7j?$G{B>!0w#rjwCn>VP?ojwBN z`(^#L4?~6?_8gq_ANfx2XLFOPpy@9_=OxVc*(zlEk$2b*nX+q<8y(fID%z)xESjd# zdpCwXZFirF#R||q_N7e11~ELW7ic_i5ltys@w?w|Y=?UJrtjB9%mf2pVu}u@orYF= zRKdE}`WzJ~C%T45C#n{N8wD)5Ig>nFaWZCqqGX9M_uObH!`^r$d};0%bWx2EHi8k) zSK+qd&8nQK>^XJla{KGxLT}MkZV)XQC)(ZwIadKig5&!L*MnI3@8%Lqmf3cR1-GwrXUf|Pj z*E;O@y|p+V^bA-SX}m3+&FVoPE*uhyW$Gx6oyj@pY9tVIJwFnj_T@%pUuX+nwpN@8 z}VNJJLrfNd6anCT*juQv2}1~qnfHm-*E zBYg<2nF#@iYU+mjRFvLx0Wy2}&}mDl{^%(^UjZi4t59Ql+k5{I|DboGT6IvQ6MX&S zRPF=F^m@{Re&JMCDUg{2`&GDs!l5Jv>QBCV=7JtA95*N&%2lDHiy*n0F2dt*1=3~d zI`pZ~U3$*RP7Ca^HAcU)W@pq#hP1%Lg_x09*w=Bei!MYCylZAxtH};I6=7T{?Cb*B zi$-Q>qd%CG-*>;ZeZ%SeFMP-i>6{IH2v{0nUD&zlh$r1N<`XqMn)>sRyCY78LvWEz z-F~}AT~qB$d=tEbaW43ry+T+2SSt_`+A!IhCr2}am<`1BdZ}8jJ;SP2Vx=u+NDelY zaS4V7cH~@z@g0Klr$(QmpU7)F)_m|0Lg6gyDt<$;KPR6|e7SJW%5V3GFQ)CypP(NX zvngCM|B01uo*exhS`D-i|H(xF!Typ!i%YmUe{hJ%n*HfZ$yuK|HyG)k%m!W5g*?wF zgN5$ClRQrYsKiy?zG-PWT8Jy36jkR}{qXNzA1-v>WoU8ZLK2FSO!=baH(hI*`EFcW zSyonw=_Xq95B^Lqz6ts|T3a$h@F!Ly7@C7WOxM|Ku}M*Oiv_WA9l?k*zgS+O!a#=F^uohaiXPYp*yTiplI^_&8qcT```&l%Gw8Zd<<5OLMs=YBUM+rVXIE#^OZU zSc)~oy$C8?sm5Mpp@hTjm~ABCOOhN~=s~vc`#vU^ye4R^XM>)gucpzcdL2GmrWur2 zb8>=rM7z485`*+!4G7ew<9p?r2dYcGz9(?5`fdn8Va1D7VfKc8nwItY5qKhbb&hc{ z4yqmqhsDMk6zzVIHBwqfB8%V2>Pz;n+cA7Jb;+q32;Vt8ag%4-uOz_P*-9Z{4y9Hi z_?~_y+)=!EyC+mgP&P(u1?1#2R4^v9RQsgiV*f*i@#i|3@X-A|{69lrWC046?{d8| zJHxS3_5G$CK_?b(j6L34KSiK+UM7qZ_Ap)PoIT~kybO`xrpmjn3OpzXg)*f5lsVk4 zvMBE18!Yo*XIoK{++nv^<1yX>olEDekI16Q3=t8Sf%Eryb3G~nmn#QYYOfVlhID1h z;Y}j&I3#Oo&fU3#^#04-^w}mvpCmH+i}IGhw3|dzve%SS7X+|70!1VVl2`Vq&z2xo zn9*?q7i<*L&oY#bOVN%*FUSRl7(iNkt~^kkP+NWF@Nrjtmh|`F&9%>t%6721LF0|a zMZteTGZ<+s-D8JueU;m!0D73BuKi(fRoL^N%>H-oNhz7On^@(&?#a__f4s6-qYs}J zz5KLNk~J)pjM$qbV)%eZ%2!Np8>0^v&OR*-!eXWNTkZbt-UupZ>awI8pWqt)Wo$<) z%Y6cq%$6a}mGOPbayZN~`JEOWi|N4qRz-KqlBGnd_yWF6q`4@b@Kq9Y1riioD3y~W z%ax;-$;{%zqib$g>fo{UguP6Uegwt%!8n>b$QxAQVurA*k=>%$z=;{^PS5_yMSk|A@eDSl=W zS)0e_-AC(kGa}a=YeB2X2B@-J&`xsi(P=gzdWN?x6`K@{m-9IJIU)-h1KBXcFU5jDdQ)(^=mcwZb>6nHdEo%>tGBl zGx^Ke*DC#v12v@h40{G+-^jh>mAv%dqt0_J7ZVP%a|MdHHx2wa&hkUG#&Chceom9P znYbvNXEirTkVepg z!Zj|r;ij$6;-Z>zv%hQlFS)iB{o6wT21A{`?t8!bzyCfk^D$HnZ(mqy_eeTK@0a>F zJ$LwxRGIZXo5vUsAD^xH9q<7?E(z^NM|OWGJg!?m(M$E*9sZQULP{No1)9#zyt~}@ zpW(@wLvyxJGxkaJA?$J*KFf^sqdM%!JjDHcCtRxG7QD#Q&#UuGRkD>vuOuxwSWZV& zR${r#QS>eOA&7kkoU zaNk}bgNv=^TlboX(zgS!5_^?e6yACpkYbDZ*7*J-iVjk(+i-PK-VhgBxUQ8L1G$_G zITCX}2VNw1toQOQHIV7|?)LEBIdXXu)qov3jG~_lZjzb(DlW59YPlb_hx;Z=;;SJM z`E-N)t4jcXOmh$74jF!^Q%$S!aghw~j5k@HclrB3f}M2co1sqlkEj(V-3~7jH&1l3 zbq|2RmDHVrTT;m&)(uTu`VnklN$^EdA;-`S5 z`fMM&+Q0q^C{NhqG_3ry2v^PaJcv^+4wtV?MYFeu@VG!zD5tFEJq)Gv!bJD91k(C|xq7!++@neYan^S765?L#PuWt%0{iv0rBNrpdIP-M& z@rp>zIk#^2fH?s{3{UGEX@)*@7GBkm2l`_oV;P7xT5IUXqeM+D-4QucVz{EwE28ca zs#f@}jypBSHnP3mUF>{`CJ}maFop0IHL5ScQu1Q>U`36-AhQ-E)jQ+B&l=xQ%U}8s z?h42BGLd&*2OPT{CCK;@Bs}4iOJfpeDq%3FDFcc(Ahq4M3%E`iMRg5E?T({bWAxPA zGPAGoY!$|`xs}|;9lN>f*=V-Er+Xk5#U~#8nV1@^50Mo&Dsbdq(&2S0T+8nk?EEhA zrpe-gRo?3uOcjH9d<~PTVby1aA?`t#jC+!fyW=FzQl9|uhA-#*FHh$#(=W%*j-^{d 
zQGl*UF5+$UAU0=^%{TtYbw{q|#`FFm5c$BVdDF&Pf8~w}JdB8}YlsE5h#i3;x>SHpItY?5#Q6JflMi zJOzqUYR#Y+0a5*Uc3v5{r+D1wotTHQRTTciu7p*}^;*qLq~`wCep=SypBGU?8CK>> z!qB`}NuWm*dy7oC(oOL3uf+gt68CVDPi(S32YM~HRP~f!_qbblMk}YXGy?<|p_+Dx zHKg5QRD;1*8kY)-823t!K{H$y5-%MXgn{B{-yf|)ueJQnB+O6_Mv%bLGA&X1h_gXE zhF;y_FE?ns+*1k)EuH-CCO=E!Km8Gl-LxE)tzZ_lv2S>D49c8R`D+=7kfGrO$PHN6 zRC%(A9j@p(;oEQEi=@}uDqP;uo#D)bwkZ#!T&h2cTi}x%R4E%IhpR~MRFw<2ykTas zNco6O`d2h64oV9Wk_~sr#-|d+yZj&&Of{sOZ(ph;x=Lxfl~5XQv3;uP&P;gS9!o7H z+IKI{9z_G`3Jt(5bw2h4b`AGy9TlP}b#E~ch8*|2lF}PiN$+#bCHqv&rJ9SIceKnIlzDK7( zLWk4P2x3&v#sj&7uNV>W<-wD$w{@|8&&^QuIO0-_USw$I#fb=pE03o5=#T?FaH&X# z6vG!8R5NQYyAy9t7KqYuAAJlEmlSCD+G;9+h-5oMU#B_ zqu{5Mq`rQod9NvpVHqy3Sk39+L^#ViD#h9ob|ylIbRTF{D{zi3kmToG86hO#X!#~> z7R8V(h}a%=7Xm(Y9Ug&KRD(bkDkm{PFFwh^lJXn9Bx#m?CCM5$H(-yR%g6*LjEQ!} zv@aH_;o}x4up;J3mhIW%tUomB{g z!k2DnC$E*NcJ+>z@+1TucrCcVX` zaZ9>HXUgq0?o~C9p+D#URn3(LHFa+Bgn(2bKwTg#id9(^8X%EK1quqv(y+)T0BVhk(Skv1oZl-BE?R#!WJH9p|j77%Kn-Kg0 z9Lbp7zgEWR%~m?J*l0pcBaKvIF+_~9>b(OLuf>q~H;t~ZSZh|Tp(w3^xmfwf((;mk zmE)Ch$9L<>ao#9}I1_t;#+I?)wNO+vY@oVO3n5met6gc5dK&?R{j5ZZ$$#bc*4nXO zL38ygM#po2l8c>txqZF)d4V&@c(HrL*y;U?Q>+1eut@vSXD`usC>Z}ET&X6<>(S1N zt|;4qM=vv66wem-BpVof%T_!UitVmYCz*Xm%te!4Z64>>*D{Z&6DvDX`6d$e#_)7p z|G^nzVNHvVsebuNz-t%WS-CE%U!av^lgtk0Zq~kW<=^cu4dass1{VIQmF(4crxJ%S ztLXtMx}r8uftSX=7R?(CXSkewdx6W;>}w=4<4mdDg#NgV)^T#n>A|2cwETexi#jj~ z{G!baft}pXsp!lm2hmrnCEGaJz6dErE1V`*oR0})?12p|O*3cvKW)y6uYGcw5U&a2 zq^Bj(zeS^NN|?@^64vtuED9NRgXZUt`BUR59al`3pVlRF$ufJKHG5ZAfxhkvu~4rkQ7z_s=XC$C)KYh?Z|>5`m?}I9x<#U$2oSw$AAlw^mojetwW*{^rrsV?j!OvUpXMTvaryfOyFx0o0b2}sV4Wq#|{Ia zz@z?@Gg7o9pmYahIFOn|NqQ>l2T0!yQ)rQrq?;NsGJob~OS}GC!dLREGDNvRF|bLv z$=I9znZDhkB(4PXlk;QsLgaoYIU8O1Xm#N8NnDXig{vX2Z`pbQ)roHc(|r2?KtDvi zn_<1#_k{NaZG4A(H1gu&b&r-Ox5RfJ78RK~An^f|(L^qJwr7^->sLQyy$K(~wc|~f zMCF?N1oi%po4DE=9e=J#1kkI%r}mw&56AsclR&ixIE?X9#AYZ=0R@&Ymwf;rn*ewv z8h|tb&_5l23G2i?7En)xQ)b&H_C4=}fyFIYPx~IZRe~bGBiV@twV!*v9mLt|{iVBg zj;iW__Ne<+%iCN~&`I@s@*z;q!Z+1$0=6zS<21pL<#(q^vlE|aM1_`bF!0{Uvebe+#?D4X+d)n8brtwfe(#Apob3?Blh{2MPpPw5t2GKx zxg6grY;a3Zy{=qR>b)F#8O6+e|9)D-SL(AOXZGc-cYAo*8BB*!Y?Y!)?#&|t5xcjZ zZRHNZ#(`?U`#fhI0GtZrjpB#SBE(r`zGvTDoeo2-*ST2rzqIbP(20f)3CB!)nvp&> zUyFRVq8Zx|eM==d4%8Ei+(6G&4gdvEE$K7qmXj^dlGR2VK_~M|)$ovXofTz4P_q&dH z_16ehaud{{`R(tGcjn#>#b1BPAyussPE=C|k=kC0*_m^+=qW6U6-mET!u2lI$D!pKe)A+_8quX$v0rJp#;0Z=s}VT+wdA# zKJak=%iAu!{dk@avtt|h_qW{#(VeE@$;P6c`QN)eC~?T1Lx}SGFEFN zu^w4h!i~=bgHpIRq2(lBQr&?XUV#cNUpqKNzUMJh8kB>IE@)s4-918MYvoU;aLr1q zJTS7|9e96tikcxmb>ekxuIth4)lPCiek!rPNv6T!M;Uopj~}YIj?=cDVLY$iK-fig z`i{}8W0#x0)CH!3?)yMoeog>M$x4v*V^SRQZ_^QCkn}x%;%X}aq}&zVF5*~%Ql`F< z1ja<06Z@7vbr^Y%%4BP{5kSQ}xAv4%ijcemcX^J4m}D(LR#zagPeFSJE$K$Yu}KlR zNIus?_>;oTy=U}-pPTsHXWl+ZU(wpiuB)kf;`n-C_)>7rC|9QrO>BX;VY4z6M6w6% z?@7norsyCl3yW-63}?H7&9W2*9%+6Oy|VyUplwI?-r&S%hK`V*IkRi#7!%eb6Ky9e z*NV3wN;+qxij)m&nNrx&IjgI}$a7k9)gJ-Rr|Q2qX;F7)FkR+$a`g8I^C)ymdQqWS znCR)%`hY4#TGR((->Txb4>T0kh6YBoY5Bb=^0{nNAuRARWymAEbI9klg_L{tiXJDb zT4sa#Xx6B4g-sLsOf@+3iOR1Hhq zVe*ksS)f&ghi}ill%C{$KOfuY+S>^$n{zN~+}+KO^FB1#pn_xt_6Pw3nYaF0!M`?$ z#}NeE;GeP%gD2Dox|Q5ltb$>CATTWlzOXgU-Tac%^R0wBv8|<}r0?dySgO1((KP4AS(~^(^%ra+T`=P03 zf(jVaCjiEbk|xI6mVKpxu7c&6^^~?&%UH5M*eegXl2WA3Q}~+43w0)BK<&lx<1h=+ zWGtjD>61V}qpJ*FOb9)YIK95Lz&xWFzxs2;UrlZQyO&9r*zzlziu4x;Z;An~T9Gzh zk(UA@gUy1j1p^O|G29qw05>*(Bi)Qnm>D6=j7*Qh;bw4n=B`QZzYyTAUc!XL{yhQD q)5{ki(EUq7BnB6JJu(1?|2H(K&qAzs?Yu0})y@Wenr!X={XYQYbVcU? literal 22038 zcmeFZcT`i~wm%vJ2-2%`P*D&N5J5mXA|PGqQk7mq=)L5Nf=W}Wbm>y169^>=f=Va! 
zBoL&RfYJhlz%S@I=bm@({r$#$cii{Kd+#s?WACi;S#!@h*IaYYHTQb;NKb=?l7kWe z0MKYYd|&_okbnUIQeO%ZVhK5rG>mw;V)xKM7XS$62LK|U0|3XwqR3SM!0$Eyu>KSP zkbMULu!6H&AIlRf79Z&tshypjEiNvlq-Dx0so%UM@yy<(zM)yqz!d(eYkC^#;}?v_ zQxJ@6NR3`pPW=w*Nq^?WmUAS>|8-U!8!TG2M4(2l~o-Bi-EzR z!r~8D?AG&`SYr#jvdWr=dZt-gn$v3~>YbczOr8dU%7q1AZ(nm6TO|?;jc-9W#IGaLusponDE$2x{n{TAZ$Ua-~HcmQ}w z#{H1^;}ZDc6CVJ8E$jUMf~H?o3bByfS5sGwe3g=&p5e}f)(=hq!0Ay(&wve-Dy-A6AvoE%20L{yWtCl%G!dABcV2jypf4aOm zuf-S~9SmJMOt^qaYeQngPZ-U+vreGMXC{}f|8l|6n}5F4A^+6i(t}@bn+uh;6`!X8 z;55_Na0-!mX8)ePfX`Hs(*4p&9k(aRBY9CY16I5MYp}FrKo}tK6M7i6e*r@lpz1@c zWc{yH26D;{?A%Ws_xqT(yx+cm0nTotDsA5UVgkL}?YI6}X|O?btC9{d5y;UxqULh8 z*2q~FJ+fTQ;itsEw>|%sz>2p!K72>mzrEh7jDeT@lJu=0$9;@7s%RWc9tj`+Hq423 zG%obu@j*Z0PabvlaOE_>+ZF6NFT}th9t%#ZS^@e>%&ly$oHdVyDW6RGnj1^*v7?dB z%f=NO`NOIXtBErc1R5Z@RD52(Rnz8!5!;BGGaAs#dOCw)1H_>q~5;WRYjwe_9$=?RtNel^ZRdDJmrJ>k7ID zA+LX5YzP?D<4GMUjDKQ!S3STVYer!b`3hTe=^p9|oxDK@N{keejYUvUgwY9U-J&kK!nuE8P=5PLjOA zeV~_25M^o(@ZoCLqf*60-ZT;ST?OmCy4M4*79FITa;ZMYh(tR9bq)#XfcZ$W1qObG?nSo~X);xi3oAl>VDbch|4iiW* z%yR*-M4S7uP!=1-E&uKNu>cHdo6 zBAFRKz`i0ML93Q#t1D-vU_|Om> zdAo?+$Xr30ZIoK5isuUTzqq3J4ZyHK6 zb2B&OCDcus;Rzkg9(RzA6%CKuYn9T?Szm0qhsIfb2ooZ!o>WuqdZry=yrHsYGMzHQ zq>f{4!BcS#TUQ7n2!Xl^fafdrn94KqtXDJ3!h*Dr3YR7`{wTp*$0*s-x0(WKPKBvR zB_F%-dU)!dkwe55@@kp6-2IlvG`myUawp0=5yJ=RnLm2elo zXLkyt<%9C6{QWZcV^{aDl*z;JdO8$TKZ!eNb6W%UhTo6j_6&cmkY72r;iC>M#BEdJ zqMW;~QganSJTFa+k52@AylZWmvNam5J5rCna}S__wqH_ z>mYW}?(?hZqzO`GJqKzSca^RM2-vQY7XW_%s4|&{v-P`XJW+0H>NqCzX4U z?rF_1eHEo9>J1_r?eTYFTJ}K!Cp{VoK0KYcA3C{ajIT!M(&8a?!H7+5$GC ztu>9xNrIQzeVo z^hAH_-*G^_(59AMF6YqC)hOHH|JFG9%BEMko3b5@RMWw{4nDdg6}KaY4Nwpj|Byq+ z;p5FI*=VSR&b1o$Ng%713eRt^%f0!bMV6nofY#Dryusp>*~NMgDvF=mVvFAH3&?|4;zXl6MI3CZNk5Kgl)L@~{V#_6A2dYZl-}?ks=cdmnnO+%plO~vpKfyg1HX@idFt(7Xw#=DzOQ5{2a_d@BDr3VvUT^PjT zceXTpzfIs2s4+g*FQi$%yb{PlIJ%`UC>=zw5)=I2cIwl=8umfbE^6(c|W zr~E27*L_yJZz>c|FQYHD{m#L?Ba9+jAS0t7JoFjXdCogd2+cI~3+(0uO zWY>N-;YrUjG6o)0_b<4A$GfwE2u^bq``}&uS6S ze_v|0qg?nkt!g$MXF$I}6S||;1JU5oJ1lE!i%fW;gThvDG)A_LOST&4fHrRQwY)R< zY{t=S_ZO2FzPuIDYIZ+IPg`q4Ew+VOxY;lt*7WWrcNgR((v*#5+OkH-%_`<{ZOVC3 zAks8Tvf;U~voDkR=lehWsHqA(Y>MlL8LJCGs={Uu8xrPK8nAA@&SaBs*V)LIogQqk z@jO6wIKRmHx{@*?l}kR-BxXMUg0ku<`#*kBph{jfg^f^ZV5D>#u(2jGz-AXBY{O>; zs4kNSzJMhG?vd$?WUT^|f)KyK_`q5GGY4wITo9z>Z@~X&A*$}IY8W|-Sf}WJUT7Up z31(D$KH<%BRCszO+K|F_v|AhY)=KEtx90rR!?%j-rxFb*8((>-UY?joJo|b&6R^E9 ztR;Vx-i16B%TEp5*|o;mcv;^)o-TU$IV{p-r8cbdxycVrrj6i(bf{B`zFXYXgs+ZF zzaU@M0qUfww2!cp=`c6PV+uGuu6P7ZTa|Cszr{<2e}<1>X0#|BU7=EsLa z6Ne%@7YgDI-(MsTPw-Y>8%%n&>mGlHSoXT{m(F2G_W2kR6ZLn)wzr1PJU+vATD85*>p1nw$B5a?%~k52 zbq%C3tDkk^1Eej+66eAxxQk;qk8A9{$9UGRML<`%&$!G>%m^^FzRmrWHLh*NDa8nU zcLFY@5GTHNHbQy&lhF19cM!y@8q4zG4AGouGM$)D;uo#to}5A3J%^RU-xVS#g-(UZ z$cf{Nf08kxs#P@XDbt}lp@0VsqufU0t2k+d{n!u^V$_b z(K%6Yl}-WFzL&%%7b)$r!hCgN*0Yr>;Nk5{0bwX=IdVu;xIT1a!Ex#y1=n84XDeRm zu0a*d`#GkqV<@{nEs&g$3!dy9sPUnfS;akv{R{H{AG{1`d6so;-+bqGZqR*M5inbY zbe?xIoTbr5yJx z5kQqadfYFej+LEUI~^z`rLdfF@dH6#jO#UgOy?2>$KRU+m+SbM1mPwMy2z@92Bl|B zRyk}wNSls&16MRk#RAU+!DXO)UfU(DH{1b*yH$d zW2^jW_S0hxAC(0`aE?c4KCF4-bmU_7<8Nm6%rEjilS(wz?_PJ+F#_q9+`$Ky(Hchb z>yzJ=+Ix?WcTEx%$(@bBPd?d-u5TF3(GX6hGsFa=SEVNxO-TnS%hqSx%@A}XulTW= znUhfj>EcPnw%K7}ixp+M1Vp+@*{`**<_LvFkc0W%y_Gcap%VB=@|_$T)DVd$$7dH` zk;Xb9d~(%u*-2*IRQsaOF4?(mu(F;}m-k{#`z1z09MwtQ9K7Ue5nHo`Zy!j@25grF z+3h`{mvKt)J~Q7lEVLxYE9x2;^Yn|i6B?^(>V|4N*)<)?ch-Xzdds)1y87DmaofgA zz?J9HGl3fmpR#B~eMkK5Zx$m9oT>2J&J`ZJCQcJB8Qhr%AWm~DHaXtayHyYT93ql$ z81LKyt06B(J0|$=yH0jDovE)J=(|@4BLqBfX|2LQ>Gi=WR&7m_*=4f!O#HJY9ydSneo{im8Q1_#%JzGX$rIA_~WZ;iN3 zc~WNjB$`+|w66c0^*tyPs{dk~+3KA4_{I=W!_&IL!u-ZmG=g#fs*xh{US7~s1w*?G 
z4qw%jAAdSWQW#To+kYUytUe8a%zL!%ruAktN(39UfX#Z?T9L-lDc1e27d|a}(I!k@ zhC#;reLP(iW9lq4{k~|)-Qt$WsHP{?hODleeRHH! zeDe?+sxGrk@P)_s_j$nd_PZjlWxyn<>&sW0#=K=pl?=AE%{XwAy)t4baw(aBKc%3@Lo^qbuZqbGOLFy)oym7{ zXn8v@)2*nUHg3E=+bswdG?of;o5LmaVAK;E>T04W$*LRq5=Fn)2U~kz1Bb*{1AHhf z)iRm1xZ9hO)ha5UPjE_U`k5LA8GBP?BV$+i>6%K)9>{@(gQYpm14D{=2iGGcV?wGn z>7kh%C8(ZSD^>k)@a_}N2KCZZkG8N_)U1F&+AIgGQDvw`+;_HsJ=skQ&kX%lMbq(; zBpnKG36Q==S}k)@GO?&KV<*{~w?=42O0|~3y2lc!-nR#RX#m!{_>id8g_vqPQuEQ8tRH|5a#6RknGhR78@OO>cP3p=QzJQ;{XQ*>hmQV+@6oa&&k z+;@K-QrROky7XSac$Xo;|H9fW^hZ|4^pPHqF5Rr00Z#5t6%3D!+hyqTGzHGNP;?ts zBsSJC8?5+wWE0U5HE7XQU%~~Pyo8F@&c0+>HbA6wF4|{X$)lv$-y*lJEblVd%w(Nk zeO&U>hf-Wo;S0G$)8%+Hzcx_g<9riS9yzFqMq_-q#~{h@$Hw4?QnJnTRDGs`;LK^` z>c|~&u>TnQ;9AhPX?RQ6!?JJ&nPwVk9(#wq$bzs%6Eh1P&VYIkX!KYyBD0_mT~dcn zR%u4Gwa6)!K~*p)wm5cHz1qf;fV9{1!ILdNf)?|K4{Ox~4iqipGG4&*Fqu*ABEjHoZ5W zcYrK1Kk5n3oc?|oBIDnYgF4f_#VRJNswxX%3R@PTquRVHb-h9=hN0T0b9%ejz4VPy)6fe!ooobUg+Ku9cYiZ(+Q2Ez;O%8t z`_`T2l}8(`V7sD%g?)5Z^5GYVTMlpQ7QIX+b{Gc|bMU{}SY(2N{L@gRP+pV$>jTO)T@Np~2)%x%ctUC8yGK?#o zO)WnD?5zm)6@8<~QaV;G?i05D{L2+cVbpl(P;4dk!AQ2OC<{N>zpqBR^u#r$vq@=b zvIfEzbX3L?T~VVjj?^U|2Ea+UMoFg><^bhLuVDnq(|V($NWHF`skQR7_4=I`+&*I&Gt?T7A2sV@4wSfY&sUx+BD}rN>%= zDfAuiGouI2mW8JRbdT)$jhTZlLh>ePZ%PVnV%W(zu(EZDnLdpeXsY;W=GQ0qk3<)G zJr;9#Ds?_t z`+O|?e|4-Yw}@e^e*$j!Y)2k%Lpno#5^480Iyiw@%2%{(?}92@KY6@7ASC_=g8x0p zf0sIwz|Hr+VwLfcyx{(8!-^PkuJiOP06j-n#n>> zIsUCPcUP42+}ZztNX+MJqk6=yoyQvg13?$W#h+gWz|-J>3kBCF-V#$N9y3;W*^Sdk z=~ec`8%slJY6tuOM|d$}0+VzW^0_Tk3L`DDK<;ovbczGV3{jZQff4RZap};e6k1SI6Lg!yM|1YDv zXa75S{gV!xA=6pTLw%Jr>BO03>tR#I*Y0lmk6*=o!;Df|l?l)8H()nfAI3nZh+22c znK%Po-$3c(Mr5POf_Tk8jZ^1rje(I4B`&MwQ^^^_lm+V2`7F2IZ&K+(c`p*a8cq8t zW$|RlX_k+?-w@o7k9j%8UiTR_`DkS5t2v9gN}D7dw!9 z)#vw=s+}L5(yC{2D`0RdTSy5QcpeF#ega?ph3c=y+09A^TrK+)`b+k-7k?2%G8>{6 zv7+#p%ehu*QEMyMS$f7tIsI*OJ_{;~S?=WHQZ=LkHoQdkj6Pyd&to^foeW2fa$X-^ zdXaBPY?HAZL0NUhe2W!I+>7UTO7XWp%TpDGN?$vOCi~>Xin3iO1kZ05 zPi}T7&wen;I{uQ8>Dt#D`vw0v6jJHZkkpY5XZjV+Y;(m)tLnZsCOS~*wb&bppvrWa zR^Fd{fl|n5GpGYz(^$gLd`V_E-0{VW%4gLn&g)V zgAE3)jM;)5ro9yV3A&Zo_x}DQobDVSgN7ga(sfH4he85KATaVDSwbCf806Bt0{yne zFS5Tr{Jz||Xt(IreNJ24ClicYck=_{oeIX9uj9rKsy5C(rQ9fZ`$drpo{xf_wvjk6+>n&&zvCL#}j`ode43 zAM*SzR5AMJ3w!;^o>S28680#x=tqV*#$RN!G@7gN!X_3;(|Hk=$62Z%}lab##@b5U#vje@QWU9vV zgFsY8dY1p~1rDn{LAX!35TE^z+B}00rS)6<6etDyKawEI`27tG&3XEoZNrIJW`{Lw z#r}wqn3Z)_q4=|t%mN6=%0G3>OY5wAsttH5QIxoVXyg@m(m_gpV+z#yS_$WJq<~?3 zaBVh`s0MV?l6)r>m!PC?|0qaT4R2m~q&EGla~}QFJ8212goa*XWH?vU*-O%VmXf@d z9i*=)4(!T#G*#VRB|iHV3O-`AACaGVysG6qJSEW{8BzbcQ^5yb?4h=^V8D+WJM%Mk zbImwOe|4z4aa-u6fja!{s6JbTO67o!WU|no+7YvA)S}@5v?8 zk(YU3f?&Hc?`->7josHS{70Mn&5k#OM##x199B{y%7%pke+v56%`SKG=4aJ2c4{i# zREE+y%u6!;Xy?{e?6mt@%1g{o<78jZ`bJL1bvArVbOviMXb_Ii)HDK(4X3PncH@-) z=sn=^*}f^aISG{_mgl~9VAc?fdouM?{1I$13i#kP`%-r=8h`G7CZf08oM|@Ryq5G$5%np7>ARwya;S&UJGCJd4F)8HB3S6m5C(X+H3i`7KjZM z{0pv$nC|uang1cG2T67{WZg@n>W^zi!z^BU!2TUs@qa9|Ah@mV0;B|M)x3nH?s0 z#%B~U_V$NS{KC7@lsiFpi;WTuweTy(E4bR5q@qxsUt~KIXKXTZ(8h+SI~g8bYt^&c zk}k&Dzv|g5>Ev`@7u*JM{`98*J7rW{*kyf=Li~;c5n9tFrY5P*Y--xxT%P+fc+uJ^ z=qmWr{!6hp&8J>mv@7Z*QMzw~!b|L*A!N4bLg@y=Ee5!$^|2AOLsalh$~ZW)!O`m zA8q1<-lgY;_MJS`FpyVtWDo)uT1}U<%XxaPa+>34{a=jgyFHaNLTZ#&;Tx(;%Qf(D zW~g;QI&bch$JEq*>?PEGcv1c17<)-py-8Ivy)~)AoM8}V!m5Hwmfcn?bMQ$iZ?rwQ zB2mh(_syzG`OInw?9lLf8x1EcTpR+MO~mdMiNoCW*hRsu&XltApH@Ldg|(?7fnqb4 zr$$S4z86iD2wI>Ks($yvd^7ZtzoJ?+RVXdlXOGFqwzQOJbMvlHvq4jg5)sj*xq~>) z|KE8}^zt56WZ)AC*qB$EKZ`D}@w_54_w9BNcLXs|DeU)eOrr%@c zwLP=a5G15OXVQh%yE4^f7z1QJZ`?oIC!m*x3(^@K6iZ*E@{l~M{81B1zbIg!Jw6PCNwW zpRxa$wEw3@WNW;vdCGt5s&dRAbAf0RZa!233MdmH3P+XpN;G5n%QiZ=nT%JPN-BIL 
z?Ixv&yLoGAmhtQms%ni;i1?a*<72Y0;FjAZZ0c3U`sq2N(I85V$zZqLN~@hGVt?j+ zPm#^Y7g8TeE~^VAMhB%$#ln}S;{0-GEcGXYEpB)JnaQ{L3HPj`@3+855~B>}el&m2yaMBHN zSu4wkm0!B#!Jit-V@_kiRz-R2t4fYmsRV;v}IzP}3R{_r+V^K7a@C@jqB z;|;qRF5;u+!kFJ#ha+~3_bR_K5{?b3!(YT-Oi1S4HhAha9nWZjsK3^?XX^LZ{^Q_v z!L;mwyLN?;g+-!kQBCbhILDk6yqUlq;bZ`B&KzB!~ ze#U@kkNGWF=)7+9wk78KtudQJEQCt|SNiZbzAFSVO{>1$S@yc1IIBY_pi>SlK$df| zvX&D1BYms z&l{F)TFIJ9 z*eR{JC^v=N*ph|Zi2G2PbwnU}l9i_AkgSSp9{pFr{G6^w0*seqFn5PTe^C zh6YfY?U*1B)`c=_XxbS(+5pZ#vFZWX65r)%^hDP_8WCHU-HlC|*6 zW6epnI=a%CF-c7hzimH~pfX78SE6AW)Ex@j=LKrSknK3~KvTlk5@_h9Jk&>oU454u)|jzl1q<;zUZ!6>&nCyiy2<9h!KY4uc*fh-jK%R;X)wY6Im+)LRVb3(D!+jm)} z+DsZK!pP-)aq=39#}F(iY%NZPhD3s0ka{m>6pX$9SVf-I4{mmU^!^ z1xFwAcH|vW(g0Upj2`pu|J=MV5#C0Ye%Z{(|B7C60r%?g#;N4>rNOqwPPXxR=SStc z<=_3pr)&~hjSnFFzhqX=ik!ZljzzeTZl!^#v%`|l`gtUJCljTAi~?~1{63|78LAKx z7Sri*O%2|M-jYTxWmK|RR*imd?YD}Q=71H;Hv64<2^;0e%b`y6GGb$UJZB+;LMCZl*;2r@Q|J6 zbrO!)z}IEntCBR%j* zxNxJR80aHwTvlqO8D-!fWn$G3F}J2UuC&ixn&RTL<$u&WKw3T7m)ND0XRsd8nnA9u zG|M;yOfDISVO52~w$y{BbgGQ9jQ9HEqVds9uDxZ2tX3Gp`%+2xwyh#S&Ws1GqqRQHJS9P3(m zeUtEG;tjBsX&>GJ;NV$o`| zuy~08pBg5G4}}eGwP=M`mjO@<>-*`yEA**iWLd}2G+squScl)W*0Ij}g3 z*mx;`u2@njKuJP&p{@k^df78~?v^B&xOQif(c&Va;zPn={iQxd)%Vg0mO?6&5{R-J{;EDXL->B%+| zKG(VAqNem1)AI?moXj<|7@_r44a+sU_mdN^h74DGWtN0ENUv*5mY3hfBMp$f zuXQT2K^nTeC)KN}bB3O{KA}y+)eCqYT)uvkw5n$adbOiYzZf3BaCp#a>5Q@6SxALFi$`u`F2|s2#3pZYXQ69)FWve4 zR>Gk9(_0>(Mjp0=Iqi7O4)nT5U?YD_>h=)wR@p#9q*EUU3Sls5n=G)!reD)p z`pAQGN(vwB!uLJO5k1tcL6&e%h#N6Q3To3{n`K~+`LV;-EWy`JF6t+zXMJ%H6!fku zIz7wa;K0Q{*TtC?)j+klpHi1ulH$)ftSh)1e{rk&3b^^IC590#W%q0uqU~|Nu1iY2 zEcWq;Vx$&wKmzt^a-_j|bJN{R=9Vz;_om_;{$iv{(pvbV<>{2Vd!oRZNQFif-l%U^fiA!eEH>b@Eaf5Oduj}0e4$3sgcN%W(}IFZ7Co7Z}g zBy)%Zxf>AwJsbk!`PwYg1hiUCOJZg~V|a{0;B2^vIVBIvOG##9BGxRw?@iYy{Ot{B zfmBkb0za1BO8>}JAZO_t;@WGs!Hg^C7D?3Xy{qVxH{=7a$iCBAs`WnCvtFytcbPAf z$dcxUs#ryg-#H_TrpTVV=d9{AH5!=wO>ALA;&hRGP>(Q{(U)54;6G|K&{&~-3BpNG zba1Hc>EXqt`xZ4;GpH>gguT9^}a`I z97KlPW1(sG*Sbgas3c%-^*18dy%IF%QoqxD+mbFMbcp47+eb+3A(FL^eS*K9TIx&= zeuN#I(nQpM3oyS8Y>qP})K6&p;KZrCvtSQP>+LlI?mqUtUL4Bxj`-!6YXdPQGGFXT z@;d_s%|;25U`i2TWQ23gTFgk8eQNZ;SetC;(J6$}Z{+e!$Is#Q!97HWTd^BrZ6e2h zV{qEXzNbO00zC}@t@wE+xkhAQ2zB1a^;b1rp6msFT+_b^&d{sp3y@XCY-v7+@NG2u za^|-qQPhu`U+gk*!+dkEqsS^!O?(UB%1Gfo!C0Z7%h1ovJyxSLgIB7=dcQ4vvBMO4GBD)*iPi`F);$?D0`WrwLRa7c`f@2s(p zT_7eUhWpvieewsIhRi10$v1z*)F?lRAxmjWTk{B|1^GY<`S8pQpQ(?Y8C_d2&N148Km~7l z6gxWYqsT8pb_*o2M{xFL7K0RB(N#;WyJ(nOHb`q)7vjdNe>~`$wTU*-88XtGy}@tj zF}gH#Svha+p1hsx8f+5r1hZs_DZj#(t!2rN`e4*aojuCj{aA2ZXXDb$rK0)*{Z`rS z+{?pLPSl9z13Js7O$wzO!=m7tdT^}E(0U8q1wE{5dGO35&mBn-4vzWEYur0CdSkIk zQYy59?7_y24`J&W_3UGSQ^`x5*YSpxs-m=`(Zwu5-3c$ zfToQg02owJDt8w~!c| z87RDIo>;P%5hLHsGL}P3ysNP>(Q~7@n=?@DyX2UUDBBri*srh?vLz{{O3rerhk}u7 zTS8O^DjvxDD+Y=O*AV=kRnpE>&81=oIUjhodsn+z4smoGnk_D|EK19<)C)^D+N84de8_A%^F~Wo zh4HDkw#9itHjL3cM=4)kL+T|mK<q_O~gSeERssMNVxfi9KI+I8-K^w zB;?KvbOZwZbSlrR76dl^<=y-W1#hvc|MIM6j$9NejY@E}N;O(|$humP`X+ym+TWxo z=9$*p5S!J-MOWXUn!bWQ>JcyG{B5*sKvmy65+}4tGBTHZaJYAI{3^eyzCgYZ$8w#0 ztISfY{u8rp_RLsQDNy@vF(F8nR)aGrY*<1!+Uuc^&-!QKg@=0gYU>AfZ)L-n57X-8 zL=Lg=vTp3dP?Vn7>$Q?uJdKt2z+r>f_ppc6{W`Xpj+u|;=U5vfqN#oHqvvNQjG2V7m&j1ST5GzL~ zf)OK{FqO0hzg&T!xn_N;sYEbyMRnGJ+cblyLL9hkR(~A7u&vb(sRtRBDmgrBPQWK~ z*0zyoyejmFW7;$4^!tIMKrzL@Tb|fBl`anvV=42rS@pi}5!l#ux#Sy9kP}4E*zJ{I1!zFqezVWKy}mJW4%?lNRQi*}T67At38Emtn>t`Hvo z6Sn**Gf%B5V3Tti2eNZ|;dG#ubG&Ov{9dR-H-myJi>xPq1r&YHbL|soW~EeVxC(70 zr~lG)@)f=V8P@qvO@cZ@4(rq}m=aI8nOE1okD0o4n^qw}q92Cd>b*+|js6Hj<2gkr zA$Ij}nLX6NZGYxzg|;)u+)W%|Lm!5Y;T*WhAfreXr#eIcItiq@9|hL$1Ax7b5Uj_@i_b>6z0YHHxLPMn%Fj|JL0et)L5kSV>OSs-6i}pHV%%Co7*#PW@wYM 
zxP-zIzE;9{{b9xj(%pE~#d zPd^A>HT zTL#RkRCn(YexBeO^5F*sDsL;vaBYVSovll((omWu62Ul;t^R-k`e_L(#$w*fV0+K& z%4Mb30!H5z(hKf1~$jT&a2ZIdw}?0!jAEFSpChI^sg;wa)-dw&K)4@(1*>&*){Y{AETR1;4=1ZOcYV z`|`@=t}9?=(pt8Q5Rz-eE)-n!Dz~ClFp(7h=n_my`;geIA;_7m_f>I3f%S|I=N}hHl#^oDaPQY9Knups7= zh~2*k8k;fN;>D}FNIn)|&Up1}(-wccZ%4uPR0jip47bE^;KNL(#Awt?9(993Wl^Hm zN-yCTdY;o}^OV5Z(KBlfjyk78cN{DhiwVu&Wf4fv=z@p6O>Rjgh!W?_0+Sb7#BZZQ zIDq7AOj2OAXT3KmEzAAX=~@Pwg6ubIoScL2P5CyN<3Oaok3GyOvkNp*b#D=}_=qQ` zKs02IJ86j44GT!ugK4>!6L)-Y*R$#?*L}0>Te%sFY1GO&)wMnp2o~2JamFOw$#Ai{ z3XYV3kfiTPX&vOnEvIRUV}OAUScjA48KP@ICwpz`JP5r1 z!bbp(XI>;Gqc`u8Rf|yIigJ4-Z7CVU{P1EfcY-M_>k{qm%MoWR{M|65P_uwgTDNnxUsr^3!((%O%MXV z8fhmuxJmEoNHHH|mBOcMYuQTR(=8=hyDn7vR)@z7`*aVl24YYQGFh@ZMhTwwx7SL0qR*>Uj&l3DPz(gMA_3r9j<`kRHPtw3a+0(h{Zou z1(H{{9@x!E2NQPBo6JHu#g3+M+%{+O87)KJkVh9~$4^g*5&gV5rxs-)tM*X#m0uE} zvb5inCs}96L0~5BfZF^nW`s|9&_}qjZ#g?4-EjTxtp%sU`?&9`w%I`GCg1kt*WvFa zL{n*=dNyngS??^aPj+Sxq8vGy++)lu_O`@+aLS+$r0fkbvQ=(&rc}OfKca?9{amZC z(l2@$x0#^g(l}4GFK_J4Ht|Zep(_kOWiwE^ME** znFx4#k8ET4 zf*etemtApgt@{D_s5vEc`C$Q(Bz{k}Tv5v(ikF9-Ri%RqVOg%m969=s`UPX+IWg$< z%>T7_eO~1su0=X-@668q1w1ZhaiRI|ZT@ezEZjWz_s35l1CH~~-{qeh{NT+4*T&O# z_)Dad#CKfb&1se0Ib}ENs&#i__uSno_Tszh-TsZm-{qG57TUkB`qfOJPL1Qd@0FgM zn`3g>Rrr^t!50hGR|UWGoXg+){mwi!^X>YlsrBLS*Kd9B$-=hh0ON;g50{_zyYzMY z8w1m*46mF1);=%nrS}}Hwd&h;|5e!Z%WvD{nWtY2mC)J33#@TIoG3gTF48|SZQ{&# ztGrfTVp}G=DruIg-+uWQLX~CvGBQNGzttPE<}JvVx>f#Dr{HeE$JfjXQ_Okp->k`y zdidtrEW=wn=Bj<0{nu=k>~)j3`HBh_d)x;Hv1I+1ZtRU&J(X0%=7MY zzH@HLyJbDA4Bu8*@Lhkh>+GEgp}Su`?LH9dyWD!J4R_4dvZGJe^EdeDnf;Nzo$!w1 zpcIhDGL5Z|Z|~0-vG=cl0m9_Xy!>|3Mc@pn#Rg5Fx)-Ij*KhB)V-VuV{c&ME|AAF4 zX29x^Jt5Rd+V0D1<_%gnQl09Sht5E4tA%cTSQszg5Xw13wZ8|3gDR+%Q2VzXcnGB~ zLxSzU8^FUQe}8)Kw4I?rwDN{+4d?e)@0s~GFswNP+|qD7Aozopr0P=j%fB*mh diff --git a/internal/framework/runnables/runnables.go b/internal/framework/runnables/runnables.go index d960475008..4c8aac5460 100644 --- a/internal/framework/runnables/runnables.go +++ b/internal/framework/runnables/runnables.go @@ -34,29 +34,33 @@ func (r *LeaderOrNonLeader) NeedLeaderElection() bool { return false } -// EnableAfterBecameLeader is a Runnable that will call the enable function when the current instance becomes +// CallFunctionsAfterBecameLeader is a Runnable that will call the given functions when the current instance becomes // the leader. -type EnableAfterBecameLeader struct { - enable func(context.Context) +type CallFunctionsAfterBecameLeader struct { + enableFunctions []func(context.Context) } var ( - _ manager.LeaderElectionRunnable = &EnableAfterBecameLeader{} - _ manager.Runnable = &EnableAfterBecameLeader{} + _ manager.LeaderElectionRunnable = &CallFunctionsAfterBecameLeader{} + _ manager.Runnable = &CallFunctionsAfterBecameLeader{} ) -// NewEnableAfterBecameLeader creates a new EnableAfterBecameLeader Runnable. -func NewEnableAfterBecameLeader(enable func(context.Context)) *EnableAfterBecameLeader { - return &EnableAfterBecameLeader{ - enable: enable, +// NewCallFunctionsAfterBecameLeader creates a new CallFunctionsAfterBecameLeader Runnable. 
+func NewCallFunctionsAfterBecameLeader( + enableFunctions []func(context.Context), +) *CallFunctionsAfterBecameLeader { + return &CallFunctionsAfterBecameLeader{ + enableFunctions: enableFunctions, } } -func (j *EnableAfterBecameLeader) Start(ctx context.Context) error { - j.enable(ctx) +func (j *CallFunctionsAfterBecameLeader) Start(ctx context.Context) error { + for _, f := range j.enableFunctions { + f(ctx) + } return nil } -func (j *EnableAfterBecameLeader) NeedLeaderElection() bool { +func (j *CallFunctionsAfterBecameLeader) NeedLeaderElection() bool { return true } diff --git a/internal/framework/runnables/runnables_test.go b/internal/framework/runnables/runnables_test.go index 9f34d9ccba..7a9b8968ba 100644 --- a/internal/framework/runnables/runnables_test.go +++ b/internal/framework/runnables/runnables_test.go @@ -23,19 +23,25 @@ func TestLeaderOrNonLeader(t *testing.T) { g.Expect(leaderOrNonLeader.NeedLeaderElection()).To(BeFalse()) } -func TestEnableAfterBecameLeader(t *testing.T) { +func TestCallFunctionsAfterBecameLeader(t *testing.T) { t.Parallel() - enabled := false - enableAfterBecameLeader := NewEnableAfterBecameLeader(func(_ context.Context) { - enabled = true + statusUpdaterEnabled := false + healthCheckEnableLeader := false + eventHandlerEnabled := false + + callFunctionsAfterBecameLeader := NewCallFunctionsAfterBecameLeader([]func(ctx context.Context){ + func(_ context.Context) { statusUpdaterEnabled = true }, + func(_ context.Context) { healthCheckEnableLeader = true }, + func(_ context.Context) { eventHandlerEnabled = true }, }) g := NewWithT(t) - g.Expect(enableAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) - g.Expect(enabled).To(BeFalse()) + g.Expect(callFunctionsAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) - err := enableAfterBecameLeader.Start(context.Background()) + err := callFunctionsAfterBecameLeader.Start(context.Background()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(enabled).To(BeTrue()) + g.Expect(statusUpdaterEnabled).To(BeTrue()) + g.Expect(healthCheckEnableLeader).To(BeTrue()) + g.Expect(eventHandlerEnabled).To(BeTrue()) } diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 1020f11e03..84a658ae4e 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -167,13 +167,33 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log changeType, gr := h.cfg.processor.Process() - // Once we've processed resources on startup and built our first graph, mark the Pod as ready. - if !h.cfg.graphBuiltHealthChecker.ready { - h.cfg.graphBuiltHealthChecker.setAsReady() + // Once we've processed resources on startup and built our first graph, mark the Pod as having built the graph. + if !h.cfg.graphBuiltHealthChecker.graphBuilt { + h.cfg.graphBuiltHealthChecker.setGraphBuilt() } - // TODO(sberman): hardcode this deployment name until we support provisioning data planes - // If no deployments exist, we should just return without doing anything. + // if this Pod is not the leader or does not have the leader lease yet, + // the nginx conf should not be updated. + if !h.cfg.graphBuiltHealthChecker.leader { + return + } + + h.sendNginxConfig(ctx, logger, gr, changeType) +} + +func (h *eventHandlerImpl) eventHandlerEnable(ctx context.Context) { + // Latest graph is guaranteed to not be nil since the leader election process takes longer than + // the initial call to HandleEventBatch when NGF starts up. 
And GatewayClass will typically always exist which + // triggers an event. + h.sendNginxConfig(ctx, h.cfg.logger, h.cfg.processor.GetLatestGraph(), state.ClusterStateChange) +} + +func (h *eventHandlerImpl) sendNginxConfig( + ctx context.Context, + logger logr.Logger, + gr *graph.Graph, + changeType state.ChangeType, +) { deploymentName := types.NamespacedName{ Name: "tmp-nginx-deployment", Namespace: h.cfg.gatewayPodConfig.Namespace, diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index 8b81b364ac..dbc4ea9ed6 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -126,7 +126,9 @@ var _ = Describe("eventHandler", func() { metricsCollector: collectors.NewControllerNoopCollector(), updateGatewayClassStatus: true, }) - Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeFalse()) + Expect(handler.cfg.graphBuiltHealthChecker.graphBuilt).To(BeFalse()) + + handler.cfg.graphBuiltHealthChecker.leader = true }) AfterEach(func() { @@ -161,7 +163,7 @@ var _ = Describe("eventHandler", func() { }) AfterEach(func() { - Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeTrue()) + Expect(handler.cfg.graphBuiltHealthChecker.graphBuilt).To(BeTrue()) }) When("a batch has one event", func() { @@ -484,22 +486,36 @@ var _ = Describe("eventHandler", func() { Expect(gr.LatestReloadResult.Error.Error()).To(Equal("status error")) }) - It("should set the health checker status properly", func() { + It("should update nginx conf only when leader", func() { + ctx := context.Background() + handler.cfg.graphBuiltHealthChecker.leader = false + e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} readyChannel := handler.cfg.graphBuiltHealthChecker.getReadyCh() fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) - Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).ToNot(Succeed()) handler.HandleEventBatch(context.Background(), logr.Discard(), batch) + // graph is built, but since the graphBuiltHealthChecker.leader is false, configuration isn't created and + // the readyCheck fails + Expect(handler.cfg.graphBuiltHealthChecker.graphBuilt).To(BeTrue()) + Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).ToNot(Succeed()) + Expect(readyChannel).ShouldNot(BeClosed()) + + // Once the pod becomes leader, these two functions will be called through the runnables we set in the manager + handler.cfg.graphBuiltHealthChecker.setAsLeader(ctx) + handler.eventHandlerEnable(ctx) + + // nginx conf has been set dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) - Expect(readyChannel).To(BeClosed()) - + // ready check is also set Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).To(Succeed()) + Expect(handler.cfg.graphBuiltHealthChecker.getReadyCh()).To(BeClosed()) }) It("should panic for an unknown event type", func() { diff --git a/internal/mode/static/health.go b/internal/mode/static/health.go index a0fe4e9b59..4993b0b40e 100644 --- a/internal/mode/static/health.go +++ b/internal/mode/static/health.go @@ -1,9 +1,17 @@ package static import ( + "context" "errors" + "fmt" + "net" "net/http" "sync" + "time" + + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ) // newGraphBuiltHealthChecker creates a new graphBuiltHealthChecker. 
@@ -13,37 +21,94 @@ func newGraphBuiltHealthChecker() *graphBuiltHealthChecker { } } -// graphBuiltHealthChecker is used to check if the initial graph is built and the NGF Pod is ready. +// graphBuiltHealthChecker is used to check if the NGF Pod is ready. The NGF Pod is ready if the initial graph has +// been built and if it is leader. type graphBuiltHealthChecker struct { // readyCh is a channel that is initialized in newGraphBuiltHealthChecker and represents if the NGF Pod is ready. - readyCh chan struct{} - lock sync.RWMutex - ready bool + readyCh chan struct{} + lock sync.RWMutex + graphBuilt bool + leader bool +} + +// createHealthProbe creates a Server runnable to serve as our health and readiness checker. +func createHealthProbe(cfg config.Config, healthChecker *graphBuiltHealthChecker) (manager.Server, error) { + // we chose to create our own health probe server instead of using the controller-runtime one because + // of repetitive log which would flood our logs on non-ready non-leader NGF Pods. This health probe is + // similar to the controller-runtime's health probe. + + mux := http.NewServeMux() + + // copy of controller-runtime sane defaults for new http.Server + s := &http.Server{ + Handler: mux, + MaxHeaderBytes: 1 << 20, + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, + } + + mux.HandleFunc(readinessEndpointName, healthChecker.readyHandler) + + ln, err := net.Listen("tcp", fmt.Sprintf(":%d", cfg.HealthConfig.Port)) + if err != nil { + return manager.Server{}, + fmt.Errorf("error listening on %s: %w", fmt.Sprintf(":%d", cfg.HealthConfig.Port), err) + } + + return manager.Server{ + Name: "health probe", + Server: s, + Listener: ln, + }, nil +} + +func (h *graphBuiltHealthChecker) readyHandler(resp http.ResponseWriter, req *http.Request) { + if err := h.readyCheck(req); err != nil { + resp.WriteHeader(http.StatusServiceUnavailable) + } else { + resp.WriteHeader(http.StatusOK) + } } // readyCheck returns the ready-state of the Pod. It satisfies the controller-runtime Checker type. -// We are considered ready after the first graph is built. +// We are considered ready after the first graph is built and if the NGF Pod is leader. func (h *graphBuiltHealthChecker) readyCheck(_ *http.Request) error { h.lock.RLock() defer h.lock.RUnlock() - if !h.ready { - return errors.New("control plane is not yet ready") + if !h.leader { + return errors.New("this Pod is not currently leader") + } + + if !h.graphBuilt { + return errors.New("control plane initial graph has not been built") } return nil } -// setAsReady marks the health check as ready. -func (h *graphBuiltHealthChecker) setAsReady() { +// setGraphBuilt marks the health check as having the initial graph built. +func (h *graphBuiltHealthChecker) setGraphBuilt() { h.lock.Lock() defer h.lock.Unlock() - h.ready = true - close(h.readyCh) + h.graphBuilt = true } // getReadyCh returns a read-only channel, which determines if the NGF Pod is ready. func (h *graphBuiltHealthChecker) getReadyCh() <-chan struct{} { return h.readyCh } + +// setAsLeader marks the health check as leader. +func (h *graphBuiltHealthChecker) setAsLeader(_ context.Context) { + h.lock.Lock() + defer h.lock.Unlock() + + h.leader = true + + // setGraphBuilt should already have been called when processing the resources on startup because the leader + // election process takes longer than the initial call to HandleEventBatch. 
Thus, the NGF Pod should be marked as + // ready and have this channel be closed. + close(h.readyCh) +} diff --git a/internal/mode/static/health_test.go b/internal/mode/static/health_test.go index 7246283ed9..3505479d7d 100644 --- a/internal/mode/static/health_test.go +++ b/internal/mode/static/health_test.go @@ -1,17 +1,99 @@ package static import ( + "context" + "errors" + "net" + "net/http" + "net/http/httptest" "testing" . "github.com/onsi/gomega" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ) func TestReadyCheck(t *testing.T) { t.Parallel() g := NewWithT(t) healthChecker := newGraphBuiltHealthChecker() - g.Expect(healthChecker.readyCheck(nil)).ToNot(Succeed()) - healthChecker.ready = true + g.Expect(healthChecker.readyCheck(nil)).To(MatchError(errors.New("this Pod is not currently leader"))) + + healthChecker.graphBuilt = true + g.Expect(healthChecker.readyCheck(nil)).To(MatchError(errors.New("this Pod is not currently leader"))) + + healthChecker.graphBuilt = false + healthChecker.leader = true + g.Expect(healthChecker.readyCheck(nil)). + To(MatchError(errors.New("control plane initial graph has not been built"))) + + healthChecker.graphBuilt = true g.Expect(healthChecker.readyCheck(nil)).To(Succeed()) } + +func TestSetAsLeader(t *testing.T) { + t.Parallel() + g := NewWithT(t) + healthChecker := newGraphBuiltHealthChecker() + + g.Expect(healthChecker.leader).To(BeFalse()) + g.Expect(healthChecker.readyCh).ShouldNot(BeClosed()) + + healthChecker.setAsLeader(context.Background()) + + g.Expect(healthChecker.leader).To(BeTrue()) + g.Expect(healthChecker.readyCh).To(BeClosed()) +} + +func TestSetGraphBuilt(t *testing.T) { + t.Parallel() + g := NewWithT(t) + healthChecker := newGraphBuiltHealthChecker() + + g.Expect(healthChecker.graphBuilt).To(BeFalse()) + + healthChecker.setGraphBuilt() + + g.Expect(healthChecker.graphBuilt).To(BeTrue()) +} + +func TestReadyHandler(t *testing.T) { + t.Parallel() + g := NewWithT(t) + healthChecker := newGraphBuiltHealthChecker() + + r := httptest.NewRequest(http.MethodGet, "/readyz", nil) + w := httptest.NewRecorder() + + healthChecker.readyHandler(w, r) + g.Expect(w.Result().StatusCode).To(Equal(http.StatusServiceUnavailable)) + + healthChecker.graphBuilt = true + healthChecker.leader = true + + w = httptest.NewRecorder() + healthChecker.readyHandler(w, r) + g.Expect(w.Result().StatusCode).To(Equal(http.StatusOK)) +} + +func TestCreateHealthProbe(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + healthChecker := newGraphBuiltHealthChecker() + + cfg := config.Config{HealthConfig: config.HealthConfig{Port: 100000}} + _, err := createHealthProbe(cfg, healthChecker) + g.Expect(err).To(MatchError("error listening on :100000: listen tcp: address 100000: invalid port")) + + cfg = config.Config{HealthConfig: config.HealthConfig{Port: 8081}} + hp, err := createHealthProbe(cfg, healthChecker) + g.Expect(err).ToNot(HaveOccurred()) + + addr, ok := (hp.Listener.Addr()).(*net.TCPAddr) + g.Expect(ok).To(BeTrue()) + + g.Expect(addr.Port).To(Equal(cfg.HealthConfig.Port)) + g.Expect(hp.Server).ToNot(BeNil()) +} diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index e6959e6609..31574a9f64 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -75,6 +75,8 @@ const ( plusClientCertField = "tls.crt" plusClientKeyField = "tls.key" grpcServerPort = 8443 + // defined in our deployment.yaml. 
+ readinessEndpointName = "/readyz" ) var scheme = runtime.NewScheme() @@ -245,8 +247,12 @@ func StartManager(cfg config.Config) error { return fmt.Errorf("cannot register event loop: %w", err) } - if err = mgr.Add(runnables.NewEnableAfterBecameLeader(groupStatusUpdater.Enable)); err != nil { - return fmt.Errorf("cannot register status updater: %w", err) + if err = mgr.Add(runnables.NewCallFunctionsAfterBecameLeader([]func(context.Context){ + groupStatusUpdater.Enable, + healthChecker.setAsLeader, + eventHandler.eventHandlerEnable, + })); err != nil { + return fmt.Errorf("cannot register functions that get called after Pod becomes leader: %w", err) } if cfg.ProductTelemetryConfig.Enabled { @@ -274,6 +280,7 @@ func StartManager(cfg config.Config) error { } cfg.Logger.Info("Starting manager") + cfg.Logger.Info("NGINX Gateway Fabric Pod will be marked as unready until it has the leader lease") go func() { <-ctx.Done() cfg.Logger.Info("Shutting down") @@ -326,10 +333,6 @@ func createManager(cfg config.Config, healthChecker *graphBuiltHealthChecker) (m }, } - if cfg.HealthConfig.Enabled { - options.HealthProbeBindAddress = fmt.Sprintf(":%d", cfg.HealthConfig.Port) - } - clusterCfg := ctlr.GetConfigOrDie() clusterCfg.Timeout = clusterTimeout @@ -339,8 +342,13 @@ func createManager(cfg config.Config, healthChecker *graphBuiltHealthChecker) (m } if cfg.HealthConfig.Enabled { - if err := mgr.AddReadyzCheck("readyz", healthChecker.readyCheck); err != nil { - return nil, fmt.Errorf("error adding ready check: %w", err) + healthProbeServer, err := createHealthProbe(cfg, healthChecker) + if err != nil { + return nil, fmt.Errorf("error creating health probe: %w", err) + } + + if err := mgr.Add(&healthProbeServer); err != nil { + return nil, fmt.Errorf("error adding health probe: %w", err) + } } } From 0ac4d53ec1eb34972a49a50840e93a4ac41afef8 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Thu, 13 Feb 2025 10:00:23 -0700 Subject: [PATCH 08/32] CP/DP Split: Support basic NGINX OSS provisioning (#3114) This commit updates the control plane to deploy an NGINX data plane when a valid Gateway resource is created. When the Gateway is deleted or becomes invalid, the data plane is removed. The NginxProxy resource has been updated with numerous configuration options related to the k8s deployment and service configs, which the control plane will apply to the NGINX resources when set. The control plane fully owns the NGINX deployment resources, so users who want to change any configuration must do so using the NginxProxy resource. This does not yet support NGINX Plus or NGINX debug mode. Those will be added in followup pull requests. This also adds some basic daemonset fields, but does not yet support deploying a daemonset. That will also be added soon.
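For illustration, a minimal NginxProxy manifest that exercises the new kubernetes block could look like the sketch below. Field names follow the json tags added in apis/v1alpha2/nginxproxy_types.go in this patch; the apiVersion/kind pairing is inferred from the new gateway.nginx.org_nginxproxies.yaml CRD, and the metadata values, replica count, image tag, and service settings are placeholders rather than values taken from the chart defaults.

```yaml
# Hypothetical example only: names, tag, replicas, and service values are illustrative.
apiVersion: gateway.nginx.org/v1alpha2
kind: NginxProxy
metadata:
  name: nginx-proxy-config   # placeholder name
  namespace: nginx-gateway   # placeholder namespace
spec:
  kubernetes:
    deployment:
      replicas: 2            # DeploymentSpec.Replicas
      container:
        image:
          repository: ghcr.io/nginx/nginx-gateway-fabric/nginx  # default noted in the Image type
          tag: edge          # placeholder tag
          pullPolicy: IfNotPresent
    service:
      type: NodePort               # ServiceSpec type: ClusterIP, LoadBalancer, or NodePort
      externalTrafficPolicy: Local # Cluster or Local
```

Because the control plane fully owns the generated NGINX Deployment and Service, settings like these are made on the NginxProxy resource and rolled out by the provisioner rather than by editing the generated objects directly.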
--- .github/workflows/helm.yml | 2 +- .yamllint.yaml | 4 +- Makefile | 4 +- apis/v1alpha2/nginxproxy_types.go | 320 +- apis/v1alpha2/zz_generated.deepcopy.go | 250 ++ charts/nginx-gateway-fabric/README.md | 64 +- charts/nginx-gateway-fabric/README.md.gotmpl | 7 +- .../templates/_helpers.tpl | 2 +- .../templates/clusterrole.yaml | 23 +- .../templates/configmap.yaml | 34 - .../templates/deployment.yaml | 54 +- .../templates/gatewayclass.yaml | 2 - .../templates/nginxproxy.yaml | 23 +- .../templates/serviceaccount.yaml | 12 +- .../templates/tmp-nginx-agent-conf.yaml | 43 - .../templates/tmp-nginx-deployment.yaml | 204 - .../templates/tmp-nginx-service.yaml | 36 - .../nginx-gateway-fabric/values.schema.json | 422 +- charts/nginx-gateway-fabric/values.yaml | 255 +- cmd/gateway/commands.go | 37 +- cmd/gateway/commands_test.go | 51 +- .../bases/gateway.nginx.org_nginxproxies.yaml | 3462 +++++++++++++++++ config/tests/static-deployment.yaml | 10 +- deploy/aws-nlb/deploy.yaml | 269 +- deploy/azure/deploy.yaml | 268 +- deploy/crds.yaml | 3462 +++++++++++++++++ deploy/default/deploy.yaml | 263 +- deploy/experimental-nginx-plus/deploy.yaml | 287 +- deploy/experimental/deploy.yaml | 264 +- deploy/nginx-plus/deploy.yaml | 286 +- deploy/nodeport/deploy.yaml | 263 +- deploy/openshift/deploy.yaml | 265 +- .../snippets-filters-nginx-plus/deploy.yaml | 286 +- deploy/snippets-filters/deploy.yaml | 263 +- docs/developer/quickstart.md | 4 +- examples/helm/aws-nlb/values.yaml | 11 +- examples/helm/azure/values.yaml | 8 +- .../helm/experimental-nginx-plus/values.yaml | 2 - examples/helm/nginx-plus/values.yaml | 2 - examples/helm/nodeport/values.yaml | 5 +- .../snippets-filters-nginx-plus/values.yaml | 2 - go.mod | 2 +- internal/framework/controller/labels.go | 12 + .../controller/predicate/annotation.go | 41 + .../controller/predicate/annotation_test.go | 178 + .../framework/controller/predicate/label.go | 18 + .../framework/controller/predicate/service.go | 53 - .../controller/predicate/service_test.go | 220 +- internal/framework/controller/resource.go | 9 + internal/mode/static/config/config.go | 13 +- internal/mode/static/handler.go | 194 +- internal/mode/static/handler_test.go | 171 +- internal/mode/static/manager.go | 47 +- internal/mode/static/nginx/agent/agent.go | 2 + internal/mode/static/nginx/agent/command.go | 4 +- .../mode/static/nginx/agent/command_test.go | 6 + .../mode/static/nginx/agent/deployment.go | 12 +- .../static/nginx/agent/deployment_test.go | 3 + .../static/nginx/agent/grpc/connections.go | 12 +- .../nginx/agent/grpc/connections_test.go | 24 +- .../grpcfakes/fake_connections_tracker.go | 79 +- internal/mode/static/provisioner/doc.go | 4 + internal/mode/static/provisioner/eventloop.go | 126 + internal/mode/static/provisioner/handler.go | 162 + internal/mode/static/provisioner/objects.go | 524 +++ .../mode/static/provisioner/provisioner.go | 354 ++ .../provisionerfakes/fake_provisioner.go | 117 + internal/mode/static/provisioner/setter.go | 45 + internal/mode/static/provisioner/store.go | 196 + internal/mode/static/provisioner/templates.go | 73 + .../mode/static/state/change_processor.go | 3 - .../static/state/change_processor_test.go | 9 + internal/mode/static/state/graph/gateway.go | 11 +- .../mode/static/state/graph/gateway_test.go | 5 +- internal/mode/static/state/graph/graph.go | 16 +- .../mode/static/state/graph/graph_test.go | 10 +- .../mode/static/state/graph/nginxproxy.go | 13 + .../static/state/graph/nginxproxy_test.go | 77 + internal/mode/static/status/queue.go | 19 +- 
internal/mode/static/status/queue_test.go | 4 + scripts/generate-manifests.sh | 2 +- tests/Makefile | 4 +- tests/framework/ngf.go | 2 +- 83 files changed, 10824 insertions(+), 3583 deletions(-) delete mode 100644 charts/nginx-gateway-fabric/templates/configmap.yaml delete mode 100644 charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml delete mode 100644 charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml delete mode 100644 charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml create mode 100644 internal/framework/controller/labels.go create mode 100644 internal/framework/controller/predicate/label.go create mode 100644 internal/framework/controller/resource.go create mode 100644 internal/mode/static/provisioner/doc.go create mode 100644 internal/mode/static/provisioner/eventloop.go create mode 100644 internal/mode/static/provisioner/handler.go create mode 100644 internal/mode/static/provisioner/objects.go create mode 100644 internal/mode/static/provisioner/provisioner.go create mode 100644 internal/mode/static/provisioner/provisionerfakes/fake_provisioner.go create mode 100644 internal/mode/static/provisioner/setter.go create mode 100644 internal/mode/static/provisioner/store.go create mode 100644 internal/mode/static/provisioner/templates.go diff --git a/.github/workflows/helm.yml b/.github/workflows/helm.yml index fd0a791125..21c03b6d76 100644 --- a/.github/workflows/helm.yml +++ b/.github/workflows/helm.yml @@ -176,4 +176,4 @@ jobs: --set=nginx.plus=${{ inputs.image == 'plus' }} \ --set=nginx.image.tag=nightly \ --set=nginxGateway.productTelemetry.enable=false \ - ${{ inputs.image == 'plus' && '--set=serviceAccount.imagePullSecret=nginx-plus-registry-secret --set=nginx.image.repository=private-registry.nginx.com/nginx-gateway-fabric/nginx-plus' || '' }}" + ${{ inputs.image == 'plus' && '--set=nginx.imagePullSecret=nginx-plus-registry-secret --set=nginx.image.repository=private-registry.nginx.com/nginx-gateway-fabric/nginx-plus' || '' }}" diff --git a/.yamllint.yaml b/.yamllint.yaml index 83713689aa..e52cae4940 100644 --- a/.yamllint.yaml +++ b/.yamllint.yaml @@ -14,7 +14,9 @@ rules: require-starting-space: true ignore-shebangs: true min-spaces-from-content: 1 - comments-indentation: enable + comments-indentation: + ignore: | + charts/nginx-gateway-fabric/values.yaml document-end: disable document-start: disable empty-lines: enable diff --git a/Makefile b/Makefile index e7f758d95b..269fabd91c 100644 --- a/Makefile +++ b/Makefile @@ -226,13 +226,13 @@ install-ngf-local-build-with-plus: check-for-plus-usage-endpoint build-images-wi .PHONY: helm-install-local helm-install-local: install-gateway-crds ## Helm install NGF on configured kind cluster with local images. To build, load, and install with helm run make install-ngf-local-build. 
- helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PREFIX) --create-namespace --wait --set nginxGateway.image.pullPolicy=Never --set service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway $(HELM_PARAMETERS) + helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PREFIX) --create-namespace --wait --set nginxGateway.image.pullPolicy=Never --set nginx.service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway $(HELM_PARAMETERS) .PHONY: helm-install-local-with-plus helm-install-local-with-plus: check-for-plus-usage-endpoint install-gateway-crds ## Helm install NGF with NGINX Plus on configured kind cluster with local images. To build, load, and install with helm run make install-ngf-local-build-with-plus. kubectl create namespace nginx-gateway || true kubectl -n nginx-gateway create secret generic nplus-license --from-file $(PLUS_LICENSE_FILE) || true - helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PLUS_PREFIX) --wait --set nginxGateway.image.pullPolicy=Never --set service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway --set nginx.plus=true --set nginx.usage.endpoint=$(PLUS_USAGE_ENDPOINT) $(HELM_PARAMETERS) + helm install nginx-gateway $(CHART_DIR) --set nginx.image.repository=$(NGINX_PLUS_PREFIX) --wait --set nginxGateway.image.pullPolicy=Never --set nginx.service.type=NodePort --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) -n nginx-gateway --set nginx.plus=true --set nginx.usage.endpoint=$(PLUS_USAGE_ENDPOINT) $(HELM_PARAMETERS) .PHONY: check-for-plus-usage-endpoint check-for-plus-usage-endpoint: ## Checks that the PLUS_USAGE_ENDPOINT is set in the environment. This env var is required when deploying or testing with N+. diff --git a/apis/v1alpha2/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go index 7c10bd9f3c..8f8a2671e8 100644 --- a/apis/v1alpha2/nginxproxy_types.go +++ b/apis/v1alpha2/nginxproxy_types.go @@ -1,6 +1,7 @@ package v1alpha2 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" @@ -47,6 +48,11 @@ type NginxProxySpec struct { // // +optional Telemetry *Telemetry `json:"telemetry,omitempty"` + // Metrics defines the configuration for Prometheus scraping metrics. Changing this value results in a + // re-roll of the NGINX deployment. + // + // +optional + Metrics *Metrics `json:"metrics,omitempty"` // RewriteClientIP defines configuration for rewriting the client IP to the original client's IP. 
// +kubebuilder:validation:XValidation:message="if mode is set, trustedAddresses is a required field",rule="!(has(self.mode) && (!has(self.trustedAddresses) || size(self.trustedAddresses) == 0))" // @@ -66,14 +72,10 @@ type NginxProxySpec struct { // // +optional DisableHTTP2 *bool `json:"disableHTTP2,omitempty"` -} - -// NginxPlus specifies NGINX Plus additional settings. These will only be applied if NGINX Plus is being used. -type NginxPlus struct { - // AllowedAddresses specifies IPAddresses or CIDR blocks to the allow list for accessing the NGINX Plus API. + // Kubernetes contains the configuration for the NGINX Deployment and Service Kubernetes objects. // // +optional - AllowedAddresses []NginxPlusAllowAddress `json:"allowedAddresses,omitempty"` + Kubernetes *KubernetesSpec `json:"kubernetes,omitempty"` } // Telemetry specifies the OpenTelemetry configuration. @@ -82,6 +84,7 @@ type Telemetry struct { // // +optional DisabledFeatures []DisableTelemetryFeature `json:"disabledFeatures,omitempty"` + // Exporter specifies OpenTelemetry export parameters. // // +optional @@ -105,6 +108,16 @@ type Telemetry struct { SpanAttributes []v1alpha1.SpanAttribute `json:"spanAttributes,omitempty"` } +// DisableTelemetryFeature is a telemetry feature that can be disabled. +// +// +kubebuilder:validation:Enum=DisableTracing +type DisableTelemetryFeature string + +const ( + // DisableTracing disables the OpenTelemetry tracing feature. + DisableTracing DisableTelemetryFeature = "DisableTracing" +) + // TelemetryExporter specifies OpenTelemetry export parameters. type TelemetryExporter struct { // Interval is the maximum interval between two exports. @@ -136,6 +149,21 @@ type TelemetryExporter struct { Endpoint *string `json:"endpoint,omitempty"` } +// Metrics defines the configuration for Prometheus scraping metrics. +type Metrics struct { + // Port where the Prometheus metrics are exposed. + // + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port *int32 `json:"port,omitempty"` + + // Disable serving Prometheus metrics on the listen port. + // + // +optional + Disable *bool `json:"disable,omitempty"` +} + // RewriteClientIP specifies the configuration for rewriting the client's IP address. type RewriteClientIP struct { // Mode defines how NGINX will rewrite the client's IP address. @@ -229,27 +257,6 @@ const ( RewriteClientIPHostnameAddressType RewriteClientIPAddressType = "Hostname" ) -// NginxPlusAllowAddress specifies the address type and value for an NginxPlus allow address. -type NginxPlusAllowAddress struct { - // Type specifies the type of address. - Type NginxPlusAllowAddressType `json:"type"` - - // Value specifies the address value. - Value string `json:"value"` -} - -// NginxPlusAllowAddressType specifies the type of address. -// +kubebuilder:validation:Enum=CIDR;IPAddress -type NginxPlusAllowAddressType string - -const ( - // NginxPlusAllowCIDRAddressType specifies that the address is a CIDR block. - NginxPlusAllowCIDRAddressType NginxPlusAllowAddressType = "CIDR" - - // NginxPlusAllowIPAddressType specifies that the address is an IP address. - NginxPlusAllowIPAddressType NginxPlusAllowAddressType = "IPAddress" -) - // NginxLogging defines logging related settings for NGINX. type NginxLogging struct { // ErrorLevel defines the error log level. 
Possible log levels listed in order of increasing severity are @@ -260,6 +267,13 @@ type NginxLogging struct { // +optional // +kubebuilder:default=info ErrorLevel *NginxErrorLogLevel `json:"errorLevel,omitempty"` + + // AgentLevel defines the log level of the NGINX agent process. Changing this value results in a + // re-roll of the NGINX deployment. + // + // +optional + // +kubebuilder:default=info + AgentLevel *AgentLogLevel `json:"agentLevel,omitempty"` } // NginxErrorLogLevel type defines the log level of error logs for NGINX. @@ -293,12 +307,254 @@ const ( NginxLogLevelEmerg NginxErrorLogLevel = "emerg" ) -// DisableTelemetryFeature is a telemetry feature that can be disabled. +// AgentLevel defines the log level of the NGINX agent process. // -// +kubebuilder:validation:Enum=DisableTracing -type DisableTelemetryFeature string +// +kubebuilder:validation:Enum=debug;info;error;panic;fatal +type AgentLogLevel string const ( - // DisableTracing disables the OpenTelemetry tracing feature. - DisableTracing DisableTelemetryFeature = "DisableTracing" + // AgentLogLevelDebug is the debug level NGINX agent logs. + AgentLogLevelDebug AgentLogLevel = "debug" + + // AgentLogLevelInfo is the info level NGINX agent logs. + AgentLogLevelInfo AgentLogLevel = "info" + + // AgentLogLevelError is the error level NGINX agent logs. + AgentLogLevelError AgentLogLevel = "error" + + // AgentLogLevelPanic is the panic level NGINX agent logs. + AgentLogLevelPanic AgentLogLevel = "panic" + + // AgentLogLevelFatal is the fatal level NGINX agent logs. + AgentLogLevelFatal AgentLogLevel = "fatal" +) + +// NginxPlus specifies NGINX Plus additional settings. These will only be applied if NGINX Plus is being used. +type NginxPlus struct { + // AllowedAddresses specifies IPAddresses or CIDR blocks to the allow list for accessing the NGINX Plus API. + // + // +optional + AllowedAddresses []NginxPlusAllowAddress `json:"allowedAddresses,omitempty"` +} + +// NginxPlusAllowAddress specifies the address type and value for an NginxPlus allow address. +type NginxPlusAllowAddress struct { + // Type specifies the type of address. + Type NginxPlusAllowAddressType `json:"type"` + + // Value specifies the address value. + Value string `json:"value"` +} + +// NginxPlusAllowAddressType specifies the type of address. +// +kubebuilder:validation:Enum=CIDR;IPAddress +type NginxPlusAllowAddressType string + +const ( + // NginxPlusAllowCIDRAddressType specifies that the address is a CIDR block. + NginxPlusAllowCIDRAddressType NginxPlusAllowAddressType = "CIDR" + + // NginxPlusAllowIPAddressType specifies that the address is an IP address. + NginxPlusAllowIPAddressType NginxPlusAllowAddressType = "IPAddress" +) + +// KubernetesSpec contains the configuration for the NGINX Deployment and Service Kubernetes objects. +type KubernetesSpec struct { + // Deployment is the configuration for the NGINX Deployment. + // This is the default deployment option. + // + // +optional + Deployment *DeploymentSpec `json:"deployment,omitempty"` + + // Service is the configuration for the NGINX Service. + // + // +optional + Service *ServiceSpec `json:"service,omitempty"` +} + +// Deployment is the configuration for the NGINX Deployment. +type DeploymentSpec struct { + // Number of desired Pods. + // + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // Pod defines Pod-specific fields. + // + // +optional + Pod PodSpec `json:"pod,omitempty"` + + // Container defines container fields for the NGINX container. 
+ // + // +optional + Container ContainerSpec `json:"container,omitempty"` +} + +// PodSpec defines Pod-specific fields. +type PodSpec struct { + // TerminationGracePeriodSeconds is the optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down). + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // Affinity is the pod's scheduling constraints. + // + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations allow the scheduler to schedule Pods with matching taints. + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Volumes represents named volumes in a pod that may be accessed by any container in the pod. + // + // +optional + Volumes []corev1.Volume `json:"volumes,omitempty"` + + // TopologySpreadConstraints describes how a group of Pods ought to spread across topology + // domains. Scheduler will schedule Pods in a way which abides by the constraints. + // All topologySpreadConstraints are ANDed. + // + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// ContainerSpec defines container fields for the NGINX container. +type ContainerSpec struct { + // Image is the NGINX image to use. + // + // +optional + Image *Image `json:"image,omitempty"` + + // Resources describes the compute resource requirements. + // + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // Lifecycle describes actions that the management system should take in response to container lifecycle + // events. For the PostStart and PreStop lifecycle handlers, management of the container blocks + // until the action is complete, unless the container process fails, in which case the handler is aborted. + // + // +optional + Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"` + + // VolumeMounts describe the mounting of Volumes within a container. + // + // +optional + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` +} + +// Image is the NGINX image to use. +type Image struct { + // Repository is the image path. + // Default is ghcr.io/nginx/nginx-gateway-fabric/nginx. + // + // +optional + Repository *string `json:"repository,omitempty"` + // Tag is the image tag to use. Default matches the tag of the control plane. + // + // +optional + Tag *string `json:"tag,omitempty"` + // PullPolicy describes a policy for if/when to pull a container image. + // + // +optional + // +kubebuilder:default:=IfNotPresent + PullPolicy *PullPolicy `json:"pullPolicy,omitempty"` +} + +// PullPolicy describes a policy for if/when to pull a container image. 
+// +kubebuilder:validation:Enum=Always;Never;IfNotPresent +type PullPolicy corev1.PullPolicy + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails. + PullAlways PullPolicy = PullPolicy(corev1.PullAlways) + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the + // image isn't present. + PullNever PullPolicy = PullPolicy(corev1.PullNever) + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image + // isn't present and the pull fails. + PullIfNotPresent PullPolicy = PullPolicy(corev1.PullIfNotPresent) +) + +// ServiceSpec is the configuration for the NGINX Service. +type ServiceSpec struct { + // ServiceType describes ingress method for the Service. + // + // +optional + // +kubebuilder:default:=LoadBalancer + ServiceType *ServiceType `json:"type,omitempty"` + + // ExternalTrafficPolicy describes how nodes distribute service traffic they + // receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, + // and LoadBalancer IPs. + // + // +optional + // +kubebuilder:default:=Local + ExternalTrafficPolicy *ExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` + + // LoadBalancerIP is a static IP address for the load balancer. Requires service type to be LoadBalancer. + // + // +optional + LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` + + // Annotations contain any Service-specific annotations. + // + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // LoadBalancerSourceRanges are the IP ranges (CIDR) that are allowed to access the load balancer. + // Requires service type to be LoadBalancer. + // + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` +} + +// ServiceType describes ingress method for the Service. +// +kubebuilder:validation:Enum=ClusterIP;LoadBalancer;NodePort +type ServiceType corev1.ServiceType + +const ( + // ServiceTypeClusterIP means a Service will only be accessible inside the + // cluster, via the cluster IP. + ServiceTypeClusterIP ServiceType = ServiceType(corev1.ServiceTypeClusterIP) + + // ServiceTypeNodePort means a Service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = ServiceType(corev1.ServiceTypeNodePort) + + // ServiceTypeLoadBalancer means a Service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = ServiceType(corev1.ServiceTypeLoadBalancer) +) + +// ExternalTrafficPolicy describes how nodes distribute service traffic they +// receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, +// and LoadBalancer IPs. +// +kubebuilder:validation:Enum=Cluster;Local +type ExternalTrafficPolicy corev1.ServiceExternalTrafficPolicy + +const ( + // ExternalTrafficPolicyCluster routes traffic to all endpoints. + ExternalTrafficPolicyCluster ExternalTrafficPolicy = ExternalTrafficPolicy(corev1.ServiceExternalTrafficPolicyCluster) + + // ExternalTrafficPolicyLocal preserves the source IP of the traffic by + // routing only to endpoints on the same node as the traffic was received on + // (dropping the traffic if there are no local endpoints). 
+ ExternalTrafficPolicyLocal ExternalTrafficPolicy = ExternalTrafficPolicy(corev1.ServiceExternalTrafficPolicyLocal) ) diff --git a/apis/v1alpha2/zz_generated.deepcopy.go b/apis/v1alpha2/zz_generated.deepcopy.go index 6e0856a220..c6420fc2f2 100644 --- a/apis/v1alpha2/zz_generated.deepcopy.go +++ b/apis/v1alpha2/zz_generated.deepcopy.go @@ -6,10 +6,150 @@ package v1alpha2 import ( "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apisv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSpec) DeepCopyInto(out *ContainerSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSpec. +func (in *ContainerSpec) DeepCopy() *ContainerSpec { + if in == nil { + return nil + } + out := new(ContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Pod.DeepCopyInto(&out.Pod) + in.Container.DeepCopyInto(&out.Container) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.PullPolicy != nil { + in, out := &in.PullPolicy, &out.PullPolicy + *out = new(PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesSpec) DeepCopyInto(out *KubernetesSpec) { + *out = *in + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(DeploymentSpec) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSpec. 
+func (in *KubernetesSpec) DeepCopy() *KubernetesSpec { + if in == nil { + return nil + } + out := new(KubernetesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metrics) DeepCopyInto(out *Metrics) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.Disable != nil { + in, out := &in.Disable, &out.Disable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics. +func (in *Metrics) DeepCopy() *Metrics { + if in == nil { + return nil + } + out := new(Metrics) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NginxLogging) DeepCopyInto(out *NginxLogging) { *out = *in @@ -18,6 +158,11 @@ func (in *NginxLogging) DeepCopyInto(out *NginxLogging) { *out = new(NginxErrorLogLevel) **out = **in } + if in.AgentLevel != nil { + in, out := &in.AgentLevel, &out.AgentLevel + *out = new(AgentLogLevel) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxLogging. @@ -136,6 +281,11 @@ func (in *NginxProxySpec) DeepCopyInto(out *NginxProxySpec) { *out = new(Telemetry) (*in).DeepCopyInto(*out) } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(Metrics) + (*in).DeepCopyInto(*out) + } if in.RewriteClientIP != nil { in, out := &in.RewriteClientIP, &out.RewriteClientIP *out = new(RewriteClientIP) @@ -156,6 +306,11 @@ func (in *NginxProxySpec) DeepCopyInto(out *NginxProxySpec) { *out = new(bool) **out = **in } + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxProxySpec. @@ -254,6 +409,59 @@ func (in *ObservabilityPolicySpec) DeepCopy() *ObservabilityPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSpec) DeepCopyInto(out *PodSpec) { + *out = *in + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. 
+func (in *PodSpec) DeepCopy() *PodSpec { + if in == nil { + return nil + } + out := new(PodSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RewriteClientIP) DeepCopyInto(out *RewriteClientIP) { *out = *in @@ -299,6 +507,48 @@ func (in *RewriteClientIPAddress) DeepCopy() *RewriteClientIPAddress { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.ServiceType != nil { + in, out := &in.ServiceType, &out.ServiceType + *out = new(ServiceType) + **out = **in + } + if in.ExternalTrafficPolicy != nil { + in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy + *out = new(ExternalTrafficPolicy) + **out = **in + } + if in.LoadBalancerIP != nil { + in, out := &in.LoadBalancerIP, &out.LoadBalancerIP + *out = new(string) + **out = **in + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Telemetry) DeepCopyInto(out *Telemetry) { *out = *in diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 6736a3a0b0..3d0d8fc235 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -112,13 +112,13 @@ By default, the NGINX Gateway Fabric helm chart deploys a LoadBalancer Service. To use a NodePort Service instead: ```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.type=NodePort +helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.type=NodePort ``` To disable the creation of a Service: ```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.create=false +helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.create=false ``` ## Upgrading the Chart @@ -253,66 +253,70 @@ kubectl kustomize https://github.com/nginx/nginx-gateway-fabric/config/crd/gatew The following table lists the configurable parameters of the NGINX Gateway Fabric chart and their default values. +> More granular configuration options may not show up in this table. +> Viewing the `values.yaml` file directly can show all available options. + | Key | Description | Type | Default | |-----|-------------|------|---------| -| `affinity` | The affinity of the NGINX Gateway Fabric pod. | object | `{}` | -| `extraVolumes` | extraVolumes for the NGINX Gateway Fabric pod. Use in conjunction with nginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers. 
| list | `[]` | -| `metrics.enable` | Enable exposing metrics in the Prometheus format. | bool | `true` | -| `metrics.port` | Set the port where the Prometheus metrics are exposed. | int | `9113` | -| `metrics.secure` | Enable serving metrics via https. By default metrics are served via http. Please note that this endpoint will be secured with a self-signed certificate. | bool | `false` | +| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"config":{},"container":{},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","plus":false,"pod":{},"replicas":1,"service":{"externalTrafficPolicy":"Local","type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` | | `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. | object | `{}` | +| `nginx.container` | The container configuration for the NGINX container. | object | `{}` | | `nginx.debug` | Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. | bool | `false` | -| `nginx.extraVolumeMounts` | extraVolumeMounts are the additional volume mounts for the nginx container. | list | `[]` | -| `nginx.image.pullPolicy` | | string | `"Always"` | | `nginx.image.repository` | The NGINX image to use. | string | `"ghcr.io/nginx/nginx-gateway-fabric/nginx"` | -| `nginx.image.tag` | | string | `"edge"` | -| `nginx.lifecycle` | The lifecycle of the nginx container. | object | `{}` | -| `nginx.plus` | Is NGINX Plus image being used | bool | `false` | +| `nginx.imagePullSecret` | The name of the secret containing docker registry credentials. Secret must exist in the same namespace as the helm release. The control plane will copy this secret into any namespace where NGINX is deployed. | string | `""` | +| `nginx.imagePullSecrets` | A list of secret names containing docker registry credentials. Secrets must exist in the same namespace as the helm release. The control plane will copy these secrets into any namespace where NGINX is deployed. | list | `[]` | +| `nginx.kind` | The kind of NGINX deployment. | string | `"deployment"` | +| `nginx.plus` | Is NGINX Plus image being used. | bool | `false` | +| `nginx.pod` | The pod configuration for the NGINX data plane pod. | object | `{}` | +| `nginx.replicas` | The number of replicas of the NGINX Deployment. | int | `1` | +| `nginx.service` | The service configuration for the NGINX data plane. | object | `{"externalTrafficPolicy":"Local","type":"LoadBalancer"}` | +| `nginx.service.externalTrafficPolicy` | The externalTrafficPolicy of the service. The value Local preserves the client source IP. | string | `"Local"` | +| `nginx.service.type` | The type of service to create for the NGINX data plane. | string | `"LoadBalancer"` | | `nginx.usage.caSecretName` | The name of the Secret containing the NGINX Instance Manager CA certificate. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.clientSSLSecretName` | The name of the Secret containing the client certificate and key for authenticating with NGINX Instance Manager. 
Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.endpoint` | The endpoint of the NGINX Plus usage reporting server. Default: product.connect.nginx.com | string | `""` | | `nginx.usage.resolver` | The nameserver used to resolve the NGINX Plus usage reporting endpoint. Used with NGINX Instance Manager. | string | `""` | | `nginx.usage.secretName` | The name of the Secret containing the JWT for NGINX Plus usage reporting. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"nplus-license"` | | `nginx.usage.skipVerify` | Disable client verification of the NGINX Plus usage reporting server certificate. | bool | `false` | +| `nginxGateway` | The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment. | object | `{"affinity":{},"config":{"logging":{"level":"info"}},"configAnnotations":{},"extraVolumeMounts":[],"extraVolumes":[],"gatewayClassAnnotations":{},"gatewayClassName":"nginx","gatewayControllerName":"gateway.nginx.org/nginx-gateway-controller","gwAPIExperimentalFeatures":{"enable":false},"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric","tag":"edge"},"kind":"deployment","leaderElection":{"enable":true,"lockName":""},"lifecycle":{},"metrics":{"enable":true,"port":9113,"secure":false},"nodeSelector":{},"podAnnotations":{},"productTelemetry":{"enable":true},"readinessProbe":{"enable":true,"initialDelaySeconds":3,"port":8081},"replicas":1,"resources":{},"service":{"annotations":{}},"serviceAccount":{"annotations":{},"imagePullSecret":"","imagePullSecrets":[],"name":""},"snippetsFilters":{"enable":false},"terminationGracePeriodSeconds":30,"tolerations":[],"topologySpreadConstraints":[]}` | +| `nginxGateway.affinity` | The affinity of the NGINX Gateway Fabric control plane pod. | object | `{}` | | `nginxGateway.config.logging.level` | Log level. | string | `"info"` | | `nginxGateway.configAnnotations` | Set of custom annotations for NginxGateway objects. | object | `{}` | | `nginxGateway.extraVolumeMounts` | extraVolumeMounts are the additional volume mounts for the nginx-gateway container. | list | `[]` | +| `nginxGateway.extraVolumes` | extraVolumes for the NGINX Gateway Fabric control plane pod. Use in conjunction with nginxGateway.extraVolumeMounts mount additional volumes to the container. | list | `[]` | | `nginxGateway.gatewayClassAnnotations` | Set of custom annotations for GatewayClass objects. | object | `{}` | | `nginxGateway.gatewayClassName` | The name of the GatewayClass that will be created as part of this release. Every NGINX Gateway Fabric must have a unique corresponding GatewayClass resource. NGINX Gateway Fabric only processes resources that belong to its class - i.e. have the "gatewayClassName" field resource equal to the class. | string | `"nginx"` | | `nginxGateway.gatewayControllerName` | The name of the Gateway controller. The controller name must be of the form: DOMAIN/PATH. The controller's domain is gateway.nginx.org. | string | `"gateway.nginx.org/nginx-gateway-controller"` | | `nginxGateway.gwAPIExperimentalFeatures.enable` | Enable the experimental features of Gateway API which are supported by NGINX Gateway Fabric. Requires the Gateway APIs installed from the experimental channel. 
| bool | `false` | -| `nginxGateway.image.pullPolicy` | | string | `"Always"` | +| `nginxGateway.image` | The image configuration for the NGINX Gateway Fabric control plane. | object | `{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric","tag":"edge"}` | | `nginxGateway.image.repository` | The NGINX Gateway Fabric image to use | string | `"ghcr.io/nginx/nginx-gateway-fabric"` | -| `nginxGateway.image.tag` | | string | `"edge"` | | `nginxGateway.kind` | The kind of the NGINX Gateway Fabric installation - currently, only deployment is supported. | string | `"deployment"` | | `nginxGateway.labels` | Set of labels to be added for NGINX Gateway Fabric deployment. | object | `{}` | | `nginxGateway.leaderElection.enable` | Enable leader election. Leader election is used to avoid multiple replicas of the NGINX Gateway Fabric reporting the status of the Gateway API resources. If not enabled, all replicas of NGINX Gateway Fabric will update the statuses of the Gateway API resources. | bool | `true` | | `nginxGateway.leaderElection.lockName` | The name of the leader election lock. A Lease object with this name will be created in the same Namespace as the controller. | string | Autogenerated if not set or set to "". | | `nginxGateway.lifecycle` | The lifecycle of the nginx-gateway container. | object | `{}` | +| `nginxGateway.metrics.enable` | Enable exposing metrics in the Prometheus format. | bool | `true` | +| `nginxGateway.metrics.port` | Set the port where the Prometheus metrics are exposed. | int | `9113` | +| `nginxGateway.metrics.secure` | Enable serving metrics via https. By default metrics are served via http. Please note that this endpoint will be secured with a self-signed certificate. | bool | `false` | +| `nginxGateway.nodeSelector` | The nodeSelector of the NGINX Gateway Fabric control plane pod. | object | `{}` | | `nginxGateway.podAnnotations` | Set of custom annotations for the NGINX Gateway Fabric pods. | object | `{}` | | `nginxGateway.productTelemetry.enable` | Enable the collection of product telemetry. | bool | `true` | | `nginxGateway.readinessProbe.enable` | Enable the /readyz endpoint on the control plane. | bool | `true` | | `nginxGateway.readinessProbe.initialDelaySeconds` | The number of seconds after the Pod has started before the readiness probes are initiated. | int | `3` | | `nginxGateway.readinessProbe.port` | Port in which the readiness endpoint is exposed. | int | `8081` | -| `nginxGateway.replicaCount` | The number of replicas of the NGINX Gateway Fabric Deployment. | int | `1` | +| `nginxGateway.replicas` | The number of replicas of the NGINX Gateway Fabric Deployment. | int | `1` | | `nginxGateway.resources` | The resource requests and/or limits of the nginx-gateway container. | object | `{}` | +| `nginxGateway.service` | The service configuration for the NGINX Gateway Fabric control plane. | object | `{"annotations":{}}` | | `nginxGateway.service.annotations` | The annotations of the NGINX Gateway Fabric control plane service. | object | `{}` | +| `nginxGateway.serviceAccount` | The serviceaccount configuration for the NGINX Gateway Fabric control plane. | object | `{"annotations":{},"imagePullSecret":"","imagePullSecrets":[],"name":""}` | +| `nginxGateway.serviceAccount.annotations` | Set of custom annotations for the NGINX Gateway Fabric control plane service account. | object | `{}` | +| `nginxGateway.serviceAccount.imagePullSecret` | The name of the secret containing docker registry credentials for the control plane. 
Secret must exist in the same namespace as the helm release. | string | `""` | +| `nginxGateway.serviceAccount.imagePullSecrets` | A list of secret names containing docker registry credentials for the control plane. Secrets must exist in the same namespace as the helm release. | list | `[]` | +| `nginxGateway.serviceAccount.name` | The name of the service account of the NGINX Gateway Fabric control plane pods. Used for RBAC. | string | Autogenerated if not set or set to "" | | `nginxGateway.snippetsFilters.enable` | Enable SnippetsFilters feature. SnippetsFilters allow inserting NGINX configuration into the generated NGINX config for HTTPRoute and GRPCRoute resources. | bool | `false` | -| `nodeSelector` | The nodeSelector of the NGINX Gateway Fabric pod. | object | `{}` | -| `service.annotations` | The annotations of the NGINX Gateway Fabric service. | object | `{}` | -| `service.create` | Creates a service to expose the NGINX Gateway Fabric pods. | bool | `true` | -| `service.externalTrafficPolicy` | The externalTrafficPolicy of the service. The value Local preserves the client source IP. | string | `"Local"` | -| `service.loadBalancerIP` | The static IP address for the load balancer. Requires service.type set to LoadBalancer. | string | `""` | -| `service.loadBalancerSourceRanges` | The IP ranges (CIDR) that are allowed to access the load balancer. Requires service.type set to LoadBalancer. | list | `[]` | -| `service.ports` | A list of ports to expose through the NGINX Gateway Fabric service. Update it to match the listener ports from your Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports. | list | `[{"name":"http","port":80,"protocol":"TCP","targetPort":80},{"name":"https","port":443,"protocol":"TCP","targetPort":443}]` | -| `service.type` | The type of service to create for the NGINX Gateway Fabric. | string | `"LoadBalancer"` | -| `serviceAccount.annotations` | Set of custom annotations for the NGINX Gateway Fabric service account. | object | `{}` | -| `serviceAccount.imagePullSecret` | The name of the secret containing docker registry credentials. Secret must exist in the same namespace as the helm release. | string | `""` | -| `serviceAccount.imagePullSecrets` | A list of secret names containing docker registry credentials. Secrets must exist in the same namespace as the helm release. | list | `[]` | -| `serviceAccount.name` | The name of the service account of the NGINX Gateway Fabric pods. Used for RBAC. | string | Autogenerated if not set or set to "" | -| `terminationGracePeriodSeconds` | The termination grace period of the NGINX Gateway Fabric pod. | int | `30` | -| `tolerations` | Tolerations for the NGINX Gateway Fabric pod. | list | `[]` | -| `topologySpreadConstraints` | The topology spread constraints for the NGINX Gateway Fabric pod. | list | `[]` | +| `nginxGateway.terminationGracePeriodSeconds` | The termination grace period of the NGINX Gateway Fabric control plane pod. | int | `30` | +| `nginxGateway.tolerations` | Tolerations for the NGINX Gateway Fabric control plane pod. | list | `[]` | +| `nginxGateway.topologySpreadConstraints` | The topology spread constraints for the NGINX Gateway Fabric control plane pod. 
| list | `[]` | ---------------------------------------------- Autogenerated from chart metadata using [helm-docs](https://github.com/norwoodj/helm-docs) diff --git a/charts/nginx-gateway-fabric/README.md.gotmpl b/charts/nginx-gateway-fabric/README.md.gotmpl index f89de6bd00..6306d2a647 100644 --- a/charts/nginx-gateway-fabric/README.md.gotmpl +++ b/charts/nginx-gateway-fabric/README.md.gotmpl @@ -110,13 +110,13 @@ By default, the NGINX Gateway Fabric helm chart deploys a LoadBalancer Service. To use a NodePort Service instead: ```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.type=NodePort +helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.type=NodePort ``` To disable the creation of a Service: ```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set service.create=false +helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.create=false ``` ## Upgrading the Chart @@ -251,6 +251,9 @@ kubectl kustomize https://github.com/nginx/nginx-gateway-fabric/config/crd/gatew The following table lists the configurable parameters of the NGINX Gateway Fabric chart and their default values. +> More granular configuration options may not show up in this table. +> Viewing the `values.yaml` file directly can show all available options. + {{ template "chart.valuesTable" . }} ---------------------------------------------- diff --git a/charts/nginx-gateway-fabric/templates/_helpers.tpl b/charts/nginx-gateway-fabric/templates/_helpers.tpl index 65b6c5e6f8..2a137d5fbd 100644 --- a/charts/nginx-gateway-fabric/templates/_helpers.tpl +++ b/charts/nginx-gateway-fabric/templates/_helpers.tpl @@ -78,7 +78,7 @@ app.kubernetes.io/instance: {{ .Release.Name }} Create the name of the ServiceAccount to use */}} {{- define "nginx-gateway.serviceAccountName" -}} -{{- default (include "nginx-gateway.fullname" .) .Values.serviceAccount.name }} +{{- default (include "nginx-gateway.fullname" .) .Values.nginxGateway.serviceAccount.name }} {{- end }} {{/* diff --git a/charts/nginx-gateway-fabric/templates/clusterrole.yaml b/charts/nginx-gateway-fabric/templates/clusterrole.yaml index 830fe1b391..2ae9c5a2c0 100644 --- a/charts/nginx-gateway-fabric/templates/clusterrole.yaml +++ b/charts/nginx-gateway-fabric/templates/clusterrole.yaml @@ -7,14 +7,25 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets - - pods -{{- if .Values.nginxGateway.gwAPIExperimentalFeatures.enable }} - configmaps -{{- end }} + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces + - pods verbs: - get - list @@ -135,4 +146,6 @@ rules: - {{ include "nginx-gateway.scc-name" . }} verbs: - use + - create + - watch {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/configmap.yaml b/charts/nginx-gateway-fabric/templates/configmap.yaml deleted file mode 100644 index 8b99c60650..0000000000 --- a/charts/nginx-gateway-fabric/templates/configmap.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx-includes-bootstrap - namespace: {{ .Release.Namespace }} - labels: - {{- include "nginx-gateway.labels" . 
| nindent 4 }} -data: - main.conf: | - {{- if and .Values.nginx.config .Values.nginx.config.logging .Values.nginx.config.logging.errorLevel }} - error_log stderr {{ .Values.nginx.config.logging.errorLevel }}; - {{ else }} - error_log stderr info; - {{- end }} - {{- if .Values.nginx.plus }} - mgmt.conf: | - mgmt { - {{- if .Values.nginx.usage.endpoint }} - usage_report endpoint={{ .Values.nginx.usage.endpoint }}; - {{- end }} - {{- if .Values.nginx.usage.skipVerify }} - ssl_verify off; - {{- end }} - {{- if .Values.nginx.usage.caSecretName }} - ssl_trusted_certificate /etc/nginx/certs-bootstrap/ca.crt; - {{- end }} - {{- if .Values.nginx.usage.clientSSLSecretName }} - ssl_certificate /etc/nginx/certs-bootstrap/tls.crt; - ssl_certificate_key /etc/nginx/certs-bootstrap/tls.key; - {{- end }} - enforce_initial_report off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } - {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index 33b965efd6..d76df22442 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -10,7 +10,7 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: - replicas: {{ .Values.nginxGateway.replicaCount }} + replicas: {{ .Values.nginxGateway.replicas }} selector: matchLabels: {{- include "nginx-gateway.selectorLabels" . | nindent 6 }} @@ -21,24 +21,20 @@ spec: {{- with .Values.nginxGateway.labels }} {{- toYaml . | nindent 8 }} {{- end }} - {{- if or .Values.nginxGateway.podAnnotations .Values.metrics.enable }} + {{- if or .Values.nginxGateway.podAnnotations .Values.nginxGateway.metrics.enable }} annotations: {{- if .Values.nginxGateway.podAnnotations }} {{- toYaml .Values.nginxGateway.podAnnotations | nindent 8 }} {{- end }} - {{- if .Values.metrics.enable }} + {{- if .Values.nginxGateway.metrics.enable }} prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.port }}" - {{- if .Values.metrics.secure }} + prometheus.io/port: "{{ .Values.nginxGateway.metrics.port }}" + {{- if .Values.nginxGateway.metrics.secure }} prometheus.io/scheme: "https" {{- end }} {{- end }} {{- end }} spec: - {{- if .Values.topologySpreadConstraints }} - topologySpreadConstraints: - {{- toYaml .Values.topologySpreadConstraints | nindent 8 }} - {{- end }} containers: - args: - static-mode @@ -67,9 +63,9 @@ spec: - --usage-report-client-ssl-secret={{ .Values.nginx.usage.clientSSLSecretName }} {{- end }} {{- end }} - {{- if .Values.metrics.enable }} - - --metrics-port={{ .Values.metrics.port }} - {{- if .Values.metrics.secure }} + {{- if .Values.nginxGateway.metrics.enable }} + - --metrics-port={{ .Values.nginxGateway.metrics.port }} + {{- if .Values.nginxGateway.metrics.secure }} - --metrics-secure-serving {{- end }} {{- else }} @@ -95,10 +91,6 @@ spec: - --snippets-filters {{- end }} env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -111,6 +103,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} name: nginx-gateway @@ -125,9 
+123,9 @@ spec: ports: - name: agent-grpc containerPort: 8443 - {{- if .Values.metrics.enable }} + {{- if .Values.nginxGateway.metrics.enable }} - name: metrics - containerPort: {{ .Values.metrics.port }} + containerPort: {{ .Values.nginxGateway.metrics.port }} {{- end }} {{- if .Values.nginxGateway.readinessProbe.enable }} - name: health @@ -152,24 +150,28 @@ spec: {{- with .Values.nginxGateway.extraVolumeMounts -}} {{ toYaml . | nindent 8 }} {{- end }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - {{- if .Values.affinity }} + {{- if .Values.nginxGateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml .Values.nginxGateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.nginxGateway.terminationGracePeriodSeconds }} + {{- if .Values.nginxGateway.affinity }} affinity: - {{- toYaml .Values.affinity | nindent 8 }} + {{- toYaml .Values.nginxGateway.affinity | nindent 8 }} {{- end }} serviceAccountName: {{ include "nginx-gateway.serviceAccountName" . }} securityContext: fsGroup: 1001 runAsNonRoot: true - {{- if .Values.tolerations }} + {{- if .Values.nginxGateway.tolerations }} tolerations: - {{- toYaml .Values.tolerations | nindent 6 }} + {{- toYaml .Values.nginxGateway.tolerations | nindent 6 }} {{- end }} - {{- if .Values.nodeSelector }} + {{- if .Values.nginxGateway.nodeSelector }} nodeSelector: - {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- toYaml .Values.nginxGateway.nodeSelector | nindent 8 }} {{- end }} - {{- with .Values.extraVolumes -}} + {{- with .Values.nginxGateway.extraVolumes -}} {{ toYaml . | nindent 6 }} {{- end }} {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/gatewayclass.yaml b/charts/nginx-gateway-fabric/templates/gatewayclass.yaml index aecd54e8ad..b6905cd33c 100644 --- a/charts/nginx-gateway-fabric/templates/gatewayclass.yaml +++ b/charts/nginx-gateway-fabric/templates/gatewayclass.yaml @@ -12,10 +12,8 @@ metadata: {{- end }} spec: controllerName: {{ .Values.nginxGateway.gatewayControllerName }} - {{- if .Values.nginx.config }} parametersRef: group: gateway.nginx.org kind: NginxProxy name: {{ include "nginx-gateway.proxy-config-name" . }} namespace: {{ .Release.Namespace }} - {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml index bc4105ee37..1dd6f44155 100644 --- a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml +++ b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.nginx.config }} apiVersion: gateway.nginx.org/v1alpha2 kind: NginxProxy metadata: @@ -7,5 +6,25 @@ metadata: labels: {{- include "nginx-gateway.labels" . 
| nindent 4 }} spec: + {{- if .Values.nginx.config }} {{- toYaml .Values.nginx.config | nindent 2 }} -{{- end }} + {{- end }} + kubernetes: + {{- if eq .Values.nginx.kind "deployment" }} + deployment: + replicas: {{ .Values.nginx.replicas }} + {{- if .Values.nginx.pod }} + pod: + {{- toYaml .Values.nginx.pod | nindent 8 }} + {{- end }} + container: + {{- if .Values.nginx.container }} + {{- toYaml .Values.nginx.container | nindent 8 }} + {{- end }} + image: + {{- toYaml .Values.nginx.image | nindent 10 }} + {{- end }} + {{- if .Values.nginx.service }} + service: + {{- toYaml .Values.nginx.service | nindent 6 }} + {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/serviceaccount.yaml b/charts/nginx-gateway-fabric/templates/serviceaccount.yaml index 069a2066b9..fa3439759d 100644 --- a/charts/nginx-gateway-fabric/templates/serviceaccount.yaml +++ b/charts/nginx-gateway-fabric/templates/serviceaccount.yaml @@ -6,14 +6,14 @@ metadata: labels: {{- include "nginx-gateway.labels" . | nindent 4 }} annotations: - {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} -{{- if or .Values.serviceAccount.imagePullSecret .Values.serviceAccount.imagePullSecrets }} + {{- toYaml .Values.nginxGateway.serviceAccount.annotations | nindent 4 }} +{{- if or .Values.nginxGateway.serviceAccount.imagePullSecret .Values.nginxGateway.serviceAccount.imagePullSecrets }} imagePullSecrets: - {{- if .Values.serviceAccount.imagePullSecret }} - - name: {{ .Values.serviceAccount.imagePullSecret }} + {{- if .Values.nginxGateway.serviceAccount.imagePullSecret }} + - name: {{ .Values.nginxGateway.serviceAccount.imagePullSecret }} {{- end }} - {{- if .Values.serviceAccount.imagePullSecrets }} - {{- range .Values.serviceAccount.imagePullSecrets }} + {{- if .Values.nginxGateway.serviceAccount.imagePullSecrets }} + {{- range .Values.nginxGateway.serviceAccount.imagePullSecrets }} - name: {{ . }} {{- end }} {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml deleted file mode 100644 index 6e85efffeb..0000000000 --- a/charts/nginx-gateway-fabric/templates/tmp-nginx-agent-conf.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: {{ .Release.Namespace }} -data: - nginx-agent.conf: |- - command: - server: - host: {{ include "nginx-gateway.fullname" . 
}}.{{ .Release.Namespace }}.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - {{- if .Values.nginx.plus }} - - api-action - {{- end }} - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml deleted file mode 100644 index bb04bf46eb..0000000000 --- a/charts/nginx-gateway-fabric/templates/tmp-nginx-deployment.yaml +++ /dev/null @@ -1,204 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: {{ .Release.Namespace }} -spec: - selector: - matchLabels: - app.kubernetes.io/name: tmp-nginx-deployment - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app.kubernetes.io/name: tmp-nginx-deployment - app.kubernetes.io/instance: {{ .Release.Name }} - annotations: - {{- if .Values.metrics.enable }} - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.port }}" - {{- if .Values.metrics.secure }} - prometheus.io/scheme: "https" - {{- end }} - {{- end }} - spec: - initContainers: - - name: init - image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} - imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - {{- if .Values.nginx.plus }} - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes - {{- end }} - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 101 - runAsGroup: 1001 - volumeMounts: - - name: nginx-agent-config - mountPath: /agent - - name: nginx-agent - mountPath: /etc/nginx-agent - - name: nginx-includes-bootstrap - mountPath: /includes - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - containers: - - image: {{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag | default .Chart.AppVersion }} - imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} - name: nginx - {{- if .Values.nginx.lifecycle }} - lifecycle: - {{- toYaml .Values.nginx.lifecycle | nindent 10 }} - {{- end }} - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - name: metrics - containerPort: {{ .Values.metrics.port }} - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 101 - runAsGroup: 1001 - volumeMounts: - - name: nginx-agent - mountPath: /etc/nginx-agent - - name: nginx-agent-log - mountPath: /var/log/nginx-agent - - name: nginx-conf - mountPath: /etc/nginx/conf.d - - name: nginx-stream-conf - mountPath: /etc/nginx/stream-conf.d - - name: nginx-main-includes - mountPath: /etc/nginx/main-includes - - name: nginx-secrets - mountPath: /etc/nginx/secrets - - name: nginx-run - mountPath: 
/var/run/nginx - - name: nginx-cache - mountPath: /var/cache/nginx - - name: nginx-includes - mountPath: /etc/nginx/includes - {{- if .Values.nginx.plus }} - - name: nginx-lib - mountPath: /var/lib/nginx/state - {{- if .Values.nginx.usage.secretName }} - - name: nginx-plus-license - mountPath: /etc/nginx/license.jwt - subPath: license.jwt - {{- end }} - {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} - - name: nginx-plus-usage-certs - mountPath: /etc/nginx/certs-bootstrap/ - {{- end }} - {{- end }} - {{- with .Values.nginx.extraVolumeMounts -}} - {{ toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.nginx.debug }} - command: - - "/bin/sh" - args: - - "-c" - - "rm -rf /var/run/nginx/*.sock && nginx-debug -g 'daemon off;'" - {{- end }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - {{- if .Values.affinity }} - affinity: - {{- toYaml .Values.affinity | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "nginx-gateway.serviceAccountName" . }} - securityContext: - fsGroup: 1001 - runAsNonRoot: true - {{- if .Values.tolerations }} - tolerations: - {{- toYaml .Values.tolerations | nindent 6 }} - {{- end }} - {{- if .Values.nodeSelector }} - nodeSelector: - {{- toYaml .Values.nodeSelector | nindent 8 }} - {{- end }} - volumes: - - name: nginx-agent - emptyDir: {} - - name: nginx-agent-config - configMap: - name: nginx-agent-config - - name: nginx-agent-log - emptyDir: {} - - name: nginx-conf - emptyDir: {} - - name: nginx-stream-conf - emptyDir: {} - - name: nginx-main-includes - emptyDir: {} - - name: nginx-secrets - emptyDir: {} - - name: nginx-run - emptyDir: {} - - name: nginx-cache - emptyDir: {} - - name: nginx-includes - emptyDir: {} - - name: nginx-includes-bootstrap - configMap: - name: nginx-includes-bootstrap - {{- if .Values.nginx.plus }} - - name: nginx-lib - emptyDir: {} - {{- if .Values.nginx.usage.secretName }} - - name: nginx-plus-license - secret: - secretName: {{ .Values.nginx.usage.secretName }} - {{- end }} - {{- if or .Values.nginx.usage.caSecretName .Values.nginx.usage.clientSSLSecretName }} - - name: nginx-plus-usage-certs - projected: - sources: - {{- if .Values.nginx.usage.caSecretName }} - - secret: - name: {{ .Values.nginx.usage.caSecretName }} - {{- end }} - {{- if .Values.nginx.usage.clientSSLSecretName }} - - secret: - name: {{ .Values.nginx.usage.clientSSLSecretName }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.extraVolumes -}} - {{ toYaml . | nindent 6 }} - {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml b/charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml deleted file mode 100644 index 6b82fd1e78..0000000000 --- a/charts/nginx-gateway-fabric/templates/tmp-nginx-service.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if .Values.service.create }} -apiVersion: v1 -kind: Service -metadata: - name: tmp-nginx-deployment - namespace: {{ .Release.Namespace }} - labels: - {{- include "nginx-gateway.labels" . 
| nindent 4 }} -{{- if .Values.service.annotations }} - annotations: -{{ toYaml .Values.service.annotations | indent 4 }} -{{- end }} -spec: -{{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} - {{- if .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} - {{- end }} -{{- end }} - type: {{ .Values.service.type }} -{{- if eq .Values.service.type "LoadBalancer" }} - {{- if .Values.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - {{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 2 }} - {{- end }} -{{- end}} - selector: - app.kubernetes.io/name: tmp-nginx-deployment - app.kubernetes.io/instance: {{ .Release.Name }} - ports: # Update the following ports to match your Gateway Listener ports -{{- if .Values.service.ports }} -{{ toYaml .Values.service.ports | indent 2 }} -{{ end }} -{{- end }} diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index 05279c85b4..9824d6ca3c 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -1,58 +1,14 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { - "affinity": { - "description": "The affinity of the NGINX Gateway Fabric pod.", - "required": [], - "title": "affinity", - "type": "object" - }, - "extraVolumes": { - "description": "extraVolumes for the NGINX Gateway Fabric pod. Use in conjunction with\nnginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers.", - "items": { - "required": [] - }, - "required": [], - "title": "extraVolumes", - "type": "array" - }, "global": { "description": "Global values are values that can be accessed from any chart or subchart by exactly the same name.", "required": [], "title": "global", "type": "object" }, - "metrics": { - "properties": { - "enable": { - "default": true, - "description": "Enable exposing metrics in the Prometheus format.", - "required": [], - "title": "enable", - "type": "boolean" - }, - "port": { - "default": 9113, - "description": "Set the port where the Prometheus metrics are exposed.", - "maximum": 65535, - "minimum": 1, - "required": [], - "title": "port", - "type": "integer" - }, - "secure": { - "default": false, - "description": "Enable serving metrics via https. 
By default metrics are served via http.\nPlease note that this endpoint will be secured with a self-signed certificate.", - "required": [], - "title": "secure", - "type": "boolean" - } - }, - "required": [], - "title": "metrics", - "type": "object" - }, "nginx": { + "description": "The nginx section contains the configuration for all NGINX data plane deployments\ninstalled by the NGINX Gateway Fabric control plane.", "properties": { "config": { "description": "The configuration for the data plane that is contained in the NginxProxy resource.", @@ -75,6 +31,17 @@ "logging": { "description": "Logging defines logging related settings for NGINX.", "properties": { + "agentLevel": { + "enum": [ + "debug", + "info", + "error", + "panic", + "fatal" + ], + "required": [], + "type": "string" + }, "errorLevel": { "enum": [ "debug", @@ -93,6 +60,23 @@ "required": [], "type": "object" }, + "metrics": { + "description": "Metrics defines the configuration for Prometheus scraping metrics.", + "properties": { + "disable": { + "required": [], + "type": "boolean" + }, + "port": { + "maximum": 65535, + "minimum": 1, + "required": [], + "type": "integer" + } + }, + "required": [], + "type": "object" + }, "nginxPlus": { "description": "NginxPlus specifies NGINX Plus additional settings.", "properties": { @@ -239,6 +223,12 @@ "title": "config", "type": "object" }, + "container": { + "description": "The container configuration for the NGINX container.", + "required": [], + "title": "container", + "type": "object" + }, "debug": { "default": false, "description": "Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource.", @@ -246,15 +236,6 @@ "title": "debug", "type": "boolean" }, - "extraVolumeMounts": { - "description": "extraVolumeMounts are the additional volume mounts for the nginx container.", - "items": { - "required": [] - }, - "required": [], - "title": "extraVolumeMounts", - "type": "array" - }, "image": { "properties": { "pullPolicy": { @@ -285,19 +266,80 @@ "title": "image", "type": "object" }, - "lifecycle": { - "description": "The lifecycle of the nginx container.", + "imagePullSecret": { + "default": "", + "description": "The name of the secret containing docker registry credentials.\nSecret must exist in the same namespace as the helm release. The control\nplane will copy this secret into any namespace where NGINX is deployed.", "required": [], - "title": "lifecycle", - "type": "object" + "title": "imagePullSecret", + "type": "string" + }, + "imagePullSecrets": { + "description": "A list of secret names containing docker registry credentials.\nSecrets must exist in the same namespace as the helm release. 
The control\nplane will copy these secrets into any namespace where NGINX is deployed.", + "items": { + "required": [] + }, + "required": [], + "title": "imagePullSecrets", + "type": "array" + }, + "kind": { + "default": "deployment", + "description": "The kind of NGINX deployment.", + "enum": [ + "deployment" + ], + "required": [], + "title": "kind" }, "plus": { "default": false, - "description": "Is NGINX Plus image being used", + "description": "Is NGINX Plus image being used.", "required": [], "title": "plus", "type": "boolean" }, + "pod": { + "description": "The pod configuration for the NGINX data plane pod.", + "required": [], + "title": "pod", + "type": "object" + }, + "replicas": { + "default": 1, + "description": "The number of replicas of the NGINX Deployment.", + "required": [], + "title": "replicas", + "type": "integer" + }, + "service": { + "description": "The service configuration for the NGINX data plane.", + "properties": { + "externalTrafficPolicy": { + "default": "Local", + "description": "The externalTrafficPolicy of the service. The value Local preserves the client source IP.", + "enum": [ + "Cluster", + "Local" + ], + "required": [], + "title": "externalTrafficPolicy" + }, + "type": { + "default": "LoadBalancer", + "description": "The type of service to create for the NGINX data plane.", + "enum": [ + "ClusterIP", + "NodePort", + "LoadBalancer" + ], + "required": [], + "title": "type" + } + }, + "required": [], + "title": "service", + "type": "object" + }, "usage": { "description": "Configuration for NGINX Plus usage reporting.", "properties": { @@ -354,7 +396,14 @@ "type": "object" }, "nginxGateway": { + "description": "The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment.", "properties": { + "affinity": { + "description": "The affinity of the NGINX Gateway Fabric control plane pod.", + "required": [], + "title": "affinity", + "type": "object" + }, "config": { "description": "The dynamic configuration for the control plane that is contained in the NginxGateway resource.", "properties": { @@ -396,6 +445,15 @@ "title": "extraVolumeMounts", "type": "array" }, + "extraVolumes": { + "description": "extraVolumes for the NGINX Gateway Fabric control plane pod. Use in conjunction with\nnginxGateway.extraVolumeMounts mount additional volumes to the container.", + "items": { + "required": [] + }, + "required": [], + "title": "extraVolumes", + "type": "array" + }, "gatewayClassAnnotations": { "description": "Set of custom annotations for GatewayClass objects.", "required": [], @@ -431,6 +489,7 @@ "type": "object" }, "image": { + "description": "The image configuration for the NGINX Gateway Fabric control plane.", "properties": { "pullPolicy": { "default": "Always", @@ -501,6 +560,42 @@ "title": "lifecycle", "type": "object" }, + "metrics": { + "properties": { + "enable": { + "default": true, + "description": "Enable exposing metrics in the Prometheus format.", + "required": [], + "title": "enable", + "type": "boolean" + }, + "port": { + "default": 9113, + "description": "Set the port where the Prometheus metrics are exposed.", + "maximum": 65535, + "minimum": 1, + "required": [], + "title": "port", + "type": "integer" + }, + "secure": { + "default": false, + "description": "Enable serving metrics via https. 
By default metrics are served via http.\nPlease note that this endpoint will be secured with a self-signed certificate.", + "required": [], + "title": "secure", + "type": "boolean" + } + }, + "required": [], + "title": "metrics", + "type": "object" + }, + "nodeSelector": { + "description": "The nodeSelector of the NGINX Gateway Fabric control plane pod.", + "required": [], + "title": "nodeSelector", + "type": "object" + }, "podAnnotations": { "description": "Set of custom annotations for the NGINX Gateway Fabric pods.", "required": [], @@ -552,11 +647,11 @@ "title": "readinessProbe", "type": "object" }, - "replicaCount": { + "replicas": { "default": 1, "description": "The number of replicas of the NGINX Gateway Fabric Deployment.", "required": [], - "title": "replicaCount", + "title": "replicas", "type": "integer" }, "resources": { @@ -566,6 +661,7 @@ "type": "object" }, "service": { + "description": "The service configuration for the NGINX Gateway Fabric control plane.", "properties": { "annotations": { "description": "The annotations of the NGINX Gateway Fabric control plane service.", @@ -578,6 +674,43 @@ "title": "service", "type": "object" }, + "serviceAccount": { + "description": "The serviceaccount configuration for the NGINX Gateway Fabric control plane.", + "properties": { + "annotations": { + "description": "Set of custom annotations for the NGINX Gateway Fabric control plane service account.", + "required": [], + "title": "annotations", + "type": "object" + }, + "imagePullSecret": { + "default": "", + "description": "The name of the secret containing docker registry credentials for the control plane.\nSecret must exist in the same namespace as the helm release.", + "required": [], + "title": "imagePullSecret", + "type": "string" + }, + "imagePullSecrets": { + "description": "A list of secret names containing docker registry credentials for the control plane.\nSecrets must exist in the same namespace as the helm release.", + "items": { + "required": [] + }, + "required": [], + "title": "imagePullSecrets", + "type": "array" + }, + "name": { + "default": "", + "description": "The name of the service account of the NGINX Gateway Fabric control plane pods. Used for RBAC.", + "required": [], + "title": "name", + "type": "string" + } + }, + "required": [], + "title": "serviceAccount", + "type": "object" + }, "snippetsFilters": { "properties": { "enable": { @@ -591,174 +724,39 @@ "required": [], "title": "snippetsFilters", "type": "object" - } - }, - "required": [ - "gatewayClassName", - "gatewayControllerName" - ], - "title": "nginxGateway", - "type": "object" - }, - "nodeSelector": { - "description": "The nodeSelector of the NGINX Gateway Fabric pod.", - "required": [], - "title": "nodeSelector", - "type": "object" - }, - "service": { - "properties": { - "annotations": { - "description": "The annotations of the NGINX Gateway Fabric service.", - "required": [], - "title": "annotations", - "type": "object" - }, - "create": { - "default": true, - "description": "Creates a service to expose the NGINX Gateway Fabric pods.", - "required": [], - "title": "create", - "type": "boolean" }, - "externalTrafficPolicy": { - "default": "Local", - "description": "The externalTrafficPolicy of the service. 
The value Local preserves the client source IP.", - "enum": [ - "Cluster", - "Local" - ], + "terminationGracePeriodSeconds": { + "default": 30, + "description": "The termination grace period of the NGINX Gateway Fabric control plane pod.", "required": [], - "title": "externalTrafficPolicy" - }, - "loadBalancerIP": { - "default": "", - "description": "The static IP address for the load balancer. Requires service.type set to LoadBalancer.", - "required": [], - "title": "loadBalancerIP", - "type": "string" + "title": "terminationGracePeriodSeconds", + "type": "integer" }, - "loadBalancerSourceRanges": { - "description": "The IP ranges (CIDR) that are allowed to access the load balancer. Requires service.type set to LoadBalancer.", + "tolerations": { + "description": "Tolerations for the NGINX Gateway Fabric control plane pod.", "items": { "required": [] }, "required": [], - "title": "loadBalancerSourceRanges", + "title": "tolerations", "type": "array" }, - "ports": { - "description": "A list of ports to expose through the NGINX Gateway Fabric service. Update it to match the listener ports from\nyour Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports.", - "items": { - "properties": { - "name": { - "required": [], - "type": "string" - }, - "port": { - "maximum": 65535, - "minimum": 1, - "required": [], - "type": "integer" - }, - "protocol": { - "enum": [ - "TCP", - "UDP" - ], - "required": [], - "type": "string" - }, - "targetPort": { - "maximum": 65535, - "minimum": 1, - "required": [], - "type": "integer" - } - }, - "required": [], - "type": "object" - }, - "required": [], - "title": "ports", - "type": "array" - }, - "type": { - "default": "LoadBalancer", - "description": "The type of service to create for the NGINX Gateway Fabric.", - "enum": [ - "ClusterIP", - "NodePort", - "LoadBalancer" - ], - "required": [], - "title": "type" - } - }, - "required": [], - "title": "service", - "type": "object" - }, - "serviceAccount": { - "properties": { - "annotations": { - "description": "Set of custom annotations for the NGINX Gateway Fabric service account.", - "required": [], - "title": "annotations", - "type": "object" - }, - "imagePullSecret": { - "default": "", - "description": "The name of the secret containing docker registry credentials.\nSecret must exist in the same namespace as the helm release.", - "required": [], - "title": "imagePullSecret", - "type": "string" - }, - "imagePullSecrets": { - "description": "A list of secret names containing docker registry credentials.\nSecrets must exist in the same namespace as the helm release.", + "topologySpreadConstraints": { + "description": "The topology spread constraints for the NGINX Gateway Fabric control plane pod.", "items": { "required": [] }, "required": [], - "title": "imagePullSecrets", + "title": "topologySpreadConstraints", "type": "array" - }, - "name": { - "default": "", - "description": "The name of the service account of the NGINX Gateway Fabric pods. 
Used for RBAC.", - "required": [], - "title": "name", - "type": "string" } }, - "required": [], - "title": "serviceAccount", + "required": [ + "gatewayClassName", + "gatewayControllerName" + ], + "title": "nginxGateway", "type": "object" - }, - "terminationGracePeriodSeconds": { - "default": 30, - "description": "The termination grace period of the NGINX Gateway Fabric pod.", - "required": [], - "title": "terminationGracePeriodSeconds", - "type": "integer" - }, - "tolerations": { - "description": "Tolerations for the NGINX Gateway Fabric pod.", - "items": { - "required": [] - }, - "required": [], - "title": "tolerations", - "type": "array" - }, - "topologySpreadConstraints": { - "description": "The topology spread constraints for the NGINX Gateway Fabric pod.", - "items": { - "required": [] - }, - "required": [], - "title": "topologySpreadConstraints", - "type": "array" } }, "required": [], diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index eb20ec601c..fadd9f4489 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -1,5 +1,6 @@ # yaml-language-server: $schema=values.schema.json +# -- The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment. nginxGateway: # FIXME(lucacome): https://github.com/nginx/nginx-gateway-fabric/issues/2490 @@ -50,12 +51,30 @@ nginxGateway: # -- Set of custom annotations for NginxGateway objects. configAnnotations: {} + # -- The service configuration for the NGINX Gateway Fabric control plane. service: # -- The annotations of the NGINX Gateway Fabric control plane service. annotations: {} + # -- The serviceaccount configuration for the NGINX Gateway Fabric control plane. + serviceAccount: + # -- Set of custom annotations for the NGINX Gateway Fabric control plane service account. + annotations: {} + + # -- The name of the service account of the NGINX Gateway Fabric control plane pods. Used for RBAC. + # @default -- Autogenerated if not set or set to "" + name: "" + + # -- The name of the secret containing docker registry credentials for the control plane. + # Secret must exist in the same namespace as the helm release. + imagePullSecret: "" + + # -- A list of secret names containing docker registry credentials for the control plane. + # Secrets must exist in the same namespace as the helm release. + imagePullSecrets: [] + # -- The number of replicas of the NGINX Gateway Fabric Deployment. - replicaCount: 1 + replicas: 1 # The configuration for leader election. leaderElection: @@ -86,6 +105,7 @@ nginxGateway: # -- The number of seconds after the Pod has started before the readiness probes are initiated. initialDelaySeconds: 3 + # -- The image configuration for the NGINX Gateway Fabric control plane. image: # -- The NGINX Gateway Fabric image to use repository: ghcr.io/nginx/nginx-gateway-fabric @@ -108,9 +128,44 @@ nginxGateway: # -- The resource requests and/or limits of the nginx-gateway container. resources: {} + # -- extraVolumes for the NGINX Gateway Fabric control plane pod. Use in conjunction with + # nginxGateway.extraVolumeMounts mount additional volumes to the container. + extraVolumes: [] + # -- extraVolumeMounts are the additional volume mounts for the nginx-gateway container. extraVolumeMounts: [] + # -- The termination grace period of the NGINX Gateway Fabric control plane pod. + terminationGracePeriodSeconds: 30 + + # -- Tolerations for the NGINX Gateway Fabric control plane pod. 
+ tolerations: [] + + # -- The nodeSelector of the NGINX Gateway Fabric control plane pod. + nodeSelector: {} + + # -- The affinity of the NGINX Gateway Fabric control plane pod. + affinity: {} + + # -- The topology spread constraints for the NGINX Gateway Fabric control plane pod. + topologySpreadConstraints: [] + + metrics: + # -- Enable exposing metrics in the Prometheus format. + enable: true + + # @schema + # type: integer + # minimum: 1 + # maximum: 65535 + # @schema + # -- Set the port where the Prometheus metrics are exposed. + port: 9113 + + # -- Enable serving metrics via https. By default metrics are served via http. + # Please note that this endpoint will be secured with a self-signed certificate. + secure: false + gwAPIExperimentalFeatures: # -- Enable the experimental features of Gateway API which are supported by NGINX Gateway Fabric. Requires the Gateway # APIs installed from the experimental channel. @@ -121,7 +176,19 @@ nginxGateway: # config for HTTPRoute and GRPCRoute resources. enable: false +# -- The nginx section contains the configuration for all NGINX data plane deployments +# installed by the NGINX Gateway Fabric control plane. nginx: + # @schema + # enum: + # - deployment + # @schema + # -- The kind of NGINX deployment. + kind: deployment + + # -- The number of replicas of the NGINX Deployment. + replicas: 1 + image: # -- The NGINX image to use. repository: ghcr.io/nginx/nginx-gateway-fabric/nginx @@ -134,9 +201,19 @@ nginx: # @schema pullPolicy: Always - # -- Is NGINX Plus image being used + # -- Is NGINX Plus image being used. plus: false + # -- The name of the secret containing docker registry credentials. + # Secret must exist in the same namespace as the helm release. The control + # plane will copy this secret into any namespace where NGINX is deployed. + imagePullSecret: "" + + # -- A list of secret names containing docker registry credentials. + # Secrets must exist in the same namespace as the helm release. The control + # plane will copy these secrets into any namespace where NGINX is deployed. + imagePullSecrets: [] + # Configuration for NGINX Plus usage reporting. usage: # -- The name of the Secret containing the JWT for NGINX Plus usage reporting. Must exist in the same namespace @@ -238,6 +315,16 @@ nginx: # type: string # enum: # - DisableTracing + # metrics: + # type: object + # description: Metrics defines the configuration for Prometheus scraping metrics. + # properties: + # disable: + # type: boolean + # port: + # type: integer + # minimum: 1 + # maximum: 65535 # logging: # type: object # description: Logging defines logging related settings for NGINX. @@ -253,6 +340,14 @@ nginx: # - crit # - alert # - emerg + # agentLevel: + # type: string + # enum: + # - debug + # - info + # - error + # - panic + # - fatal # nginxPlus: # type: object # description: NginxPlus specifies NGINX Plus additional settings. @@ -272,125 +367,65 @@ nginx: # -- The configuration for the data plane that is contained in the NginxProxy resource. config: {} - # -- Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. - debug: false - - # -- The lifecycle of the nginx container. - lifecycle: {} + # -- The pod configuration for the NGINX data plane pod. + pod: {} + # -- The termination grace period of the NGINX data plane pod. + # terminationGracePeriodSeconds: 30 - # -- extraVolumeMounts are the additional volume mounts for the nginx container. 
-  extraVolumeMounts: []
+    # -- Tolerations for the NGINX data plane pod.
+    # tolerations: []
-# -- The termination grace period of the NGINX Gateway Fabric pod.
-terminationGracePeriodSeconds: 30
+    # -- The nodeSelector of the NGINX data plane pod.
+    # nodeSelector: {}
-# -- Tolerations for the NGINX Gateway Fabric pod.
-tolerations: []
+    # -- The affinity of the NGINX data plane pod.
+    # affinity: {}
-# -- The nodeSelector of the NGINX Gateway Fabric pod.
-nodeSelector: {}
+    # -- The topology spread constraints for the NGINX data plane pod.
+    # topologySpreadConstraints: []
-# -- The affinity of the NGINX Gateway Fabric pod.
-affinity: {}
+    # -- extraVolumes for the NGINX data plane pod. Use in conjunction with
+    # nginx.container.extraVolumeMounts to mount additional volumes to the container.
+    # extraVolumes: []
-# -- The topology spread constraints for the NGINX Gateway Fabric pod.
-topologySpreadConstraints: []
+  # -- The container configuration for the NGINX container.
+  container: {}
+    # -- The resource requirements of the NGINX container.
+    # resources: {}
-serviceAccount:
-  # -- Set of custom annotations for the NGINX Gateway Fabric service account.
-  annotations: {}
+    # -- The lifecycle of the NGINX container.
+    # lifecycle: {}
-  # -- The name of the service account of the NGINX Gateway Fabric pods. Used for RBAC.
-  # @default -- Autogenerated if not set or set to ""
-  name: ""
-
-  # -- The name of the secret containing docker registry credentials.
-  # Secret must exist in the same namespace as the helm release.
-  imagePullSecret: ""
-
-  # -- A list of secret names containing docker registry credentials.
-  # Secrets must exist in the same namespace as the helm release.
-  imagePullSecrets: []
-
-service:
-  # -- Creates a service to expose the NGINX Gateway Fabric pods.
-  create: true
-
-  # @schema
-  # enum:
-  # - ClusterIP
-  # - NodePort
-  # - LoadBalancer
-  # @schema
-  # -- The type of service to create for the NGINX Gateway Fabric.
-  type: LoadBalancer
+    # -- extraVolumeMounts are the additional volume mounts for the NGINX container.
+    # extraVolumeMounts: []
-  # @schema
-  # enum:
-  # - Cluster
-  # - Local
-  # @schema
-  # -- The externalTrafficPolicy of the service. The value Local preserves the client source IP.
-  externalTrafficPolicy: Local
-
-  # -- The annotations of the NGINX Gateway Fabric service.
-  annotations: {}
-
-  # -- The static IP address for the load balancer. Requires service.type set to LoadBalancer.
-  loadBalancerIP: ""
+  # -- The service configuration for the NGINX data plane.
+  service:
+    # @schema
+    # enum:
+    # - ClusterIP
+    # - NodePort
+    # - LoadBalancer
+    # @schema
+    # -- The type of service to create for the NGINX data plane.
+    type: LoadBalancer
-  # -- The IP ranges (CIDR) that are allowed to access the load balancer. Requires service.type set to LoadBalancer.
-  loadBalancerSourceRanges: []
+    # @schema
+    # enum:
+    # - Cluster
+    # - Local
+    # @schema
+    # -- The externalTrafficPolicy of the service. The value Local preserves the client source IP.
+    externalTrafficPolicy: Local
-  # @schema
-  # type: array
-  # items:
-  #   type: object
-  #   properties:
-  #     port:
-  #       type: integer
-  #       minimum: 1
-  #       maximum: 65535
-  #     targetPort:
-  #       type: integer
-  #       minimum: 1
-  #       maximum: 65535
-  #     protocol:
-  #       type: string
-  #       enum:
-  #       - TCP
-  #       - UDP
-  #     name:
-  #       type: string
-  # @schema
-  # -- A list of ports to expose through the NGINX Gateway Fabric service.
Update it to match the listener ports from - # your Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports. - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - targetPort: 443 - protocol: TCP - name: https - -metrics: - # -- Enable exposing metrics in the Prometheus format. - enable: true + # -- The annotations of the NGINX data plane service. + # annotations: {} - # @schema - # type: integer - # minimum: 1 - # maximum: 65535 - # @schema - # -- Set the port where the Prometheus metrics are exposed. - port: 9113 + # -- The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. + # loadBalancerIP: "" - # -- Enable serving metrics via https. By default metrics are served via http. - # Please note that this endpoint will be secured with a self-signed certificate. - secure: false + # -- The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer. + # loadBalancerSourceRanges: [] -# -- extraVolumes for the NGINX Gateway Fabric pod. Use in conjunction with -# nginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers. -extraVolumes: [] + # -- Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. + debug: false diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index da92044068..0a572c922e 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -6,6 +6,7 @@ import ( "os" "runtime/debug" "strconv" + "strings" "time" "github.com/spf13/cobra" @@ -206,7 +207,7 @@ func createStaticModeCommand() *cobra.Command { flagKeys, flagValues := parseFlags(cmd.Flags()) - podConfig, err := createGatewayPodConfig(serviceName.value) + podConfig, err := createGatewayPodConfig(version, serviceName.value) if err != nil { return fmt.Errorf("error creating gateway pod config: %w", err) } @@ -242,7 +243,6 @@ func createStaticModeCommand() *cobra.Command { EndpointInsecure: telemetryEndpointInsecure, }, Plus: plus, - Version: version, ExperimentalFeatures: gwExperimentalFeatures, ImageSource: imageSource, Flags: config.Flags{ @@ -649,33 +649,46 @@ func getBuildInfo() (commitHash string, commitTime string, dirtyBuild string) { return } -func createGatewayPodConfig(svcName string) (config.GatewayPodConfig, error) { - podIP, err := getValueFromEnv("POD_IP") +func createGatewayPodConfig(version, svcName string) (config.GatewayPodConfig, error) { + podUID, err := getValueFromEnv("POD_UID") if err != nil { return config.GatewayPodConfig{}, err } - podUID, err := getValueFromEnv("POD_UID") + ns, err := getValueFromEnv("POD_NAMESPACE") if err != nil { return config.GatewayPodConfig{}, err } - ns, err := getValueFromEnv("POD_NAMESPACE") + name, err := getValueFromEnv("POD_NAME") if err != nil { return config.GatewayPodConfig{}, err } - name, err := getValueFromEnv("POD_NAME") + instance, err := getValueFromEnv("INSTANCE_NAME") if err != nil { return config.GatewayPodConfig{}, err } + image, err := getValueFromEnv("IMAGE_NAME") + if err != nil { + return config.GatewayPodConfig{}, err + } + + // use image tag version if set, otherwise fall back to binary version + ngfVersion := version + if imageParts := strings.Split(image, ":"); len(imageParts) == 2 { + ngfVersion = imageParts[1] + } + c := config.GatewayPodConfig{ - PodIP: podIP, - ServiceName: svcName, - Namespace: ns, - Name: name, - UID: podUID, + ServiceName: svcName, + 
Namespace: ns, + Name: name, + UID: podUID, + InstanceName: instance, + Version: ngfVersion, + Image: image, } return c, nil diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go index 2c1ac5d266..b58fa3331b 100644 --- a/cmd/gateway/commands_test.go +++ b/cmd/gateway/commands_test.go @@ -669,43 +669,62 @@ func TestCreateGatewayPodConfig(t *testing.T) { // Order matters here // We start with all env vars set - g.Expect(os.Setenv("POD_IP", "10.0.0.0")).To(Succeed()) g.Expect(os.Setenv("POD_UID", "1234")).To(Succeed()) g.Expect(os.Setenv("POD_NAMESPACE", "default")).To(Succeed()) g.Expect(os.Setenv("POD_NAME", "my-pod")).To(Succeed()) + g.Expect(os.Setenv("INSTANCE_NAME", "my-pod-xyz")).To(Succeed()) + g.Expect(os.Setenv("IMAGE_NAME", "my-pod-image:tag")).To(Succeed()) + + version := "0.0.0" expCfg := config.GatewayPodConfig{ - PodIP: "10.0.0.0", - ServiceName: "svc", - Namespace: "default", - Name: "my-pod", - UID: "1234", + ServiceName: "svc", + Namespace: "default", + Name: "my-pod", + UID: "1234", + InstanceName: "my-pod-xyz", + Version: "tag", + Image: "my-pod-image:tag", } - cfg, err := createGatewayPodConfig("svc") + cfg, err := createGatewayPodConfig(version, "svc") g.Expect(err).To(Not(HaveOccurred())) g.Expect(cfg).To(Equal(expCfg)) + // unset image tag and use provided version + g.Expect(os.Setenv("IMAGE_NAME", "my-pod-image")).To(Succeed()) + expCfg.Version = version + expCfg.Image = "my-pod-image" + cfg, err = createGatewayPodConfig(version, "svc") + g.Expect(err).To(Not(HaveOccurred())) + g.Expect(cfg).To(Equal(expCfg)) + + // unset image name + g.Expect(os.Unsetenv("IMAGE_NAME")).To(Succeed()) + cfg, err = createGatewayPodConfig(version, "svc") + g.Expect(err).To(MatchError(errors.New("environment variable IMAGE_NAME not set"))) + g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) + + // unset instance name + g.Expect(os.Unsetenv("INSTANCE_NAME")).To(Succeed()) + cfg, err = createGatewayPodConfig(version, "svc") + g.Expect(err).To(MatchError(errors.New("environment variable INSTANCE_NAME not set"))) + g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) + // unset name g.Expect(os.Unsetenv("POD_NAME")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") + cfg, err = createGatewayPodConfig(version, "svc") g.Expect(err).To(MatchError(errors.New("environment variable POD_NAME not set"))) g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) // unset namespace g.Expect(os.Unsetenv("POD_NAMESPACE")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") + cfg, err = createGatewayPodConfig(version, "svc") g.Expect(err).To(MatchError(errors.New("environment variable POD_NAMESPACE not set"))) g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) // unset pod UID g.Expect(os.Unsetenv("POD_UID")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") + cfg, err = createGatewayPodConfig(version, "svc") g.Expect(err).To(MatchError(errors.New("environment variable POD_UID not set"))) g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) - - // unset IP - g.Expect(os.Unsetenv("POD_IP")).To(Succeed()) - cfg, err = createGatewayPodConfig("svc") - g.Expect(err).To(MatchError(errors.New("environment variable POD_IP not set"))) - g.Expect(cfg).To(Equal(config.GatewayPodConfig{})) } diff --git a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml index d0772be9f6..ea0f93d9f5 100644 --- a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml +++ b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml @@ -66,9 +66,3455 @@ spec: - 
ipv4 - ipv6 type: string + kubernetes: + description: Kubernetes contains the configuration for the NGINX Deployment + and Service Kubernetes objects. + properties: + deployment: + description: |- + Deployment is the configuration for the NGINX Deployment. + This is the default deployment option. + properties: + container: + description: Container defines container fields for the NGINX + container. + properties: + image: + description: Image is the NGINX image to use. + properties: + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when + to pull a container image. + enum: + - Always + - Never + - IfNotPresent + type: string + repository: + description: |- + Repository is the image path. + Default is ghcr.io/nginx/nginx-gateway-fabric/nginx. + type: string + tag: + description: Tag is the image tag to use. Default + matches the tag of the control plane. + type: string + type: object + lifecycle: + description: |- + Lifecycle describes actions that the management system should take in response to container lifecycle + events. For the PostStart and PreStop lifecycle handlers, management of the container blocks + until the action is complete, unless the container process fails, in which case the handler is aborted. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + resources: + description: Resources describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + volumeMounts: + description: VolumeMounts describe the mounting of Volumes + within a container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. 
+ When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + pod: + description: Pod defines Pod-specific fields. + properties: + affinity: + description: Affinity is the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in + the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + type: object + terminationGracePeriodSeconds: + description: |- + TerminationGracePeriodSeconds is the optional duration in seconds the pod needs to terminate gracefully. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: Tolerations allow the scheduler to schedule + Pods with matching taints. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. 
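For orientation, a minimal pod anti-affinity value that satisfies the schema above could look like the sketch below; the label key/value and the parent field this nests under are placeholders, not taken from this patch.

# Illustrative sketch only: prefer spreading pods across nodes.
# The app.kubernetes.io/name value is a placeholder, not defined by this hunk.
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100
      podAffinityTerm:
        labelSelector:
          matchLabels:
            app.kubernetes.io/name: nginx-gateway-fabric
        topologyKey: kubernetes.io/hostname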
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). 
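The tolerations field above mirrors the core v1 Toleration API; a hedged example of a value it would accept, where the taint keys and values are placeholders:

# Illustrative sketch only: taint key/value are placeholders, not defined by this hunk.
tolerations:
- key: dedicated
  operator: Equal
  value: ingress
  effect: NoSchedule
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 60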
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. 
+ type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + description: Volumes represents named volumes in a pod + that may be accessed by any container in the pod. + items: + description: Volume represents a named volume in a pod + that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
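Putting the maxSkew, topologyKey, and whenUnsatisfiable fields together, a typical spread constraint accepted by this schema might be written as follows; the label selector is a placeholder:

# Illustrative sketch only: spread pods across zones with at most one pod of skew.
topologySpreadConstraints:
- maxSkew: 1
  topologyKey: topology.kubernetes.io/zone
  whenUnsatisfiable: ScheduleAnyway
  labelSelector:
    matchLabels:
      app.kubernetes.io/name: nginx-gateway-fabric   # placeholder label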
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching + mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data + disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk + in the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: + multiple blob disks per storage account Dedicated: + single blob disk per storage account Managed: + azure managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret + that contains Azure Storage Account Name and + Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the + mounted root, rather than the full Ceph tree, + default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that + should populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API + about the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
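As a sketch of the csi volume source described above; the driver and attribute shown assume the Secrets Store CSI driver is installed, which nothing in this patch requires:

# Illustrative sketch only: assumes the secrets-store.csi.k8s.io driver exists in the cluster.
volumes:
- name: secrets-store
  csi:
    driver: secrets-store.csi.k8s.io
    readOnly: true
    volumeAttributes:
      secretProviderClass: nginx-tls   # placeholder SecretProviderClass name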
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API + volume file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. + The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
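A hedged example combining the configMap and emptyDir volume sources documented above; names, keys, and sizes are placeholders:

# Illustrative sketch only: all names and keys are placeholders.
volumes:
- name: nginx-cache
  emptyDir:
    medium: Memory
    sizeLimit: 64Mi
- name: extra-conf
  configMap:
    name: my-nginx-includes
    defaultMode: 0644
    items:
    - key: gzip.conf
      path: gzip.conf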
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding + reference to the PersistentVolume + backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and + then exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun + number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver + to use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field + holds extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. 
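As a sketch of the ephemeral volume source above, a generic ephemeral volume with an inline claim template could be written as follows; the storage class and size are placeholders:

# Illustrative sketch only: storageClassName and size are placeholders.
volumes:
- name: scratch
  ephemeral:
    volumeClaimTemplate:
      metadata:
        labels:
          type: scratch
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: standard
        resources:
          requests:
            storage: 1Gi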
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the + dataset. This is unique identifier of a Flocker + dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for + the specified revision. 
+ type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. 
Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether + support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether + support iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified + Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun + number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for + iSCSI target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. 
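A hedged sketch of the image (OCI artifact) volume source described above; it assumes a cluster where the ImageVolume feature is available, and the artifact reference is a placeholder:

# Illustrative sketch only: requires the ImageVolume feature; reference is a placeholder.
volumes:
- name: waf-rules
  image:
    reference: ghcr.io/example/waf-rules:1.0
    pullPolicy: IfNotPresent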
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies + Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
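A minimal persistentVolumeClaim volume matching the schema above; the claim name is a placeholder:

# Illustrative sketch only: claimName is a placeholder.
volumes:
- name: shared-config
  persistentVolumeClaim:
    claimName: nginx-shared
    readOnly: true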
+ type: boolean + volumeID: + description: volumeID uniquely identifies a + Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the + volume root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about + the configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and + uid are supported.' + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path + is the relative path name + of the file to be created. + Must not be absolute or contain + the ''..'' path. Must be utf-8 + encoded. The first item of + the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the + output format of the exposed + resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about + the secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify + whether the Secret or its key must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to + project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of + the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of + the ScaleIO Protection Domain for the configured + storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable + SSL communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage + system as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage + Policy Based Management (SPBM) profile ID + associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage + Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + replicas: + description: Number of desired Pods. + format: int32 + type: integer + type: object + service: + description: Service is the configuration for the NGINX Service. + properties: + annotations: + additionalProperties: + type: string + description: Annotations contain any Service-specific annotations. + type: object + externalTrafficPolicy: + default: Local + description: |- + ExternalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, + and LoadBalancer IPs. + enum: + - Cluster + - Local + type: string + loadBalancerIP: + description: LoadBalancerIP is a static IP address for the + load balancer. Requires service type to be LoadBalancer. + type: string + loadBalancerSourceRanges: + description: |- + LoadBalancerSourceRanges are the IP ranges (CIDR) that are allowed to access the load balancer. + Requires service type to be LoadBalancer. + items: + type: string + type: array + type: + default: LoadBalancer + description: ServiceType describes ingress method for the + Service. + enum: + - ClusterIP + - LoadBalancer + - NodePort + type: string + type: object + type: object logging: description: Logging defines logging related settings for NGINX. properties: + agentLevel: + default: info + description: |- + AgentLevel defines the log level of the NGINX agent process. 
Changing this value results in a + re-roll of the NGINX deployment. + enum: + - debug + - info + - error + - panic + - fatal + type: string errorLevel: default: info description: |- @@ -87,6 +3533,22 @@ spec: - emerg type: string type: object + metrics: + description: |- + Metrics defines the configuration for Prometheus scraping metrics. Changing this value results in a + re-roll of the NGINX deployment. + properties: + disable: + description: Disable serving Prometheus metrics on the listen + port. + type: boolean + port: + description: Port where the Prometheus metrics are exposed. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object nginxPlus: description: NginxPlus specifies NGINX Plus additional settings. properties: diff --git a/config/tests/static-deployment.yaml b/config/tests/static-deployment.yaml index 2a53887183..7c1b2df7e9 100644 --- a/config/tests/static-deployment.yaml +++ b/config/tests/static-deployment.yaml @@ -33,10 +33,6 @@ spec: - --leader-election-lock-name=nginx-gateway-leader-election - --product-telemetry-disable env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -49,6 +45,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml index c983b691fd..d49067708b 100644 --- a/deploy/aws-nlb/deploy.yaml +++ b/deploy/aws-nlb/deploy.yaml @@ -24,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -141,60 +155,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -214,34 +174,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - annotations: - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip - service.beta.kubernetes.io/aws-load-balancer-type: external - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local 
- ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -277,10 +209,6 @@ spec: - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -293,6 +221,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -325,135 +259,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: 
nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -464,6 +269,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -477,3 +287,28 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-type: external + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index 0e0330d243..e7d0ef976d 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -24,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -141,60 +155,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -214,31 +174,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -274,10 +209,6 @@ spec: - --health-port=8081 - 
--leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -290,6 +221,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -324,137 +261,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - nodeSelector: - kubernetes.io/os: linux - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass 
metadata: @@ -465,6 +271,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -478,3 +289,28 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + pod: + nodeSelector: + kubernetes.io/os: linux + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/crds.yaml b/deploy/crds.yaml index 8a4a379c83..2d18df8928 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -651,9 +651,3455 @@ spec: - ipv4 - ipv6 type: string + kubernetes: + description: Kubernetes contains the configuration for the NGINX Deployment + and Service Kubernetes objects. + properties: + deployment: + description: |- + Deployment is the configuration for the NGINX Deployment. + This is the default deployment option. + properties: + container: + description: Container defines container fields for the NGINX + container. + properties: + image: + description: Image is the NGINX image to use. + properties: + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when + to pull a container image. + enum: + - Always + - Never + - IfNotPresent + type: string + repository: + description: |- + Repository is the image path. + Default is ghcr.io/nginx/nginx-gateway-fabric/nginx. + type: string + tag: + description: Tag is the image tag to use. Default + matches the tag of the control plane. + type: string + type: object + lifecycle: + description: |- + Lifecycle describes actions that the management system should take in response to container lifecycle + events. For the PostStart and PreStop lifecycle handlers, management of the container blocks + until the action is complete, unless the container process fails, in which case the handler is aborted. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + resources: + description: Resources describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + volumeMounts: + description: VolumeMounts describe the mounting of Volumes + within a container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + pod: + description: Pod defines Pod-specific fields. + properties: + affinity: + description: Affinity is the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in + the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
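# --- Illustrative example (editorial note; not part of the generated schema or this patch) ---
# A hedged sketch of a required podAffinity term as described above; the label selector and
# topologyKey shown here are hypothetical, not values this project prescribes.
# affinity:
#   podAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       - labelSelector:
#           matchLabels:
#             app.kubernetes.io/name: some-backend   # hypothetical label
#         topologyKey: kubernetes.io/hostname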
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. 
+ Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. 
+ Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + type: object + terminationGracePeriodSeconds: + description: |- + TerminationGracePeriodSeconds is the optional duration in seconds the pod needs to terminate gracefully. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: Tolerations allow the scheduler to schedule + Pods with matching taints. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
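# --- Illustrative example (editorial note; not part of the generated schema or this patch) ---
# A minimal sketch of the nodeSelector, terminationGracePeriodSeconds, and tolerations fields
# described above; the taint key and value are hypothetical placeholders.
# nodeSelector:
#   kubernetes.io/os: linux
# terminationGracePeriodSeconds: 60
# tolerations:
#   - key: dedicated            # hypothetical taint key
#     operator: Equal
#     value: data-plane
#     effect: NoSchedule
#   - key: node.kubernetes.io/not-ready
#     operator: Exists
#     effect: NoExecute
#     tolerationSeconds: 300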
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. 
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + description: Volumes represents named volumes in a pod + that may be accessed by any container in the pod. + items: + description: Volume represents a named volume in a pod + that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching + mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data + disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk + in the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: + multiple blob disks per storage account Dedicated: + single blob disk per storage account Managed: + azure managed data disk (only in managed availability + set). 
defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret + that contains Azure Storage Account Name and + Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the + mounted root, rather than the full Ceph tree, + default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). 
ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that + should populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API + about the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API + volume file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. 
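# --- Illustrative example (editorial note; not part of the generated schema or this patch) ---
# A brief sketch of a configMap volume entry as described above; the volume name, ConfigMap
# name, key, and path are hypothetical.
# volumes:
#   - name: extra-conf            # hypothetical volume name
#     configMap:
#       name: nginx-extra-conf    # hypothetical ConfigMap
#       defaultMode: 0644
#       items:
#         - key: extra.conf
#           path: extra.conf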
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. + The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 
+ + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding + reference to the PersistentVolume + backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and + then exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun + number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver + to use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field + holds extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the + dataset. This is unique identifier of a Flocker + dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for + the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. 
Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether + support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether + support iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified + Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun + number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for + iSCSI target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies + Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a + Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the + volume root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about + the configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and + uid are supported.' + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path + is the relative path name + of the file to be created. + Must not be absolute or contain + the ''..'' path. Must be utf-8 + encoded. The first item of + the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the + output format of the exposed + resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about + the secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify + whether the Secret or its key must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to + project + properties: + audience: + description: |- + audience is the intended audience of the token. 
A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of + the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of + the ScaleIO Protection Domain for the configured + storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable + SSL communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage + system as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage + Policy Based Management (SPBM) profile ID + associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage + Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + replicas: + description: Number of desired Pods. + format: int32 + type: integer + type: object + service: + description: Service is the configuration for the NGINX Service. + properties: + annotations: + additionalProperties: + type: string + description: Annotations contain any Service-specific annotations. + type: object + externalTrafficPolicy: + default: Local + description: |- + ExternalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, + and LoadBalancer IPs. + enum: + - Cluster + - Local + type: string + loadBalancerIP: + description: LoadBalancerIP is a static IP address for the + load balancer. Requires service type to be LoadBalancer. + type: string + loadBalancerSourceRanges: + description: |- + LoadBalancerSourceRanges are the IP ranges (CIDR) that are allowed to access the load balancer. + Requires service type to be LoadBalancer. + items: + type: string + type: array + type: + default: LoadBalancer + description: ServiceType describes ingress method for the + Service. + enum: + - ClusterIP + - LoadBalancer + - NodePort + type: string + type: object + type: object logging: description: Logging defines logging related settings for NGINX. properties: + agentLevel: + default: info + description: |- + AgentLevel defines the log level of the NGINX agent process. Changing this value results in a + re-roll of the NGINX deployment. 
+ enum: + - debug + - info + - error + - panic + - fatal + type: string errorLevel: default: info description: |- @@ -672,6 +4118,22 @@ spec: - emerg type: string type: object + metrics: + description: |- + Metrics defines the configuration for Prometheus scraping metrics. Changing this value results in a + re-roll of the NGINX deployment. + properties: + disable: + description: Disable serving Prometheus metrics on the listen + port. + type: boolean + port: + description: Port where the Prometheus metrics are exposed. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object nginxPlus: description: NginxPlus specifies NGINX Plus additional settings. properties: diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index 8e51e699fc..42cc36de4a 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -24,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -141,60 +155,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -214,31 +174,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -274,10 +209,6 @@ spec: - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -290,6 +221,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -322,135 +259,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: 
apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -461,6 +269,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -474,3 +287,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + 
image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index 009dd2aaad..88f5e771cd 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -4,8 +4,6 @@ metadata: name: nginx-gateway --- apiVersion: v1 -imagePullSecrets: -- name: nginx-plus-registry-secret kind: ServiceAccount metadata: labels: @@ -26,12 +24,25 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets - - pods - configmaps + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces + - pods verbs: - get - list @@ -148,66 +159,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - - api-action - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; - mgmt.conf: | - mgmt { - enforce_initial_report off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -227,31 +178,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -290,10 +216,6 @@ spec: - --leader-election-lock-name=nginx-gateway-leader-election - --gateway-api-experimental-features env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -306,6 +228,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -338,150 +266,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: 
- app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -492,6 +276,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -505,3 +294,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + 
labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index c847f0a4cd..15311817cc 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -24,12 +24,25 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets - - pods - configmaps + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces + - pods verbs: - get - list @@ -146,60 +159,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -219,31 +178,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -280,10 +214,6 @@ spec: - --leader-election-lock-name=nginx-gateway-leader-election - --gateway-api-experimental-features env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -296,6 +226,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -328,135 +264,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: 
tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -467,6 +274,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -480,3 +292,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git 
a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index 282a7b8878..ca6be2dd91 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -4,8 +4,6 @@ metadata: name: nginx-gateway --- apiVersion: v1 -imagePullSecrets: -- name: nginx-plus-registry-secret kind: ServiceAccount metadata: labels: @@ -26,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -143,66 +155,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - - api-action - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; - mgmt.conf: | - mgmt { - enforce_initial_report off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -222,31 +174,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -284,10 +211,6 @@ spec: - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -300,6 +223,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -332,150 +261,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - 
app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -486,6 +271,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -499,3 +289,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + 
pullPolicy: Always + repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index ec4d874a80..fa29623d9a 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -24,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -141,60 +155,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -214,31 +174,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: NodePort ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -274,10 +209,6 @@ spec: - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -290,6 +221,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -322,135 +259,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - 
imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -461,6 +269,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -474,3 +287,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: NodePort diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index b22981cd1b..e2701bf885 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -24,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - 
services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -130,6 +144,8 @@ rules: - securitycontextconstraints verbs: - use + - create + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -149,60 +165,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -222,31 +184,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -282,10 +219,6 @@ spec: - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -298,6 +231,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -330,135 +269,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - 
volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -469,6 +279,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -483,6 +298,28 @@ spec: logging: level: info --- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer +--- allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index ffed3588fd..f452442bef 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -4,8 +4,6 @@ metadata: name: nginx-gateway --- apiVersion: v1 -imagePullSecrets: -- name: nginx-plus-registry-secret kind: ServiceAccount metadata: labels: @@ -26,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - services + - deployments + verbs: + - 
create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -145,66 +157,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - - api-action - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; - mgmt.conf: | - mgmt { - enforce_initial_report off; - deployment_context /etc/nginx/main-includes/deployment_ctx.json; - } -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -224,31 +176,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -287,10 +214,6 @@ spec: - --leader-election-lock-name=nginx-gateway-leader-election - --snippets-filters env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -303,6 +226,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -335,150 +264,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - 
name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - - mountPath: /var/lib/nginx/state - name: nginx-lib - - mountPath: /etc/nginx/license.jwt - name: nginx-plus-license - subPath: license.jwt - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - - --source - - /includes/mgmt.conf - - --nginx-plus - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap - - emptyDir: {} - name: nginx-lib - - name: nginx-plus-license - secret: - secretName: nplus-license ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -489,6 +274,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -502,3 +292,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index 6fa5e75077..dfe78332b5 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -24,10 +24,24 @@ metadata: rules: - apiGroups: - "" + - apps resources: - - namespaces - - services - secrets + - configmaps + - serviceaccounts + - 
services + - deployments + verbs: + - create + - update + - delete + - list + - get + - watch +- apiGroups: + - "" + resources: + - namespaces - pods verbs: - get @@ -143,60 +157,6 @@ subjects: namespace: nginx-gateway --- apiVersion: v1 -data: - nginx-agent.conf: |- - command: - server: - host: nginx-gateway.nginx-gateway.svc - port: 443 - allowed_directories: - - /etc/nginx - - /usr/share/nginx - - /var/run/nginx - features: - - connection - - configuration - - certificates - - metrics - log: - level: debug - collector: - receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} - processors: - batch: {} - exporters: - prometheus_exporter: - server: - host: "0.0.0.0" - port: 9113 -kind: ConfigMap -metadata: - name: nginx-agent-config - namespace: nginx-gateway ---- -apiVersion: v1 -data: - main.conf: | - error_log stderr info; -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-includes-bootstrap - namespace: nginx-gateway ---- -apiVersion: v1 kind: Service metadata: labels: @@ -216,31 +176,6 @@ spec: app.kubernetes.io/name: nginx-gateway type: ClusterIP --- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - type: LoadBalancer ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -277,10 +212,6 @@ spec: - --leader-election-lock-name=nginx-gateway-leader-election - --snippets-filters env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - name: POD_NAMESPACE valueFrom: fieldRef: @@ -293,6 +224,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid + - name: INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: IMAGE_NAME + value: ghcr.io/nginx/nginx-gateway-fabric:edge image: ghcr.io/nginx/nginx-gateway-fabric:edge imagePullPolicy: Always name: nginx-gateway @@ -325,135 +262,6 @@ spec: serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tmp-nginx-deployment - namespace: nginx-gateway -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: tmp-nginx-deployment - spec: - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric/nginx:edge - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 80 - name: http - - containerPort: 443 - name: https - - containerPort: 9113 - name: metrics - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /var/log/nginx-agent - name: nginx-agent-log - - mountPath: /etc/nginx/conf.d - name: 
nginx-conf - - mountPath: /etc/nginx/stream-conf.d - name: nginx-stream-conf - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - - mountPath: /etc/nginx/secrets - name: nginx-secrets - - mountPath: /var/run/nginx - name: nginx-run - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /etc/nginx/includes - name: nginx-includes - initContainers: - - command: - - /usr/bin/gateway - - initialize - - --source - - /agent/nginx-agent.conf - - --destination - - /etc/nginx-agent - - --source - - /includes/main.conf - - --destination - - /etc/nginx/main-includes - env: - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: init - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /agent - name: nginx-agent-config - - mountPath: /etc/nginx-agent - name: nginx-agent - - mountPath: /includes - name: nginx-includes-bootstrap - - mountPath: /etc/nginx/main-includes - name: nginx-main-includes - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-agent - - configMap: - name: nginx-agent-config - name: nginx-agent-config - - emptyDir: {} - name: nginx-agent-log - - emptyDir: {} - name: nginx-conf - - emptyDir: {} - name: nginx-stream-conf - - emptyDir: {} - name: nginx-main-includes - - emptyDir: {} - name: nginx-secrets - - emptyDir: {} - name: nginx-run - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-includes - - configMap: - name: nginx-includes-bootstrap - name: nginx-includes-bootstrap ---- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: @@ -464,6 +272,11 @@ metadata: name: nginx spec: controllerName: gateway.nginx.org/nginx-gateway-controller + parametersRef: + group: gateway.nginx.org + kind: NginxProxy + name: nginx-gateway-proxy-config + namespace: nginx-gateway --- apiVersion: gateway.nginx.org/v1alpha1 kind: NginxGateway @@ -477,3 +290,25 @@ metadata: spec: logging: level: info +--- +apiVersion: gateway.nginx.org/v1alpha2 +kind: NginxProxy +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-proxy-config + namespace: nginx-gateway +spec: + kubernetes: + deployment: + container: + image: + pullPolicy: Always + repository: ghcr.io/nginx/nginx-gateway-fabric/nginx + tag: edge + replicas: 1 + service: + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/docs/developer/quickstart.md b/docs/developer/quickstart.md index 058d3d5b9d..697acf5036 100644 --- a/docs/developer/quickstart.md +++ b/docs/developer/quickstart.md @@ -183,13 +183,13 @@ This will build the docker images `nginx-gateway-fabric:` and `nginx- - To install with Helm (where your release name is `my-release`): ```shell - helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never -n nginx-gateway + helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set 
nginx.service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never -n nginx-gateway ``` - To install NGINX Plus with Helm (where your release name is `my-release`): ```shell - helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx-plus --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never --set nginx.plus=true -n nginx-gateway + helm install my-release ./charts/nginx-gateway-fabric --create-namespace --wait --set nginx.service.type=NodePort --set nginxGateway.image.repository=nginx-gateway-fabric --set nginxGateway.image.tag=$(whoami) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=nginx-gateway-fabric/nginx-plus --set nginx.image.tag=$(whoami) --set nginx.image.pullPolicy=Never --set nginx.plus=true -n nginx-gateway ``` > For more information on Helm configuration options see the Helm [README](../../charts/nginx-gateway-fabric/README.md). diff --git a/examples/helm/aws-nlb/values.yaml b/examples/helm/aws-nlb/values.yaml index b1ffc87974..3034ca995f 100644 --- a/examples/helm/aws-nlb/values.yaml +++ b/examples/helm/aws-nlb/values.yaml @@ -1,7 +1,8 @@ nginxGateway: name: nginx-gateway -service: - type: LoadBalancer - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "external" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" +nginx: + service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "external" + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" diff --git a/examples/helm/azure/values.yaml b/examples/helm/azure/values.yaml index 3dbfc24256..ee6669108c 100644 --- a/examples/helm/azure/values.yaml +++ b/examples/helm/azure/values.yaml @@ -1,4 +1,8 @@ nginxGateway: name: nginx-gateway -nodeSelector: - kubernetes.io/os: linux + nodeSelector: + kubernetes.io/os: linux +nginx: + pod: + nodeSelector: + kubernetes.io/os: linux diff --git a/examples/helm/experimental-nginx-plus/values.yaml b/examples/helm/experimental-nginx-plus/values.yaml index 08469ce364..e1d854fd3a 100644 --- a/examples/helm/experimental-nginx-plus/values.yaml +++ b/examples/helm/experimental-nginx-plus/values.yaml @@ -7,6 +7,4 @@ nginx: plus: true image: repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus - -serviceAccount: imagePullSecret: nginx-plus-registry-secret diff --git a/examples/helm/nginx-plus/values.yaml b/examples/helm/nginx-plus/values.yaml index b8b842d16a..0b85bfc51b 100644 --- a/examples/helm/nginx-plus/values.yaml +++ b/examples/helm/nginx-plus/values.yaml @@ -5,6 +5,4 @@ nginx: plus: true image: repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus - -serviceAccount: imagePullSecret: nginx-plus-registry-secret diff --git a/examples/helm/nodeport/values.yaml b/examples/helm/nodeport/values.yaml index 17da6a8849..93318a7b96 100644 --- a/examples/helm/nodeport/values.yaml +++ b/examples/helm/nodeport/values.yaml @@ -1,4 +1,5 @@ nginxGateway: name: nginx-gateway -service: - type: NodePort +nginx: + service: + type: NodePort diff --git 
a/examples/helm/snippets-filters-nginx-plus/values.yaml b/examples/helm/snippets-filters-nginx-plus/values.yaml index 9cacfdb168..89cc0b59b4 100644 --- a/examples/helm/snippets-filters-nginx-plus/values.yaml +++ b/examples/helm/snippets-filters-nginx-plus/values.yaml @@ -7,6 +7,4 @@ nginx: plus: true image: repository: private-registry.nginx.com/nginx-gateway-fabric/nginx-plus - -serviceAccount: imagePullSecret: nginx-plus-registry-secret diff --git a/go.mod b/go.mod index c541024259..8d6f09cc9c 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( go.opentelemetry.io/otel v1.35.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 go.uber.org/zap v1.27.0 + golang.org/x/text v0.24.0 google.golang.org/grpc v1.72.0 google.golang.org/protobuf v1.36.6 k8s.io/api v0.32.3 @@ -80,7 +81,6 @@ require ( golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.32.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/internal/framework/controller/labels.go b/internal/framework/controller/labels.go new file mode 100644 index 0000000000..79b6b55113 --- /dev/null +++ b/internal/framework/controller/labels.go @@ -0,0 +1,12 @@ +package controller + +// The following labels are added to each nginx resource created by the control plane. +const ( + GatewayLabel = "gateway.networking.k8s.io/gateway-name" + AppNameLabel = "app.kubernetes.io/name" + AppInstanceLabel = "app.kubernetes.io/instance" + AppManagedByLabel = "app.kubernetes.io/managed-by" +) + +// RestartedAnnotation is added to a Deployment or DaemonSet's PodSpec to trigger a rolling restart. +const RestartedAnnotation = "kubectl.kubernetes.io/restartedAt" diff --git a/internal/framework/controller/predicate/annotation.go b/internal/framework/controller/predicate/annotation.go index fdf1fd696f..46b48660de 100644 --- a/internal/framework/controller/predicate/annotation.go +++ b/internal/framework/controller/predicate/annotation.go @@ -1,8 +1,11 @@ package predicate import ( + appsv1 "k8s.io/api/apps/v1" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" ) // AnnotationPredicate implements a predicate function based on the Annotation. @@ -37,3 +40,41 @@ func (cp AnnotationPredicate) Update(e event.UpdateEvent) bool { return oldAnnotationVal != newAnnotationVal } + +// RestartDeploymentAnnotationPredicate skips update events if they are due to a rolling restart. +// This type of event is triggered by adding an annotation to the deployment's PodSpec. +// This is used by the provisioner to ensure it allows for rolling restarts of the nginx deployment +// without reverting the annotation and deleting the new pod(s). Otherwise, if a user changes +// the nginx deployment, we want to see that event so we can revert it back to the configuration +// that we expect it to have. +type RestartDeploymentAnnotationPredicate struct { + predicate.Funcs +} + +// Update filters UpdateEvents based on if the annotation is present or changed. 
+func (cp RestartDeploymentAnnotationPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + // this case should not happen + return false + } + + depOld, ok := e.ObjectOld.(*appsv1.Deployment) + if !ok { + return false + } + + depNew, ok := e.ObjectNew.(*appsv1.Deployment) + if !ok { + return false + } + + oldVal, oldExists := depOld.Spec.Template.Annotations[controller.RestartedAnnotation] + + if newVal, ok := depNew.Spec.Template.Annotations[controller.RestartedAnnotation]; ok { + if !oldExists || newVal != oldVal { + return false + } + } + + return true +} diff --git a/internal/framework/controller/predicate/annotation_test.go b/internal/framework/controller/predicate/annotation_test.go index 47cd762839..4ecc448b4d 100644 --- a/internal/framework/controller/predicate/annotation_test.go +++ b/internal/framework/controller/predicate/annotation_test.go @@ -4,9 +4,13 @@ import ( "testing" . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" ) func TestAnnotationPredicate_Create(t *testing.T) { @@ -222,3 +226,177 @@ func TestAnnotationPredicate_Update(t *testing.T) { }) } } + +func TestRestartDeploymentAnnotationPredicate_Update(t *testing.T) { + t.Parallel() + + tests := []struct { + event event.UpdateEvent + name string + expUpdate bool + }{ + { + name: "annotation added", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: false, + }, + { + name: "annotation changed", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "false", + }, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: false, + }, + { + name: "annotation removed", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + }, + }, + }, + expUpdate: true, + }, + { + name: "annotation unchanged", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: 
corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: true, + }, + { + name: "old object is nil", + event: event.UpdateEvent{ + ObjectOld: nil, + ObjectNew: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + }, + expUpdate: false, + }, + { + name: "new object is nil", + event: event.UpdateEvent{ + ObjectOld: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controller.RestartedAnnotation: "true", + }, + }, + }, + }, + }, + ObjectNew: nil, + }, + expUpdate: false, + }, + { + name: "both objects are nil", + event: event.UpdateEvent{ + ObjectOld: nil, + ObjectNew: nil, + }, + expUpdate: false, + }, + } + + p := RestartDeploymentAnnotationPredicate{} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + update := p.Update(test.event) + g.Expect(update).To(Equal(test.expUpdate)) + }) + } +} diff --git a/internal/framework/controller/predicate/label.go b/internal/framework/controller/predicate/label.go new file mode 100644 index 0000000000..06d1d157a6 --- /dev/null +++ b/internal/framework/controller/predicate/label.go @@ -0,0 +1,18 @@ +package predicate + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8spredicate "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// NginxLabelPredicate returns a predicate that only matches resources with the nginx labels. +func NginxLabelPredicate(selector metav1.LabelSelector) k8spredicate.Predicate { + labelPredicate, err := k8spredicate.LabelSelectorPredicate(selector) + if err != nil { + panic(fmt.Sprintf("error creating label selector: %v", err)) + } + + return labelPredicate +} diff --git a/internal/framework/controller/predicate/service.go b/internal/framework/controller/predicate/service.go index 04eea8f5d2..21e59e6ee0 100644 --- a/internal/framework/controller/predicate/service.go +++ b/internal/framework/controller/predicate/service.go @@ -2,9 +2,7 @@ package predicate import ( apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" ) @@ -65,54 +63,3 @@ func (ServicePortsChangedPredicate) Update(e event.UpdateEvent) bool { return len(newPortSet) > 0 } - -// GatewayServicePredicate implements predicate functions for this Pod's Service. -type GatewayServicePredicate struct { - predicate.Funcs - NSName types.NamespacedName -} - -// Update implements the default UpdateEvent filter for the Gateway Service. 
-func (gsp GatewayServicePredicate) Update(e event.UpdateEvent) bool { - if e.ObjectOld == nil { - return false - } - if e.ObjectNew == nil { - return false - } - - oldSvc, ok := e.ObjectOld.(*apiv1.Service) - if !ok { - return false - } - - newSvc, ok := e.ObjectNew.(*apiv1.Service) - if !ok { - return false - } - - if client.ObjectKeyFromObject(newSvc) != gsp.NSName { - return false - } - - if oldSvc.Spec.Type != newSvc.Spec.Type { - return true - } - - if newSvc.Spec.Type == apiv1.ServiceTypeLoadBalancer { - oldIngress := oldSvc.Status.LoadBalancer.Ingress - newIngress := newSvc.Status.LoadBalancer.Ingress - - if len(oldIngress) != len(newIngress) { - return true - } - - for i, ingress := range oldIngress { - if ingress.IP != newIngress[i].IP || ingress.Hostname != newIngress[i].Hostname { - return true - } - } - } - - return false -} diff --git a/internal/framework/controller/predicate/service_test.go b/internal/framework/controller/predicate/service_test.go index 98176774ec..fcb4aa694f 100644 --- a/internal/framework/controller/predicate/service_test.go +++ b/internal/framework/controller/predicate/service_test.go @@ -5,8 +5,6 @@ import ( . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -245,223 +243,7 @@ func TestServicePortsChangedPredicate(t *testing.T) { t.Parallel() g := NewWithT(t) - p := GatewayServicePredicate{} - - g.Expect(p.Delete(event.DeleteEvent{Object: &v1.Service{}})).To(BeTrue()) - g.Expect(p.Create(event.CreateEvent{Object: &v1.Service{}})).To(BeTrue()) - g.Expect(p.Generic(event.GenericEvent{Object: &v1.Service{}})).To(BeTrue()) -} - -func TestGatewayServicePredicate_Update(t *testing.T) { - t.Parallel() - testcases := []struct { - objectOld client.Object - objectNew client.Object - msg string - expUpdate bool - }{ - { - msg: "nil objectOld", - objectOld: nil, - objectNew: &v1.Service{}, - expUpdate: false, - }, - { - msg: "nil objectNew", - objectOld: &v1.Service{}, - objectNew: nil, - expUpdate: false, - }, - { - msg: "non-Service objectOld", - objectOld: &v1.Namespace{}, - objectNew: &v1.Service{}, - expUpdate: false, - }, - { - msg: "non-Service objectNew", - objectOld: &v1.Service{}, - objectNew: &v1.Namespace{}, - expUpdate: false, - }, - { - msg: "Service not watched", - objectOld: &v1.Service{}, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "not-watched", - }, - }, - expUpdate: false, - }, - { - msg: "something irrelevant changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - ClusterIP: "1.2.3.4", - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - ClusterIP: "5.6.7.8", - }, - }, - expUpdate: false, - }, - { - msg: "type changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, - }, - expUpdate: true, - }, - { - msg: "ingress changed length", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, - }, - }, - }, 
- objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - { - IP: "5.6.7.8", - }, - }, - }, - }, - }, - expUpdate: true, - }, - { - msg: "IP address changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, - }, - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - IP: "5.6.7.8", - }, - }, - }, - }, - }, - expUpdate: true, - }, - { - msg: "Hostname changed", - objectOld: &v1.Service{ - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeLoadBalancer, - }, - Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - Hostname: "one", - }, - }, - }, - }, - }, - objectNew: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx", - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - }, Status: v1.ServiceStatus{ - LoadBalancer: v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - { - Hostname: "two", - }, - }, - }, - }, - }, - expUpdate: true, - }, - } - - p := GatewayServicePredicate{NSName: types.NamespacedName{Namespace: "nginx-gateway", Name: "nginx"}} - - for _, tc := range testcases { - t.Run(tc.msg, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - update := p.Update(event.UpdateEvent{ - ObjectOld: tc.objectOld, - ObjectNew: tc.objectNew, - }) - - g.Expect(update).To(Equal(tc.expUpdate)) - }) - } -} - -func TestGatewayServicePredicate(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - p := GatewayServicePredicate{} + p := ServicePortsChangedPredicate{} g.Expect(p.Delete(event.DeleteEvent{Object: &v1.Service{}})).To(BeTrue()) g.Expect(p.Create(event.CreateEvent{Object: &v1.Service{}})).To(BeTrue()) diff --git a/internal/framework/controller/resource.go b/internal/framework/controller/resource.go new file mode 100644 index 0000000000..c238b64924 --- /dev/null +++ b/internal/framework/controller/resource.go @@ -0,0 +1,9 @@ +package controller + +import "fmt" + +// CreateNginxResourceName creates the base resource name for all nginx resources +// created by the control plane. +func CreateNginxResourceName(gatewayName, gatewayClassName string) string { + return fmt.Sprintf("%s-%s", gatewayName, gatewayClassName) +} diff --git a/internal/mode/static/config/config.go b/internal/mode/static/config/config.go index 82b4238836..19837a780a 100644 --- a/internal/mode/static/config/config.go +++ b/internal/mode/static/config/config.go @@ -8,13 +8,13 @@ import ( "k8s.io/apimachinery/pkg/types" ) +const DefaultNginxMetricsPort = int32(9113) + type Config struct { // AtomicLevel is an atomically changeable, dynamic logging level. AtomicLevel zap.AtomicLevel // UsageReportConfig specifies the NGINX Plus usage reporting configuration. UsageReportConfig UsageReportConfig - // Version is the running NGF version. - Version string // ImageSource is the source of the NGINX Gateway image. 
ImageSource string // Flags contains the NGF command-line flag names and values. @@ -52,8 +52,6 @@ type Config struct { // GatewayPodConfig contains information about this Pod. type GatewayPodConfig struct { - // PodIP is the IP address of this Pod. - PodIP string // ServiceName is the name of the Service that fronts this Pod. ServiceName string // Namespace is the namespace of this Pod. @@ -62,6 +60,13 @@ type GatewayPodConfig struct { Name string // UID is the UID of the Pod. UID string + // InstanceName is the name used in the instance label. + // Generally this will be the name of the Helm release. + InstanceName string + // Version is the running NGF version. + Version string + // Image is the image path of the Pod. + Image string } // MetricsConfig specifies the metrics config. diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 84a658ae4e..a77db52c3c 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -16,6 +16,7 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/events" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" frameworkStatus "github.com/nginx/nginx-gateway-fabric/internal/framework/status" @@ -23,6 +24,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" ngxConfig "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -39,6 +41,8 @@ type eventHandlerConfig struct { ctx context.Context // nginxUpdater updates nginx configuration using the NGINX agent. nginxUpdater agent.NginxUpdater + // nginxProvisioner handles provisioning and deprovisioning nginx resources. + nginxProvisioner provisioner.Provisioner // metricsCollector collects metrics for this controller. metricsCollector handlerMetricsCollector // statusUpdater updates statuses on Kubernetes resources. @@ -73,6 +77,8 @@ type eventHandlerConfig struct { controlConfigNSName types.NamespacedName // gatewayCtlrName is the name of the NGF controller. gatewayCtlrName string + // gatewayClassName is the name of the GatewayClass. + gatewayClassName string // updateGatewayClassStatus enables updating the status of the GatewayClass resource. updateGatewayClassStatus bool // plus is whether or not we are running NGINX Plus. 
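The new controller.CreateNginxResourceName helper added above establishes the naming convention for every provisioned nginx resource: "<gatewayName>-<gatewayClassName>". A minimal, self-contained sketch (not part of the patch; the Gateway name "gateway", namespace "test-ns", and GatewayClass "nginx" are borrowed from the updated handler tests) of how the control plane derives the Service key that getGatewayAddresses later looks up:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// createNginxResourceName mirrors controller.CreateNginxResourceName from this patch:
// the base name for all nginx resources is "<gatewayName>-<gatewayClassName>".
func createNginxResourceName(gatewayName, gatewayClassName string) string {
	return fmt.Sprintf("%s-%s", gatewayName, gatewayClassName)
}

func main() {
	// Hypothetical Gateway "gateway" in namespace "test-ns" under the "nginx" GatewayClass.
	svcName := createNginxResourceName("gateway", "nginx")
	key := types.NamespacedName{Namespace: "test-ns", Name: svcName}
	fmt.Println(key) // test-ns/gateway-nginx
}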
@@ -129,18 +135,6 @@ func newEventHandlerImpl(cfg eventHandlerConfig) *eventHandlerImpl { upsert: handler.nginxGatewayCRDUpsert, delete: handler.nginxGatewayCRDDelete, }, - // NGF-fronting Service - objectFilterKey( - &v1.Service{}, - types.NamespacedName{ - Name: handler.cfg.gatewayPodConfig.ServiceName, - Namespace: handler.cfg.gatewayPodConfig.Namespace, - }, - ): { - upsert: handler.nginxGatewayServiceUpsert, - delete: handler.nginxGatewayServiceDelete, - captureChangeInGraph: true, - }, } go handler.waitForStatusUpdates(cfg.ctx) @@ -194,16 +188,37 @@ func (h *eventHandlerImpl) sendNginxConfig( gr *graph.Graph, changeType state.ChangeType, ) { - deploymentName := types.NamespacedName{ - Name: "tmp-nginx-deployment", - Namespace: h.cfg.gatewayPodConfig.Namespace, + if gr == nil { + logger.Info("Handling events didn't result into NGINX configuration changes") + return + } + + if gr.Gateway == nil { + // still need to update GatewayClass status + obj := &status.QueueObject{ + UpdateType: status.UpdateAll, + } + h.cfg.statusQueue.Enqueue(obj) + return + } + + go func() { + if err := h.cfg.nginxProvisioner.RegisterGateway(ctx, gr.Gateway, gr.DeploymentName.Name); err != nil { + logger.Error(err, "error from provisioner") + } + }() + + if !gr.Gateway.Valid { + obj := &status.QueueObject{ + Deployment: gr.DeploymentName, + UpdateType: status.UpdateAll, + } + h.cfg.statusQueue.Enqueue(obj) + return } - // TODO(sberman): if nginx Deployment is scaled down, we should remove the pod from the ConnectionsTracker - // and Deployment. - // If fully deleted, then delete the deployment from the Store and close the stopCh. stopCh := make(chan struct{}) - deployment := h.cfg.nginxDeployments.GetOrStore(ctx, deploymentName, stopCh) + deployment := h.cfg.nginxDeployments.GetOrStore(ctx, gr.DeploymentName, stopCh) if deployment == nil { panic("expected deployment, got nil") } @@ -216,8 +231,9 @@ func (h *eventHandlerImpl) sendNginxConfig( if configApplied || err != nil { obj := &status.QueueObject{ + UpdateType: status.UpdateAll, Error: err, - Deployment: deploymentName, + Deployment: gr.DeploymentName, } h.cfg.statusQueue.Enqueue(obj) } @@ -232,9 +248,6 @@ func (h *eventHandlerImpl) processStateAndBuildConfig( ) bool { var configApplied bool switch changeType { - case state.NoChange: - logger.Info("Handling events didn't result into NGINX configuration changes") - return false case state.EndpointsOnlyChange: h.version++ cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) @@ -279,28 +292,74 @@ func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) { return } + // TODO(sberman): once we support multiple Gateways, we'll have to get + // the correct Graph for the Deployment contained in the update message + gr := h.cfg.processor.GetLatestGraph() + if gr == nil { + continue + } + var nginxReloadRes graph.NginxReloadResult switch { case item.Error != nil: h.cfg.logger.Error(item.Error, "Failed to update NGINX configuration") nginxReloadRes.Error = item.Error - default: + case gr.Gateway != nil: h.cfg.logger.Info("NGINX configuration was successfully updated") } - - // TODO(sberman): once we support multiple Gateways, we'll have to get - // the correct Graph for the Deployment contained in the update message - gr := h.cfg.processor.GetLatestGraph() gr.LatestReloadResult = nginxReloadRes - h.updateStatuses(ctx, gr) + switch item.UpdateType { + case status.UpdateAll: + h.updateStatuses(ctx, gr) + case status.UpdateGateway: + gwAddresses, err := 
getGatewayAddresses( + ctx, + h.cfg.k8sClient, + item.GatewayService, + gr.Gateway, + h.cfg.gatewayClassName, + ) + if err != nil { + msg := "error getting Gateway Service IP address" + h.cfg.logger.Error(err, msg) + h.cfg.eventRecorder.Eventf( + item.GatewayService, + v1.EventTypeWarning, + "GetServiceIPFailed", + msg+": %s", + err.Error(), + ) + continue + } + + transitionTime := metav1.Now() + gatewayStatuses := status.PrepareGatewayRequests( + gr.Gateway, + gr.IgnoredGateways, + transitionTime, + gwAddresses, + gr.LatestReloadResult, + ) + h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) + default: + panic(fmt.Sprintf("unknown event type %T", item.UpdateType)) + } } } func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph) { - gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, h.cfg.gatewayPodConfig) + gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, gr.Gateway, h.cfg.gatewayClassName) if err != nil { - h.cfg.logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") + msg := "error getting Gateway Service IP address" + h.cfg.logger.Error(err, msg) + h.cfg.eventRecorder.Eventf( + &v1.Service{}, + v1.EventTypeWarning, + "GetServiceIPFailed", + msg+": %s", + err.Error(), + ) } transitionTime := metav1.Now() @@ -440,27 +499,27 @@ func getGatewayAddresses( ctx context.Context, k8sClient client.Client, svc *v1.Service, - podConfig ngfConfig.GatewayPodConfig, + gateway *graph.Gateway, + gatewayClassName string, ) ([]gatewayv1.GatewayStatusAddress, error) { - podAddress := []gatewayv1.GatewayStatusAddress{ - { - Type: helpers.GetPointer(gatewayv1.IPAddressType), - Value: podConfig.PodIP, - }, + if gateway == nil { + return nil, nil } var gwSvc v1.Service if svc == nil { - key := types.NamespacedName{Name: podConfig.ServiceName, Namespace: podConfig.Namespace} + svcName := controller.CreateNginxResourceName(gateway.Source.GetName(), gatewayClassName) + key := types.NamespacedName{Name: svcName, Namespace: gateway.Source.GetNamespace()} if err := k8sClient.Get(ctx, key, &gwSvc); err != nil { - return podAddress, fmt.Errorf("error finding Service for Gateway: %w", err) + return nil, fmt.Errorf("error finding Service for Gateway: %w", err) } } else { gwSvc = *svc } var addresses, hostnames []string - if gwSvc.Spec.Type == v1.ServiceTypeLoadBalancer { + switch gwSvc.Spec.Type { + case v1.ServiceTypeLoadBalancer: for _, ingress := range gwSvc.Status.LoadBalancer.Ingress { if ingress.IP != "" { addresses = append(addresses, ingress.IP) @@ -468,6 +527,8 @@ func getGatewayAddresses( hostnames = append(hostnames, ingress.Hostname) } } + default: + addresses = append(addresses, gwSvc.Spec.ClusterIP) } gwAddresses := make([]gatewayv1.GatewayStatusAddress, 0, len(addresses)+len(hostnames)) @@ -546,56 +607,3 @@ func (h *eventHandlerImpl) nginxGatewayCRDDelete( ) { h.updateControlPlaneAndSetStatus(ctx, logger, nil) } - -func (h *eventHandlerImpl) nginxGatewayServiceUpsert(ctx context.Context, logger logr.Logger, obj client.Object) { - svc, ok := obj.(*v1.Service) - if !ok { - panic(fmt.Errorf("obj type mismatch: got %T, expected %T", svc, &v1.Service{})) - } - - gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, svc, h.cfg.gatewayPodConfig) - if err != nil { - logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") - } - - gr := h.cfg.processor.GetLatestGraph() - if gr == nil { - return - } - - transitionTime := metav1.Now() - gatewayStatuses := status.PrepareGatewayRequests( - gr.Gateway, - 
gr.IgnoredGateways, - transitionTime, - gwAddresses, - gr.LatestReloadResult, - ) - h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) -} - -func (h *eventHandlerImpl) nginxGatewayServiceDelete( - ctx context.Context, - logger logr.Logger, - _ types.NamespacedName, -) { - gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, h.cfg.gatewayPodConfig) - if err != nil { - logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address") - } - - gr := h.cfg.processor.GetLatestGraph() - if gr == nil { - return - } - - transitionTime := metav1.Now() - gatewayStatuses := status.PrepareGatewayRequests( - gr.Gateway, - gr.IgnoredGateways, - transitionTime, - gwAddresses, - gr.LatestReloadResult, - ) - h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) -} diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index dbc4ea9ed6..b479b8b34e 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -29,6 +29,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/agentfakes" agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/configfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/provisionerfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" @@ -38,10 +39,12 @@ import ( var _ = Describe("eventHandler", func() { var ( + baseGraph *graph.Graph handler *eventHandlerImpl fakeProcessor *statefakes.FakeChangeProcessor fakeGenerator *configfakes.FakeGenerator fakeNginxUpdater *agentfakes.FakeNginxUpdater + fakeProvisioner *provisionerfakes.FakeProvisioner fakeStatusUpdater *statusfakes.FakeGroupUpdater fakeEventRecorder *record.FakeRecorder fakeK8sClient client.WithWatch @@ -84,18 +87,29 @@ var _ = Describe("eventHandler", func() { _, name, reqs = fakeStatusUpdater.UpdateGroupArgsForCall(1) Expect(name).To(Equal(groupGateways)) - Expect(reqs).To(BeEmpty()) + Expect(reqs).To(HaveLen(1)) + + Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(1)) } BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) //nolint:fatcontext // ignore for test + baseGraph = &graph.Graph{ + Gateway: &graph.Gateway{ + Valid: true, + Source: &gatewayv1.Gateway{}, + }, + } + fakeProcessor = &statefakes.FakeChangeProcessor{} fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) - fakeProcessor.GetLatestGraphReturns(&graph.Graph{}) + fakeProcessor.GetLatestGraphReturns(baseGraph) fakeGenerator = &configfakes.FakeGenerator{} fakeNginxUpdater = &agentfakes.FakeNginxUpdater{} fakeNginxUpdater.UpdateConfigReturns(true) + fakeProvisioner = &provisionerfakes.FakeProvisioner{} + fakeProvisioner.RegisterGatewayReturns(nil) fakeStatusUpdater = &statusfakes.FakeGroupUpdater{} fakeEventRecorder = record.NewFakeRecorder(1) zapLogLevelSetter = newZapLogLevelSetter(zap.NewAtomicLevel()) @@ -112,6 +126,7 @@ var _ = Describe("eventHandler", func() { generator: fakeGenerator, logLevelSetter: zapLogLevelSetter, nginxUpdater: fakeNginxUpdater, + nginxProvisioner: fakeProvisioner, statusUpdater: fakeStatusUpdater, eventRecorder: fakeEventRecorder, deployCtxCollector: &licensingfakes.FakeCollector{}, @@ 
-157,8 +172,7 @@ var _ = Describe("eventHandler", func() { } BeforeEach(func() { - fakeProcessor.ProcessReturns(state.ClusterStateChange /* changed */, &graph.Graph{}) - + fakeProcessor.ProcessReturns(state.ClusterStateChange, baseGraph) fakeGenerator.GenerateReturns(fakeCfgFiles) }) @@ -195,11 +209,24 @@ var _ = Describe("eventHandler", func() { expectReconfig(dcfg, fakeCfgFiles) Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) }) + + It("should not build anything if Gateway isn't set", func() { + fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) + + e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + batch := []interface{}{e} + + handler.HandleEventBatch(context.Background(), logr.Discard(), batch) + + checkUpsertEventExpectations(e) + Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(0)) + Expect(fakeGenerator.GenerateCallCount()).Should(Equal(0)) + }) }) When("a batch has multiple events", func() { It("should process events", func() { - upsertEvent := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + upsertEvent := &events.UpsertEvent{Resource: &gatewayv1.Gateway{}} deleteEvent := &events.DeleteEvent{ Type: &gatewayv1.HTTPRoute{}, NamespacedName: types.NamespacedName{Namespace: "test", Name: "route"}, @@ -357,77 +384,6 @@ var _ = Describe("eventHandler", func() { }) }) - When("receiving Service updates", func() { - const notNginxGatewayServiceName = "not-nginx-gateway" - - BeforeEach(func() { - fakeProcessor.GetLatestGraphReturns(&graph.Graph{}) - - Expect(fakeK8sClient.Create(context.Background(), createService(notNginxGatewayServiceName))).To(Succeed()) - }) - - It("should not call UpdateAddresses if the Service is not for the Gateway Pod", func() { - e := &events.UpsertEvent{Resource: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-nginx-gateway", - }, - }} - batch := []interface{}{e} - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(BeZero()) - - de := &events.DeleteEvent{Type: &v1.Service{}} - batch = []interface{}{de} - Expect(fakeK8sClient.Delete(context.Background(), createService(notNginxGatewayServiceName))).To(Succeed()) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.GetLatestConfiguration()).To(BeNil()) - - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(BeZero()) - }) - - It("should update the addresses when the Gateway Service is upserted", func() { - e := &events.UpsertEvent{Resource: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "nginx-gateway", - Namespace: "nginx-gateway", - }, - }} - batch := []interface{}{e} - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.GetLatestConfiguration()).To(BeNil()) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) - _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) - Expect(name).To(Equal(groupGateways)) - Expect(reqs).To(BeEmpty()) - }) - - It("should update the addresses when the Gateway Service is deleted", func() { - e := &events.DeleteEvent{ - Type: &v1.Service{}, - NamespacedName: types.NamespacedName{ - Name: "nginx-gateway", - Namespace: "nginx-gateway", - }, - } - batch := []interface{}{e} - Expect(fakeK8sClient.Delete(context.Background(), createService(nginxGatewayServiceName))).To(Succeed()) - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Expect(handler.GetLatestConfiguration()).To(BeNil()) - 
Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) - _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) - Expect(name).To(Equal(groupGateways)) - Expect(reqs).To(BeEmpty()) - }) - }) - When("receiving an EndpointsOnlyChange update", func() { e := &events.UpsertEvent{Resource: &discoveryV1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ @@ -438,7 +394,7 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{e} BeforeEach(func() { - fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{}) + fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{Gateway: &graph.Gateway{Valid: true}}) }) When("running NGINX Plus", func() { @@ -472,6 +428,7 @@ var _ = Describe("eventHandler", func() { It("should update status when receiving a queue event", func() { obj := &status.QueueObject{ + UpdateType: status.UpdateAll, Deployment: types.NamespacedName{}, Error: errors.New("status error"), } @@ -486,6 +443,20 @@ var _ = Describe("eventHandler", func() { Expect(gr.LatestReloadResult.Error.Error()).To(Equal("status error")) }) + It("should update Gateway status when receiving a queue event", func() { + obj := &status.QueueObject{ + UpdateType: status.UpdateGateway, + Deployment: types.NamespacedName{}, + GatewayService: &v1.Service{}, + } + queue.Enqueue(obj) + + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) + }) + It("should update nginx conf only when leader", func() { ctx := context.Background() handler.cfg.graphBuiltHealthChecker.leader = false @@ -494,7 +465,7 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{e} readyChannel := handler.cfg.graphBuiltHealthChecker.getReadyCh() - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) + fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{Gateway: &graph.Gateway{Valid: true}}) handler.HandleEventBatch(context.Background(), logr.Discard(), batch) @@ -535,23 +506,25 @@ var _ = Describe("eventHandler", func() { var _ = Describe("getGatewayAddresses", func() { It("gets gateway addresses from a Service", func() { fakeClient := fake.NewFakeClient() - podConfig := config.GatewayPodConfig{ - PodIP: "1.2.3.4", - ServiceName: "my-service", - Namespace: "nginx-gateway", - } // no Service exists yet, should get error and Pod Address - addrs, err := getGatewayAddresses(context.Background(), fakeClient, nil, podConfig) + gateway := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test-ns", + }, + }, + } + addrs, err := getGatewayAddresses(context.Background(), fakeClient, nil, gateway, "nginx") Expect(err).To(HaveOccurred()) - Expect(addrs).To(HaveLen(1)) - Expect(addrs[0].Value).To(Equal("1.2.3.4")) + Expect(addrs).To(BeNil()) // Create LoadBalancer Service svc := v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - Namespace: "nginx-gateway", + Name: "gateway-nginx", + Namespace: "test-ns", }, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeLoadBalancer, @@ -572,11 +545,31 @@ var _ = Describe("getGatewayAddresses", func() { Expect(fakeClient.Create(context.Background(), &svc)).To(Succeed()) - addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, podConfig) + addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, gateway, "nginx") Expect(err).ToNot(HaveOccurred()) Expect(addrs).To(HaveLen(2)) Expect(addrs[0].Value).To(Equal("34.35.36.37")) Expect(addrs[1].Value).To(Equal("myhost")) + + 
Expect(fakeClient.Delete(context.Background(), &svc)).To(Succeed()) + // Create ClusterIP Service + svc = v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway-nginx", + Namespace: "test-ns", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + ClusterIP: "12.13.14.15", + }, + } + + Expect(fakeClient.Create(context.Background(), &svc)).To(Succeed()) + + addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, gateway, "nginx") + Expect(err).ToNot(HaveOccurred()) + Expect(addrs).To(HaveLen(1)) + Expect(addrs[0].Value).To(Equal("12.13.14.15")) }) }) diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 31574a9f64..119c49a8c3 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -58,6 +58,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/observability" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies/upstreamsettings" ngxvalidation "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/validation" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/resolver" @@ -122,12 +123,6 @@ func StartManager(cfg config.Config) error { return err } - // protectedPorts is the map of ports that may not be configured by a listener, and the name of what it is used for - protectedPorts := map[int32]string{ - int32(cfg.MetricsConfig.Port): "MetricsPort", //nolint:gosec // port will not overflow int32 - int32(cfg.HealthConfig.Port): "HealthPort", //nolint:gosec // port will not overflow int32 - } - mustExtractGVK := kinds.NewMustExtractGKV(scheme) genericValidator := ngxvalidation.GenericValidator{} @@ -149,7 +144,6 @@ func StartManager(cfg config.Config) error { }, EventRecorder: recorder, MustExtractGVK: mustExtractGVK, - ProtectedPorts: protectedPorts, PlusSecrets: plusSecrets, }) @@ -205,9 +199,31 @@ func StartManager(cfg config.Config) error { return fmt.Errorf("cannot register grpc server: %w", err) } + nginxProvisioner, provLoop, err := provisioner.NewNginxProvisioner( + ctx, + mgr, + provisioner.Config{ + DeploymentStore: nginxUpdater.NginxDeployments, + StatusQueue: statusQueue, + Logger: cfg.Logger.WithName("provisioner"), + EventRecorder: recorder, + GatewayPodConfig: cfg.GatewayPodConfig, + GCName: cfg.GatewayClassName, + Plus: cfg.Plus, + }, + ) + if err != nil { + return fmt.Errorf("error building provisioner: %w", err) + } + + if err := mgr.Add(&runnables.LeaderOrNonLeader{Runnable: provLoop}); err != nil { + return fmt.Errorf("cannot register provisioner event loop: %w", err) + } + eventHandler := newEventHandlerImpl(eventHandlerConfig{ ctx: ctx, nginxUpdater: nginxUpdater, + nginxProvisioner: nginxProvisioner, metricsCollector: handlerCollector, statusUpdater: groupStatusUpdater, processor: processor, @@ -227,6 +243,7 @@ func StartManager(cfg config.Config) error { gatewayPodConfig: cfg.GatewayPodConfig, controlConfigNSName: controlConfigNSName, gatewayCtlrName: cfg.GatewayCtlrName, + gatewayClassName: cfg.GatewayClassName, updateGatewayClassStatus: cfg.UpdateGatewayClassStatus, plus: cfg.Plus, statusQueue: statusQueue, @@ -249,6 +266,7 @@ func StartManager(cfg config.Config) error { if err = 
mgr.Add(runnables.NewCallFunctionsAfterBecameLeader([]func(context.Context){ groupStatusUpdater.Enable, + nginxProvisioner.Enable, healthChecker.setAsLeader, eventHandler.eventHandlerEnable, })); err != nil { @@ -260,7 +278,7 @@ func StartManager(cfg config.Config) error { K8sClientReader: mgr.GetAPIReader(), GraphGetter: processor, ConfigurationGetter: eventHandler, - Version: cfg.Version, + Version: cfg.GatewayPodConfig.Version, PodNSName: types.NamespacedName{ Namespace: cfg.GatewayPodConfig.Namespace, Name: cfg.GatewayPodConfig.Name, @@ -417,19 +435,6 @@ func registerControllers( controller.WithK8sPredicate(predicate.ServicePortsChangedPredicate{}), }, }, - { - objectType: &apiv1.Service{}, - name: "ngf-service", // unique controller names are needed and we have multiple Service ctlrs - options: func() []controller.Option { - svcNSName := types.NamespacedName{ - Namespace: cfg.GatewayPodConfig.Namespace, - Name: cfg.GatewayPodConfig.ServiceName, - } - return []controller.Option{ - controller.WithK8sPredicate(predicate.GatewayServicePredicate{NSName: svcNSName}), - } - }(), - }, { objectType: &apiv1.Secret{}, options: []controller.Option{ diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index 58fad509db..3d2ff78b4a 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -115,6 +115,8 @@ func (n *NginxUpdaterImpl) UpdateUpstreamServers( // TODO(sberman): optimize this by only sending updates that are necessary. // Call GetUpstreams first (will need Subscribers to send responses back), and // then determine which upstreams actually need to be updated. + // OR we can possibly just use the most recent NGINXPlusActions to see what the last state + // of upstreams were, and only update the diff. var errs []error var applied bool diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go index 236a34f57d..31d96143a7 100644 --- a/internal/mode/static/nginx/agent/command.go +++ b/internal/mode/static/nginx/agent/command.go @@ -128,6 +128,7 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error if !ok { return agentgrpc.ErrStatusInvalidConnection } + defer cs.connTracker.RemoveConnection(gi.IPAddress) // wait for the agent to report itself and nginx conn, deployment, err := cs.waitForConnection(ctx, gi) @@ -135,6 +136,7 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error cs.logger.Error(err, "error waiting for connection") return err } + defer deployment.RemovePodStatus(conn.PodName) cs.logger.Info(fmt.Sprintf("Successfully connected to nginx agent %s", conn.PodName)) @@ -367,6 +369,7 @@ func (cs *commandService) logAndSendErrorStatus(deployment *Deployment, conn *ag queueObj := &status.QueueObject{ Deployment: conn.Parent, Error: deployment.GetConfigurationStatus(), + UpdateType: status.UpdateAll, } cs.statusQueue.Enqueue(queueObj) } @@ -504,7 +507,6 @@ func getNginxInstanceID(instances []*pb.Instance) string { } // UpdateDataPlaneHealth includes full health information about the data plane as reported by the agent. -// TODO(sberman): Is health monitoring the data planes something useful for us to do? 
func (cs *commandService) UpdateDataPlaneHealth( _ context.Context, _ *pb.UpdateDataPlaneHealthRequest, diff --git a/internal/mode/static/nginx/agent/command_test.go b/internal/mode/static/nginx/agent/command_test.go index 340c4deda9..714ffaafe5 100644 --- a/internal/mode/static/nginx/agent/command_test.go +++ b/internal/mode/static/nginx/agent/command_test.go @@ -399,11 +399,17 @@ func TestSubscribe(t *testing.T) { ensureAPIRequestWasSent(g, mockServer, loopAction) verifyResponse(g, mockServer, responseCh) + g.Eventually(func() map[string]error { + return deployment.podStatuses + }).Should(HaveKey("nginx-pod")) + cancel() g.Eventually(func() error { return <-errCh }).Should(MatchError(ContainSubstring("context canceled"))) + + g.Expect(deployment.podStatuses).ToNot(HaveKey("nginx-pod")) } func TestSubscribe_Errors(t *testing.T) { diff --git a/internal/mode/static/nginx/agent/deployment.go b/internal/mode/static/nginx/agent/deployment.go index c0bd2bca1d..bafdc6ad9e 100644 --- a/internal/mode/static/nginx/agent/deployment.go +++ b/internal/mode/static/nginx/agent/deployment.go @@ -104,6 +104,14 @@ func (d *Deployment) GetLatestUpstreamError() error { return d.latestUpstreamError } +// RemovePodStatus deletes a pod from the pod status map. +func (d *Deployment) RemovePodStatus(podName string) { + d.Lock.Lock() + defer d.Lock.Unlock() + + delete(d.podStatuses, podName) +} + /* The following functions for the Deployment object are UNLOCKED, meaning that they are unsafe. Callers of these functions MUST ensure the lock is set before calling. @@ -255,9 +263,7 @@ func (d *DeploymentStore) StoreWithBroadcaster( return deployment } -// Remove cleans up any connections that are tracked for this deployment, and then removes -// the deployment from the store. +// Remove the deployment from the store. func (d *DeploymentStore) Remove(nsName types.NamespacedName) { - d.connTracker.UntrackConnectionsForParent(nsName) d.deployments.Delete(nsName) } diff --git a/internal/mode/static/nginx/agent/deployment_test.go b/internal/mode/static/nginx/agent/deployment_test.go index e4881b9934..3c6dc4c859 100644 --- a/internal/mode/static/nginx/agent/deployment_test.go +++ b/internal/mode/static/nginx/agent/deployment_test.go @@ -91,6 +91,9 @@ func TestSetPodErrorStatus(t *testing.T) { g.Expect(deployment.GetConfigurationStatus()).To(MatchError(ContainSubstring("test error"))) g.Expect(deployment.GetConfigurationStatus()).To(MatchError(ContainSubstring("test error 2"))) + + deployment.RemovePodStatus("test-pod") + g.Expect(deployment.podStatuses).ToNot(HaveKey("test-pod")) } func TestSetLatestConfigError(t *testing.T) { diff --git a/internal/mode/static/nginx/agent/grpc/connections.go b/internal/mode/static/nginx/agent/grpc/connections.go index 8f1adc2c75..6b30ce4b59 100644 --- a/internal/mode/static/nginx/agent/grpc/connections.go +++ b/internal/mode/static/nginx/agent/grpc/connections.go @@ -16,7 +16,7 @@ type ConnectionsTracker interface { Track(key string, conn Connection) GetConnection(key string) Connection SetInstanceID(key, id string) - UntrackConnectionsForParent(parent types.NamespacedName) + RemoveConnection(key string) } // Connection contains the data about a single nginx agent connection. @@ -77,14 +77,10 @@ func (c *AgentConnectionsTracker) SetInstanceID(key, id string) { } } -// UntrackConnectionsForParent removes all Connections that reference the specified parent. 
-func (c *AgentConnectionsTracker) UntrackConnectionsForParent(parent types.NamespacedName) { +// RemoveConnection removes a connection from the tracking map. +func (c *AgentConnectionsTracker) RemoveConnection(key string) { c.lock.Lock() defer c.lock.Unlock() - for key, conn := range c.connections { - if conn.Parent == parent { - delete(c.connections, key) - } - } + delete(c.connections, key) } diff --git a/internal/mode/static/nginx/agent/grpc/connections_test.go b/internal/mode/static/nginx/agent/grpc/connections_test.go index be0ca18a8b..c9d7b3cdc3 100644 --- a/internal/mode/static/nginx/agent/grpc/connections_test.go +++ b/internal/mode/static/nginx/agent/grpc/connections_test.go @@ -75,25 +75,21 @@ func TestSetInstanceID(t *testing.T) { g.Expect(trackedConn.InstanceID).To(Equal("instance1")) } -func TestUntrackConnectionsForParent(t *testing.T) { +func TestRemoveConnection(t *testing.T) { t.Parallel() g := NewWithT(t) tracker := agentgrpc.NewConnectionsTracker() + conn := agentgrpc.Connection{ + PodName: "pod1", + InstanceID: "instance1", + Parent: types.NamespacedName{Namespace: "default", Name: "parent1"}, + } + tracker.Track("key1", conn) - parent1 := types.NamespacedName{Namespace: "default", Name: "parent1"} - conn1 := agentgrpc.Connection{PodName: "pod1", InstanceID: "instance1", Parent: parent1} - conn2 := agentgrpc.Connection{PodName: "pod2", InstanceID: "instance2", Parent: parent1} - - parent2 := types.NamespacedName{Namespace: "default", Name: "parent2"} - conn3 := agentgrpc.Connection{PodName: "pod3", InstanceID: "instance3", Parent: parent2} - - tracker.Track("key1", conn1) - tracker.Track("key2", conn2) - tracker.Track("key3", conn3) + trackedConn := tracker.GetConnection("key1") + g.Expect(trackedConn).To(Equal(conn)) - tracker.UntrackConnectionsForParent(parent1) + tracker.RemoveConnection("key1") g.Expect(tracker.GetConnection("key1")).To(Equal(agentgrpc.Connection{})) - g.Expect(tracker.GetConnection("key2")).To(Equal(agentgrpc.Connection{})) - g.Expect(tracker.GetConnection("key3")).To(Equal(conn3)) } diff --git a/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go b/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go index a82da0a5a2..8ae97043cd 100644 --- a/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go +++ b/internal/mode/static/nginx/agent/grpc/grpcfakes/fake_connections_tracker.go @@ -5,7 +5,6 @@ import ( "sync" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc" - "k8s.io/apimachinery/pkg/types" ) type FakeConnectionsTracker struct { @@ -20,6 +19,11 @@ type FakeConnectionsTracker struct { getConnectionReturnsOnCall map[int]struct { result1 grpc.Connection } + RemoveConnectionStub func(string) + removeConnectionMutex sync.RWMutex + removeConnectionArgsForCall []struct { + arg1 string + } SetInstanceIDStub func(string, string) setInstanceIDMutex sync.RWMutex setInstanceIDArgsForCall []struct { @@ -32,11 +36,6 @@ type FakeConnectionsTracker struct { arg1 string arg2 grpc.Connection } - UntrackConnectionsForParentStub func(types.NamespacedName) - untrackConnectionsForParentMutex sync.RWMutex - untrackConnectionsForParentArgsForCall []struct { - arg1 types.NamespacedName - } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -102,6 +101,38 @@ func (fake *FakeConnectionsTracker) GetConnectionReturnsOnCall(i int, result1 gr }{result1} } +func (fake *FakeConnectionsTracker) RemoveConnection(arg1 string) { + 
fake.removeConnectionMutex.Lock() + fake.removeConnectionArgsForCall = append(fake.removeConnectionArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.RemoveConnectionStub + fake.recordInvocation("RemoveConnection", []interface{}{arg1}) + fake.removeConnectionMutex.Unlock() + if stub != nil { + fake.RemoveConnectionStub(arg1) + } +} + +func (fake *FakeConnectionsTracker) RemoveConnectionCallCount() int { + fake.removeConnectionMutex.RLock() + defer fake.removeConnectionMutex.RUnlock() + return len(fake.removeConnectionArgsForCall) +} + +func (fake *FakeConnectionsTracker) RemoveConnectionCalls(stub func(string)) { + fake.removeConnectionMutex.Lock() + defer fake.removeConnectionMutex.Unlock() + fake.RemoveConnectionStub = stub +} + +func (fake *FakeConnectionsTracker) RemoveConnectionArgsForCall(i int) string { + fake.removeConnectionMutex.RLock() + defer fake.removeConnectionMutex.RUnlock() + argsForCall := fake.removeConnectionArgsForCall[i] + return argsForCall.arg1 +} + func (fake *FakeConnectionsTracker) SetInstanceID(arg1 string, arg2 string) { fake.setInstanceIDMutex.Lock() fake.setInstanceIDArgsForCall = append(fake.setInstanceIDArgsForCall, struct { @@ -168,49 +199,17 @@ func (fake *FakeConnectionsTracker) TrackArgsForCall(i int) (string, grpc.Connec return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeConnectionsTracker) UntrackConnectionsForParent(arg1 types.NamespacedName) { - fake.untrackConnectionsForParentMutex.Lock() - fake.untrackConnectionsForParentArgsForCall = append(fake.untrackConnectionsForParentArgsForCall, struct { - arg1 types.NamespacedName - }{arg1}) - stub := fake.UntrackConnectionsForParentStub - fake.recordInvocation("UntrackConnectionsForParent", []interface{}{arg1}) - fake.untrackConnectionsForParentMutex.Unlock() - if stub != nil { - fake.UntrackConnectionsForParentStub(arg1) - } -} - -func (fake *FakeConnectionsTracker) UntrackConnectionsForParentCallCount() int { - fake.untrackConnectionsForParentMutex.RLock() - defer fake.untrackConnectionsForParentMutex.RUnlock() - return len(fake.untrackConnectionsForParentArgsForCall) -} - -func (fake *FakeConnectionsTracker) UntrackConnectionsForParentCalls(stub func(types.NamespacedName)) { - fake.untrackConnectionsForParentMutex.Lock() - defer fake.untrackConnectionsForParentMutex.Unlock() - fake.UntrackConnectionsForParentStub = stub -} - -func (fake *FakeConnectionsTracker) UntrackConnectionsForParentArgsForCall(i int) types.NamespacedName { - fake.untrackConnectionsForParentMutex.RLock() - defer fake.untrackConnectionsForParentMutex.RUnlock() - argsForCall := fake.untrackConnectionsForParentArgsForCall[i] - return argsForCall.arg1 -} - func (fake *FakeConnectionsTracker) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() fake.getConnectionMutex.RLock() defer fake.getConnectionMutex.RUnlock() + fake.removeConnectionMutex.RLock() + defer fake.removeConnectionMutex.RUnlock() fake.setInstanceIDMutex.RLock() defer fake.setInstanceIDMutex.RUnlock() fake.trackMutex.RLock() defer fake.trackMutex.RUnlock() - fake.untrackConnectionsForParentMutex.RLock() - defer fake.untrackConnectionsForParentMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/mode/static/provisioner/doc.go b/internal/mode/static/provisioner/doc.go new file mode 100644 index 0000000000..14cffc569b --- /dev/null +++ b/internal/mode/static/provisioner/doc.go @@ -0,0 +1,4 
@@ +/* +Package provisioner contains the functions for deploying an instance of nginx. +*/ +package provisioner diff --git a/internal/mode/static/provisioner/eventloop.go b/internal/mode/static/provisioner/eventloop.go new file mode 100644 index 0000000000..c4ccc2b2e1 --- /dev/null +++ b/internal/mode/static/provisioner/eventloop.go @@ -0,0 +1,126 @@ +package provisioner + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + k8spredicate "sigs.k8s.io/controller-runtime/pkg/predicate" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/predicate" + "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" +) + +func newEventLoop( + ctx context.Context, + mgr manager.Manager, + handler *eventHandler, + logger logr.Logger, + selector metav1.LabelSelector, +) (*events.EventLoop, error) { + nginxResourceLabelPredicate := predicate.NginxLabelPredicate(selector) + + controllerRegCfgs := []struct { + objectType ngftypes.ObjectType + options []controller.Option + }{ + { + objectType: &gatewayv1.Gateway{}, + }, + { + objectType: &appsv1.Deployment{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + predicate.RestartDeploymentAnnotationPredicate{}, + ), + ), + }, + }, + { + objectType: &corev1.Service{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + nginxResourceLabelPredicate, + ), + ), + }, + }, + { + objectType: &corev1.ServiceAccount{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + { + objectType: &corev1.ConfigMap{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + } + + eventCh := make(chan interface{}) + for _, regCfg := range controllerRegCfgs { + gvk, err := apiutil.GVKForObject(regCfg.objectType, mgr.GetScheme()) + if err != nil { + panic(fmt.Sprintf("could not extract GVK for object: %T", regCfg.objectType)) + } + + if err := controller.Register( + ctx, + regCfg.objectType, + fmt.Sprintf("provisioner-%s", gvk.Kind), + mgr, + eventCh, + regCfg.options..., + ); err != nil { + return nil, fmt.Errorf("cannot register controller for %T: %w", regCfg.objectType, err) + } + } + + firstBatchPreparer := events.NewFirstEventBatchPreparerImpl( + mgr.GetCache(), + []client.Object{}, + []client.ObjectList{ + // GatewayList MUST be first in this list to ensure that we see it before attempting + // to provision or deprovision any nginx resources. 
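+ // (On the first event batch, updateOrDeleteResources deprovisions nginx resources whose
+ // Gateway is not yet in the store, so Gateways must be stored before the other objects are handled.)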
+ &gatewayv1.GatewayList{}, + &appsv1.DeploymentList{}, + &corev1.ServiceList{}, + &corev1.ServiceAccountList{}, + &corev1.ConfigMapList{}, + }, + ) + + eventLoop := events.NewEventLoop( + eventCh, + logger.WithName("eventLoop"), + handler, + firstBatchPreparer, + ) + + return eventLoop, nil +} diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go new file mode 100644 index 0000000000..405b670c18 --- /dev/null +++ b/internal/mode/static/provisioner/handler.go @@ -0,0 +1,162 @@ +package provisioner + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +// eventHandler ensures each Gateway for the specific GatewayClass has a corresponding Deployment +// of NGF configured to use that specific Gateway. +// +// eventHandler implements events.Handler interface. +type eventHandler struct { + store *store + provisioner *NginxProvisioner + labelSelector labels.Selector + // gcName is the GatewayClass name for this control plane. + gcName string +} + +func newEventHandler( + store *store, + provisioner *NginxProvisioner, + selector metav1.LabelSelector, + gcName string, +) (*eventHandler, error) { + labelSelector, err := metav1.LabelSelectorAsSelector(&selector) + if err != nil { + return nil, fmt.Errorf("error initializing label selector: %w", err) + } + + return &eventHandler{ + store: store, + provisioner: provisioner, + labelSelector: labelSelector, + gcName: gcName, + }, nil +} + +func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, batch events.EventBatch) { + for _, event := range batch { + switch e := event.(type) { + case *events.UpsertEvent: + switch obj := e.Resource.(type) { + case *gatewayv1.Gateway: + h.store.updateGateway(obj) + case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap: + objLabels := labels.Set(obj.GetLabels()) + if h.labelSelector.Matches(objLabels) { + gatewayName := objLabels.Get(controller.GatewayLabel) + gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} + + if err := h.updateOrDeleteResources(ctx, obj, gatewayNSName); err != nil { + logger.Error(err, "error handling resource update") + } + } + case *corev1.Service: + objLabels := labels.Set(obj.GetLabels()) + if h.labelSelector.Matches(objLabels) { + gatewayName := objLabels.Get(controller.GatewayLabel) + gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} + + if err := h.updateOrDeleteResources(ctx, obj, gatewayNSName); err != nil { + logger.Error(err, "error handling resource update") + } + + statusUpdate := &status.QueueObject{ + Deployment: client.ObjectKeyFromObject(obj), + UpdateType: status.UpdateGateway, + GatewayService: obj, + } + h.provisioner.cfg.StatusQueue.Enqueue(statusUpdate) + } + default: + panic(fmt.Errorf("unknown resource type %T", e.Resource)) + } + case *events.DeleteEvent: + switch e.Type.(type) { + case *gatewayv1.Gateway: + if err := h.provisioner.deprovisionNginx(ctx, e.NamespacedName); err != nil { + logger.Error(err, 
"error deprovisioning nginx resources") + } + h.store.deleteGateway(e.NamespacedName) + case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, *corev1.ConfigMap: + if err := h.reprovisionResources(ctx, e); err != nil { + logger.Error(err, "error re-provisioning nginx resources") + } + default: + panic(fmt.Errorf("unknown resource type %T", e.Type)) + } + default: + panic(fmt.Errorf("unknown event type %T", e)) + } + } +} + +// updateOrDeleteResources ensures that nginx resources are either: +// - deleted if the Gateway no longer exists (this is for when the controller first starts up) +// - are updated to the proper state in case a user makes a change directly to the resource. +func (h *eventHandler) updateOrDeleteResources( + ctx context.Context, + obj client.Object, + gatewayNSName types.NamespacedName, +) error { + if gw := h.store.getGateway(gatewayNSName); gw == nil { + if !h.provisioner.isLeader() { + h.provisioner.setResourceToDelete(gatewayNSName) + + return nil + } + + if err := h.provisioner.deprovisionNginx(ctx, gatewayNSName); err != nil { + return fmt.Errorf("error deprovisioning nginx resources: %w", err) + } + return nil + } + + h.store.registerResourceInGatewayConfig(gatewayNSName, obj) + + resourceName := controller.CreateNginxResourceName(gatewayNSName.Name, h.gcName) + resources := h.store.getNginxResourcesForGateway(gatewayNSName) + if resources.Gateway != nil { + if err := h.provisioner.provisionNginx( + ctx, + resourceName, + resources.Gateway.Source, + resources.Gateway.EffectiveNginxProxy, + ); err != nil { + return fmt.Errorf("error updating nginx resource: %w", err) + } + } + + return nil +} + +// reprovisionResources redeploys nginx resources that have been deleted but should not have been. +func (h *eventHandler) reprovisionResources(ctx context.Context, event *events.DeleteEvent) error { + if gateway := h.store.gatewayExistsForResource(event.Type, event.NamespacedName); gateway != nil && gateway.Valid { + resourceName := controller.CreateNginxResourceName(gateway.Source.GetName(), h.gcName) + if err := h.provisioner.reprovisionNginx( + ctx, + resourceName, + gateway.Source, + gateway.EffectiveNginxProxy, + ); err != nil { + return err + } + } + return nil +} diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go new file mode 100644 index 0000000000..19a24cb832 --- /dev/null +++ b/internal/mode/static/provisioner/objects.go @@ -0,0 +1,524 @@ +package provisioner + +import ( + "fmt" + "maps" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +const ( + defaultNginxErrorLogLevel = "info" + nginxIncludesConfigMapNameSuffix = "includes-bootstrap" + nginxAgentConfigMapNameSuffix = "agent-config" + + defaultServiceType = corev1.ServiceTypeLoadBalancer + defaultServicePolicy = corev1.ServiceExternalTrafficPolicyLocal + + defaultNginxImagePath = "ghcr.io/nginx/nginx-gateway-fabric/nginx" + 
defaultNginxPlusImagePath = "private-registry.nginx.com/nginx-gateway-fabric/nginx-plus" + defaultImagePullPolicy = corev1.PullIfNotPresent +) + +var emptyDirVolumeSource = corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}} + +func (p *NginxProvisioner) buildNginxResourceObjects( + resourceName string, + gateway *gatewayv1.Gateway, + nProxyCfg *graph.EffectiveNginxProxy, +) []client.Object { + // TODO(sberman): handle nginx plus config + + ngxIncludesConfigMapName := controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix) + ngxAgentConfigMapName := controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix) + + selectorLabels := make(map[string]string) + maps.Copy(selectorLabels, p.baseLabelSelector.MatchLabels) + selectorLabels[controller.GatewayLabel] = gateway.GetName() + selectorLabels[controller.AppNameLabel] = resourceName + + labels := make(map[string]string) + annotations := make(map[string]string) + + maps.Copy(labels, selectorLabels) + + if gateway.Spec.Infrastructure != nil { + for key, value := range gateway.Spec.Infrastructure.Labels { + labels[string(key)] = string(value) + } + + for key, value := range gateway.Spec.Infrastructure.Annotations { + annotations[string(key)] = string(value) + } + } + + objectMeta := metav1.ObjectMeta{ + Name: resourceName, + Namespace: gateway.GetNamespace(), + Labels: labels, + Annotations: annotations, + } + + configmaps := p.buildNginxConfigMaps( + objectMeta, + nProxyCfg, + ngxIncludesConfigMapName, + ngxAgentConfigMapName, + ) + + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: objectMeta, + } + + ports := make(map[int32]struct{}) + for _, listener := range gateway.Spec.Listeners { + ports[int32(listener.Port)] = struct{}{} + } + + service := buildNginxService(objectMeta, nProxyCfg, ports, selectorLabels) + deployment := p.buildNginxDeployment( + objectMeta, + nProxyCfg, + ngxIncludesConfigMapName, + ngxAgentConfigMapName, + ports, + selectorLabels, + ) + + // order to install resources: + // scc (if openshift) + // secrets + // configmaps + // serviceaccount + // service + // deployment/daemonset + + objects := make([]client.Object, 0, len(configmaps)+3) + objects = append(objects, configmaps...) 
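+ // The slice follows the install order listed above: ConfigMaps first, then the
+ // ServiceAccount, Service, and Deployment.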
+ objects = append(objects, serviceAccount, service, deployment) + + return objects +} + +func (p *NginxProvisioner) buildNginxConfigMaps( + objectMeta metav1.ObjectMeta, + nProxyCfg *graph.EffectiveNginxProxy, + ngxIncludesConfigMapName string, + ngxAgentConfigMapName string, +) []client.Object { + var logging *ngfAPIv1alpha2.NginxLogging + if nProxyCfg != nil && nProxyCfg.Logging != nil { + logging = nProxyCfg.Logging + } + + logLevel := defaultNginxErrorLogLevel + if logging != nil && logging.ErrorLevel != nil { + logLevel = string(*nProxyCfg.Logging.ErrorLevel) + } + + mainFields := map[string]interface{}{ + "ErrorLevel": logLevel, + } + + bootstrapCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ngxIncludesConfigMapName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + Data: map[string]string{ + "main.conf": string(helpers.MustExecuteTemplate(mainTemplate, mainFields)), + }, + } + + metricsPort := config.DefaultNginxMetricsPort + port, enableMetrics := graph.MetricsEnabledForNginxProxy(nProxyCfg) + if port != nil { + metricsPort = *port + } + + agentFields := map[string]interface{}{ + "Plus": p.cfg.Plus, + "ServiceName": p.cfg.GatewayPodConfig.ServiceName, + "Namespace": p.cfg.GatewayPodConfig.Namespace, + "EnableMetrics": enableMetrics, + "MetricsPort": metricsPort, + } + + if logging != nil && logging.AgentLevel != nil { + agentFields["LogLevel"] = *logging.AgentLevel + } + + agentCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ngxAgentConfigMapName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + Data: map[string]string{ + "nginx-agent.conf": string(helpers.MustExecuteTemplate(agentTemplate, agentFields)), + }, + } + + return []client.Object{bootstrapCM, agentCM} +} + +func buildNginxService( + objectMeta metav1.ObjectMeta, + nProxyCfg *graph.EffectiveNginxProxy, + ports map[int32]struct{}, + selectorLabels map[string]string, +) *corev1.Service { + var serviceCfg ngfAPIv1alpha2.ServiceSpec + if nProxyCfg != nil && nProxyCfg.Kubernetes != nil && nProxyCfg.Kubernetes.Service != nil { + serviceCfg = *nProxyCfg.Kubernetes.Service + } + + serviceType := defaultServiceType + if serviceCfg.ServiceType != nil { + serviceType = corev1.ServiceType(*serviceCfg.ServiceType) + } + + servicePolicy := defaultServicePolicy + if serviceCfg.ExternalTrafficPolicy != nil { + servicePolicy = corev1.ServiceExternalTrafficPolicy(*serviceCfg.ExternalTrafficPolicy) + } + + servicePorts := make([]corev1.ServicePort, 0, len(ports)) + for port := range ports { + servicePort := corev1.ServicePort{ + Name: fmt.Sprintf("port-%d", port), + Port: port, + TargetPort: intstr.FromInt32(port), + } + servicePorts = append(servicePorts, servicePort) + } + + svc := &corev1.Service{ + ObjectMeta: objectMeta, + Spec: corev1.ServiceSpec{ + Type: serviceType, + Ports: servicePorts, + ExternalTrafficPolicy: servicePolicy, + Selector: selectorLabels, + }, + } + + if serviceCfg.LoadBalancerIP != nil { + svc.Spec.LoadBalancerIP = *serviceCfg.LoadBalancerIP + } + if serviceCfg.LoadBalancerSourceRanges != nil { + svc.Spec.LoadBalancerSourceRanges = serviceCfg.LoadBalancerSourceRanges + } + + return svc +} + +func (p *NginxProvisioner) buildNginxDeployment( + objectMeta metav1.ObjectMeta, + nProxyCfg *graph.EffectiveNginxProxy, + ngxIncludesConfigMapName string, + ngxAgentConfigMapName string, + ports map[int32]struct{}, + selectorLabels map[string]string, +) client.Object { + 
podTemplateSpec := p.buildNginxPodTemplateSpec( + objectMeta, + nProxyCfg, + ngxIncludesConfigMapName, + ngxAgentConfigMapName, + ports, + ) + + var object client.Object + // TODO(sberman): daemonset support + deployment := &appsv1.Deployment{ + ObjectMeta: objectMeta, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: selectorLabels, + }, + Template: podTemplateSpec, + }, + } + + var deploymentCfg ngfAPIv1alpha2.DeploymentSpec + if nProxyCfg != nil && nProxyCfg.Kubernetes != nil && nProxyCfg.Kubernetes.Deployment != nil { + deploymentCfg = *nProxyCfg.Kubernetes.Deployment + } + + if deploymentCfg.Replicas != nil { + deployment.Spec.Replicas = deploymentCfg.Replicas + } + + object = deployment + + return object +} + +func (p *NginxProvisioner) buildNginxPodTemplateSpec( + objectMeta metav1.ObjectMeta, + nProxyCfg *graph.EffectiveNginxProxy, + ngxIncludesConfigMapName string, + ngxAgentConfigMapName string, + ports map[int32]struct{}, +) corev1.PodTemplateSpec { + // TODO(sberman): handle nginx plus; debug + + containerPorts := make([]corev1.ContainerPort, 0, len(ports)) + for port := range ports { + containerPort := corev1.ContainerPort{ + Name: fmt.Sprintf("port-%d", port), + ContainerPort: port, + } + containerPorts = append(containerPorts, containerPort) + } + + podAnnotations := make(map[string]string) + maps.Copy(podAnnotations, objectMeta.Annotations) + + metricsPort := config.DefaultNginxMetricsPort + if port, enabled := graph.MetricsEnabledForNginxProxy(nProxyCfg); enabled { + if port != nil { + metricsPort = *port + } + + containerPorts = append(containerPorts, corev1.ContainerPort{ + Name: "metrics", + ContainerPort: metricsPort, + }) + + podAnnotations["prometheus.io/scrape"] = "true" + podAnnotations["prometheus.io/port"] = strconv.Itoa(int(metricsPort)) + } + + image, pullPolicy := p.buildImage(nProxyCfg) + + spec := corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: objectMeta.Labels, + Annotations: podAnnotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: image, + ImagePullPolicy: pullPolicy, + Ports: containerPorts, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{"NET_BIND_SERVICE"}, + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: helpers.GetPointer[bool](true), + RunAsGroup: helpers.GetPointer[int64](1001), + RunAsUser: helpers.GetPointer[int64](101), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + {MountPath: "/etc/nginx-agent", Name: "nginx-agent"}, + {MountPath: "/var/log/nginx-agent", Name: "nginx-agent-log"}, + {MountPath: "/etc/nginx/conf.d", Name: "nginx-conf"}, + {MountPath: "/etc/nginx/stream-conf.d", Name: "nginx-stream-conf"}, + {MountPath: "/etc/nginx/main-includes", Name: "nginx-main-includes"}, + {MountPath: "/etc/nginx/secrets", Name: "nginx-secrets"}, + {MountPath: "/var/run/nginx", Name: "nginx-run"}, + {MountPath: "/var/cache/nginx", Name: "nginx-cache"}, + {MountPath: "/etc/nginx/includes", Name: "nginx-includes"}, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "init", + Image: p.cfg.GatewayPodConfig.Image, + ImagePullPolicy: pullPolicy, + Command: []string{ + "/usr/bin/gateway", + "initialize", + "--source", "/agent/nginx-agent.conf", + "--destination", "/etc/nginx-agent", + "--source", "/includes/main.conf", + "--destination", "/etc/nginx/main-includes", + }, + Env: 
[]corev1.EnvVar{ + { + Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + {MountPath: "/agent", Name: "nginx-agent-config"}, + {MountPath: "/etc/nginx-agent", Name: "nginx-agent"}, + {MountPath: "/includes", Name: "nginx-includes-bootstrap"}, + {MountPath: "/etc/nginx/main-includes", Name: "nginx-main-includes"}, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: helpers.GetPointer[bool](true), + RunAsGroup: helpers.GetPointer[int64](1001), + RunAsUser: helpers.GetPointer[int64](101), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + }, + }, + ServiceAccountName: objectMeta.Name, + Volumes: []corev1.Volume{ + {Name: "nginx-agent", VolumeSource: emptyDirVolumeSource}, + { + Name: "nginx-agent-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ngxAgentConfigMapName, + }, + }, + }, + }, + {Name: "nginx-agent-log", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-conf", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-stream-conf", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-main-includes", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-secrets", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-run", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-cache", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-includes", VolumeSource: emptyDirVolumeSource}, + { + Name: "nginx-includes-bootstrap", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ngxIncludesConfigMapName, + }, + }, + }, + }, + }, + }, + } + + if nProxyCfg != nil && nProxyCfg.Kubernetes != nil { + var podSpec *ngfAPIv1alpha2.PodSpec + var containerSpec *ngfAPIv1alpha2.ContainerSpec + if nProxyCfg.Kubernetes.Deployment != nil { + podSpec = &nProxyCfg.Kubernetes.Deployment.Pod + containerSpec = &nProxyCfg.Kubernetes.Deployment.Container + } + + if podSpec != nil { + spec.Spec.TerminationGracePeriodSeconds = podSpec.TerminationGracePeriodSeconds + spec.Spec.Affinity = podSpec.Affinity + spec.Spec.NodeSelector = podSpec.NodeSelector + spec.Spec.Tolerations = podSpec.Tolerations + spec.Spec.Volumes = append(spec.Spec.Volumes, podSpec.Volumes...) + spec.Spec.TopologySpreadConstraints = podSpec.TopologySpreadConstraints + } + + if containerSpec != nil { + container := spec.Spec.Containers[0] + if containerSpec.Resources != nil { + container.Resources = *containerSpec.Resources + } + container.Lifecycle = containerSpec.Lifecycle + container.VolumeMounts = append(container.VolumeMounts, containerSpec.VolumeMounts...) 
+ spec.Spec.Containers[0] = container + } + } + + return spec +} + +func (p *NginxProvisioner) buildImage(nProxyCfg *graph.EffectiveNginxProxy) (string, corev1.PullPolicy) { + image := defaultNginxImagePath + tag := p.cfg.GatewayPodConfig.Version + pullPolicy := defaultImagePullPolicy + + getImageAndPullPolicy := func(container ngfAPIv1alpha2.ContainerSpec) (string, string, corev1.PullPolicy) { + if container.Image != nil { + if container.Image.Repository != nil { + image = *container.Image.Repository + } + if container.Image.Tag != nil { + tag = *container.Image.Tag + } + if container.Image.PullPolicy != nil { + pullPolicy = corev1.PullPolicy(*container.Image.PullPolicy) + } + } + + return image, tag, pullPolicy + } + + if nProxyCfg != nil && nProxyCfg.Kubernetes != nil { + if nProxyCfg.Kubernetes.Deployment != nil { + image, tag, pullPolicy = getImageAndPullPolicy(nProxyCfg.Kubernetes.Deployment.Container) + } + } + + return fmt.Sprintf("%s:%s", image, tag), pullPolicy +} + +func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName types.NamespacedName) []client.Object { + objectMeta := metav1.ObjectMeta{ + Name: deploymentNSName.Name, + Namespace: deploymentNSName.Namespace, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: objectMeta, + } + service := &corev1.Service{ + ObjectMeta: objectMeta, + } + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: objectMeta, + } + bootstrapCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(deploymentNSName.Name, nginxIncludesConfigMapNameSuffix), + Namespace: deploymentNSName.Namespace, + }, + } + agentCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(deploymentNSName.Name, nginxAgentConfigMapNameSuffix), + Namespace: deploymentNSName.Namespace, + }, + } + + // order to delete: + // deployment/daemonset + // service + // serviceaccount + // configmaps + // secrets + // scc (if openshift) + + return []client.Object{deployment, service, serviceAccount, bootstrapCM, agentCM} +} diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go new file mode 100644 index 0000000000..a505cf90ab --- /dev/null +++ b/internal/mode/static/provisioner/provisioner.go @@ -0,0 +1,354 @@ +package provisioner + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/go-logr/logr" + "golang.org/x/text/cases" + "golang.org/x/text/language" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate + +//counterfeiter:generate . 
Provisioner + +// Provisioner is an interface for triggering NGINX resources to be created/updated/deleted. +type Provisioner interface { + RegisterGateway(ctx context.Context, gateway *graph.Gateway, resourceName string) error +} + +// Config is the configuration for the Provisioner. +type Config struct { + DeploymentStore *agent.DeploymentStore + StatusQueue *status.Queue + Logger logr.Logger + GatewayPodConfig config.GatewayPodConfig + EventRecorder record.EventRecorder + GCName string + Plus bool +} + +// NginxProvisioner handles provisioning nginx kubernetes resources. +type NginxProvisioner struct { + store *store + k8sClient client.Client + // resourcesToDeleteOnStartup contains a list of Gateway names that no longer exist + // but have nginx resources tied to them that need to be deleted. + resourcesToDeleteOnStartup []types.NamespacedName + baseLabelSelector metav1.LabelSelector + cfg Config + leader bool + + lock sync.RWMutex +} + +// NewNginxProvisioner returns a new instance of a Provisioner that will deploy nginx resources. +func NewNginxProvisioner( + ctx context.Context, + mgr manager.Manager, + cfg Config, +) (*NginxProvisioner, *events.EventLoop, error) { + store := newStore() + + selector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + controller.AppInstanceLabel: cfg.GatewayPodConfig.InstanceName, + controller.AppManagedByLabel: controller.CreateNginxResourceName( + cfg.GatewayPodConfig.InstanceName, + cfg.GCName, + ), + }, + } + + provisioner := &NginxProvisioner{ + k8sClient: mgr.GetClient(), + store: store, + baseLabelSelector: selector, + resourcesToDeleteOnStartup: []types.NamespacedName{}, + cfg: cfg, + } + + handler, err := newEventHandler(store, provisioner, selector, cfg.GCName) + if err != nil { + return nil, nil, fmt.Errorf("error initializing eventHandler: %w", err) + } + + eventLoop, err := newEventLoop(ctx, mgr, handler, cfg.Logger, selector) + if err != nil { + return nil, nil, err + } + + return provisioner, eventLoop, nil +} + +// Enable is called when the Pod becomes leader and allows the provisioner to manage resources. +func (p *NginxProvisioner) Enable(ctx context.Context) { + p.lock.Lock() + p.leader = true + p.lock.Unlock() + + p.lock.RLock() + for _, gatewayNSName := range p.resourcesToDeleteOnStartup { + if err := p.deprovisionNginx(ctx, gatewayNSName); err != nil { + p.cfg.Logger.Error(err, "error deprovisioning nginx resources on startup") + } + } + p.lock.RUnlock() + + p.lock.Lock() + p.resourcesToDeleteOnStartup = []types.NamespacedName{} + p.lock.Unlock() +} + +// isLeader returns whether or not this provisioner is the leader. +func (p *NginxProvisioner) isLeader() bool { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.leader +} + +// setResourceToDelete is called when there are resources to delete, but this pod is not leader. +// Once it becomes leader, it will delete those resources. 
+func (p *NginxProvisioner) setResourceToDelete(gatewayNSName types.NamespacedName) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	p.resourcesToDeleteOnStartup = append(p.resourcesToDeleteOnStartup, gatewayNSName)
+}
+
+//nolint:gocyclo // will refactor at some point
+func (p *NginxProvisioner) provisionNginx(
+	ctx context.Context,
+	resourceName string,
+	gateway *gatewayv1.Gateway,
+	nProxyCfg *graph.EffectiveNginxProxy,
+) error {
+	if !p.isLeader() {
+		return nil
+	}
+
+	objects := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg)
+
+	p.cfg.Logger.Info(
+		"Creating/Updating nginx resources",
+		"namespace", gateway.GetNamespace(),
+		"name", resourceName,
+	)
+
+	var agentConfigMapUpdated, deploymentCreated bool
+	var deploymentObj *appsv1.Deployment
+	for _, obj := range objects {
+		createCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+
+		var res controllerutil.OperationResult
+		if err := wait.PollUntilContextCancel(
+			createCtx,
+			500*time.Millisecond,
+			true, /* poll immediately */
+			func(ctx context.Context) (bool, error) {
+				var upsertErr error
+				res, upsertErr = controllerutil.CreateOrUpdate(ctx, p.k8sClient, obj, objectSpecSetter(obj))
+				if upsertErr != nil {
+					if !apierrors.IsAlreadyExists(upsertErr) && !apierrors.IsConflict(upsertErr) {
+						return false, upsertErr
+					}
+					if apierrors.IsConflict(upsertErr) {
+						return false, nil
+					}
+				}
+				return true, nil
+			},
+		); err != nil {
+			p.cfg.EventRecorder.Eventf(
+				obj,
+				corev1.EventTypeWarning,
+				"CreateOrUpdateFailed",
+				"Failed to create or update nginx resource: %s",
+				err.Error(),
+			)
+			cancel()
+			return err
+		}
+		cancel()
+
+		if res != controllerutil.OperationResultCreated && res != controllerutil.OperationResultUpdated {
+			continue
+		}
+
+		switch o := obj.(type) {
+		case *appsv1.Deployment:
+			deploymentObj = o
+			if res == controllerutil.OperationResultCreated {
+				deploymentCreated = true
+			}
+		case *corev1.ConfigMap:
+			if res == controllerutil.OperationResultUpdated &&
+				strings.Contains(obj.GetName(), nginxAgentConfigMapNameSuffix) {
+				agentConfigMapUpdated = true
+			}
+		}
+
+		result := cases.Title(language.English, cases.Compact).String(string(res))
+		p.cfg.Logger.V(1).Info(
+			fmt.Sprintf("%s nginx %s", result, obj.GetObjectKind().GroupVersionKind().Kind),
+			"namespace", gateway.GetNamespace(),
+			"name", resourceName,
+		)
+	}
+
+	// if the agent configmap was updated, then we'll need to restart the deployment
+	if agentConfigMapUpdated && !deploymentCreated {
+		updateCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+		defer cancel()
+
+		p.cfg.Logger.V(1).Info(
+			"Restarting nginx deployment after agent configmap update",
+			"name", deploymentObj.GetName(),
+			"namespace", deploymentObj.GetNamespace(),
+		)
+
+		if deploymentObj.Spec.Template.Annotations == nil {
+			deploymentObj.Spec.Template.Annotations = make(map[string]string)
+		}
+		deploymentObj.Spec.Template.Annotations[controller.RestartedAnnotation] = time.Now().Format(time.RFC3339)
+
+		if err := p.k8sClient.Update(updateCtx, deploymentObj); err != nil && !apierrors.IsConflict(err) {
+			p.cfg.EventRecorder.Eventf(
+				deploymentObj,
+				corev1.EventTypeWarning,
+				"RestartFailed",
+				"Failed to restart nginx deployment after agent config update: %s",
+				err.Error(),
+			)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (p *NginxProvisioner) reprovisionNginx(
+	ctx context.Context,
+	resourceName string,
+	gateway *gatewayv1.Gateway,
+	nProxyCfg *graph.EffectiveNginxProxy,
+) error {
+	if !p.isLeader() {
+		return nil
+	}
+	objects := p.buildNginxResourceObjects(resourceName,
gateway, nProxyCfg) + + p.cfg.Logger.Info( + "Re-creating nginx resources", + "namespace", gateway.GetNamespace(), + "name", resourceName, + ) + + createCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + for _, obj := range objects { + if err := p.k8sClient.Create(createCtx, obj); err != nil && !apierrors.IsAlreadyExists(err) { + p.cfg.EventRecorder.Eventf( + obj, + corev1.EventTypeWarning, + "CreateFailed", + "Failed to create nginx resource: %s", + err.Error(), + ) + return err + } + } + + return nil +} + +func (p *NginxProvisioner) deprovisionNginx(ctx context.Context, gatewayNSName types.NamespacedName) error { + if !p.isLeader() { + return nil + } + + p.cfg.Logger.Info( + "Removing nginx resources for Gateway", + "name", gatewayNSName.Name, + "namespace", gatewayNSName.Namespace, + ) + + deploymentNSName := types.NamespacedName{ + Name: controller.CreateNginxResourceName(gatewayNSName.Name, p.cfg.GCName), + Namespace: gatewayNSName.Namespace, + } + + objects := p.buildNginxResourceObjectsForDeletion(deploymentNSName) + + createCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + for _, obj := range objects { + if err := p.k8sClient.Delete(createCtx, obj); err != nil && !apierrors.IsNotFound(err) { + p.cfg.EventRecorder.Eventf( + obj, + corev1.EventTypeWarning, + "DeleteFailed", + "Failed to delete nginx resource: %s", + err.Error(), + ) + return err + } + } + + p.store.deleteResourcesForGateway(gatewayNSName) + p.cfg.DeploymentStore.Remove(deploymentNSName) + + return nil +} + +// RegisterGateway is called by the main event handler when a Gateway API resource event occurs +// and the graph is built. The provisioner updates the Gateway config in the store and then: +// - If it's a valid Gateway, create or update nginx resources associated with the Gateway, if necessary. +// - If it's an invalid Gateway, delete the associated nginx resources. +func (p *NginxProvisioner) RegisterGateway( + ctx context.Context, + gateway *graph.Gateway, + resourceName string, +) error { + gatewayNSName := client.ObjectKeyFromObject(gateway.Source) + if updated := p.store.registerResourceInGatewayConfig(gatewayNSName, gateway); !updated { + return nil + } + + if gateway.Valid { + if err := p.provisionNginx(ctx, resourceName, gateway.Source, gateway.EffectiveNginxProxy); err != nil { + return fmt.Errorf("error provisioning nginx resources: %w", err) + } + } else { + if err := p.deprovisionNginx(ctx, gatewayNSName); err != nil { + return fmt.Errorf("error deprovisioning nginx resources: %w", err) + } + } + + return nil +} diff --git a/internal/mode/static/provisioner/provisionerfakes/fake_provisioner.go b/internal/mode/static/provisioner/provisionerfakes/fake_provisioner.go new file mode 100644 index 0000000000..b4359a1ceb --- /dev/null +++ b/internal/mode/static/provisioner/provisionerfakes/fake_provisioner.go @@ -0,0 +1,117 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package provisionerfakes + +import ( + "context" + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +type FakeProvisioner struct { + RegisterGatewayStub func(context.Context, *graph.Gateway, string) error + registerGatewayMutex sync.RWMutex + registerGatewayArgsForCall []struct { + arg1 context.Context + arg2 *graph.Gateway + arg3 string + } + registerGatewayReturns struct { + result1 error + } + registerGatewayReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeProvisioner) RegisterGateway(arg1 context.Context, arg2 *graph.Gateway, arg3 string) error { + fake.registerGatewayMutex.Lock() + ret, specificReturn := fake.registerGatewayReturnsOnCall[len(fake.registerGatewayArgsForCall)] + fake.registerGatewayArgsForCall = append(fake.registerGatewayArgsForCall, struct { + arg1 context.Context + arg2 *graph.Gateway + arg3 string + }{arg1, arg2, arg3}) + stub := fake.RegisterGatewayStub + fakeReturns := fake.registerGatewayReturns + fake.recordInvocation("RegisterGateway", []interface{}{arg1, arg2, arg3}) + fake.registerGatewayMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeProvisioner) RegisterGatewayCallCount() int { + fake.registerGatewayMutex.RLock() + defer fake.registerGatewayMutex.RUnlock() + return len(fake.registerGatewayArgsForCall) +} + +func (fake *FakeProvisioner) RegisterGatewayCalls(stub func(context.Context, *graph.Gateway, string) error) { + fake.registerGatewayMutex.Lock() + defer fake.registerGatewayMutex.Unlock() + fake.RegisterGatewayStub = stub +} + +func (fake *FakeProvisioner) RegisterGatewayArgsForCall(i int) (context.Context, *graph.Gateway, string) { + fake.registerGatewayMutex.RLock() + defer fake.registerGatewayMutex.RUnlock() + argsForCall := fake.registerGatewayArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeProvisioner) RegisterGatewayReturns(result1 error) { + fake.registerGatewayMutex.Lock() + defer fake.registerGatewayMutex.Unlock() + fake.RegisterGatewayStub = nil + fake.registerGatewayReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeProvisioner) RegisterGatewayReturnsOnCall(i int, result1 error) { + fake.registerGatewayMutex.Lock() + defer fake.registerGatewayMutex.Unlock() + fake.RegisterGatewayStub = nil + if fake.registerGatewayReturnsOnCall == nil { + fake.registerGatewayReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.registerGatewayReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeProvisioner) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.registerGatewayMutex.RLock() + defer fake.registerGatewayMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeProvisioner) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = 
append(fake.invocations[key], args) +} + +var _ provisioner.Provisioner = new(FakeProvisioner) diff --git a/internal/mode/static/provisioner/setter.go b/internal/mode/static/provisioner/setter.go new file mode 100644 index 0000000000..4195fd6d2a --- /dev/null +++ b/internal/mode/static/provisioner/setter.go @@ -0,0 +1,45 @@ +package provisioner + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// objectSpecSetter sets the spec of the provided object. This is used when creating or updating the object. +func objectSpecSetter(object client.Object) controllerutil.MutateFn { + switch obj := object.(type) { + case *appsv1.Deployment: + return deploymentSpecSetter(obj, obj.Spec) + case *corev1.Service: + return serviceSpecSetter(obj, obj.Spec) + case *corev1.ServiceAccount: + return func() error { return nil } + case *corev1.ConfigMap: + return configMapSpecSetter(obj, obj.Data) + } + + return nil +} + +func deploymentSpecSetter(deployment *appsv1.Deployment, spec appsv1.DeploymentSpec) controllerutil.MutateFn { + return func() error { + deployment.Spec = spec + return nil + } +} + +func serviceSpecSetter(service *corev1.Service, spec corev1.ServiceSpec) controllerutil.MutateFn { + return func() error { + service.Spec = spec + return nil + } +} + +func configMapSpecSetter(configMap *corev1.ConfigMap, data map[string]string) controllerutil.MutateFn { + return func() error { + configMap.Data = data + return nil + } +} diff --git a/internal/mode/static/provisioner/store.go b/internal/mode/static/provisioner/store.go new file mode 100644 index 0000000000..bf78ee21c0 --- /dev/null +++ b/internal/mode/static/provisioner/store.go @@ -0,0 +1,196 @@ +package provisioner + +import ( + "reflect" + "strings" + "sync" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +// NginxResources are all of the NGINX resources deployed in relation to a Gateway. +type NginxResources struct { + Gateway *graph.Gateway + Deployment *appsv1.Deployment + Service *corev1.Service + ServiceAccount *corev1.ServiceAccount + BootstrapConfigMap *corev1.ConfigMap + AgentConfigMap *corev1.ConfigMap +} + +// store stores the cluster state needed by the provisioner and allows to update it from the events. +type store struct { + // gateways is a map of all Gateway resources in the cluster. Used on startup to determine + // which nginx resources aren't tied to any Gateways and need to be cleaned up. + gateways map[types.NamespacedName]*gatewayv1.Gateway + // nginxResources is a map of Gateway NamespacedNames and their associated nginx resources. 
+ nginxResources map[types.NamespacedName]*NginxResources + + lock sync.RWMutex +} + +func newStore() *store { + return &store{ + gateways: make(map[types.NamespacedName]*gatewayv1.Gateway), + nginxResources: make(map[types.NamespacedName]*NginxResources), + } +} + +func (s *store) updateGateway(obj *gatewayv1.Gateway) { + s.lock.Lock() + defer s.lock.Unlock() + + s.gateways[client.ObjectKeyFromObject(obj)] = obj +} + +func (s *store) deleteGateway(nsName types.NamespacedName) { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.gateways, nsName) +} + +func (s *store) getGateway(nsName types.NamespacedName) *gatewayv1.Gateway { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.gateways[nsName] +} + +// registerResourceInGatewayConfig adds or updates the provided resource in the tracking map. +// If the object being updated is the Gateway, check if anything that we care about changed. This ensures that +// we don't attempt to update nginx resources when the main event handler triggers this call with an unrelated event +// (like a Route update) that shouldn't result in nginx resource changes. +func (s *store) registerResourceInGatewayConfig(gatewayNSName types.NamespacedName, object interface{}) bool { + s.lock.Lock() + defer s.lock.Unlock() + + switch obj := object.(type) { + case *graph.Gateway: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Gateway: obj, + } + } else { + changed := gatewayChanged(cfg.Gateway, obj) + cfg.Gateway = obj + return changed + } + case *appsv1.Deployment: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Deployment: obj, + } + } else { + cfg.Deployment = obj + } + case *corev1.Service: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Service: obj, + } + } else { + cfg.Service = obj + } + case *corev1.ServiceAccount: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + ServiceAccount: obj, + } + } else { + cfg.ServiceAccount = obj + } + case *corev1.ConfigMap: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { + s.nginxResources[gatewayNSName] = &NginxResources{ + BootstrapConfigMap: obj, + } + } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { + s.nginxResources[gatewayNSName] = &NginxResources{ + AgentConfigMap: obj, + } + } + } else { + if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { + cfg.BootstrapConfigMap = obj + } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { + cfg.AgentConfigMap = obj + } + } + } + + return true +} + +func gatewayChanged(original, updated *graph.Gateway) bool { + if original == nil { + return true + } + + if original.Valid != updated.Valid { + return true + } + + if !reflect.DeepEqual(original.Source, updated.Source) { + return true + } + + return !reflect.DeepEqual(original.EffectiveNginxProxy, updated.EffectiveNginxProxy) +} + +func (s *store) getNginxResourcesForGateway(nsName types.NamespacedName) *NginxResources { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.nginxResources[nsName] +} + +func (s *store) deleteResourcesForGateway(nsName types.NamespacedName) { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.nginxResources, nsName) +} + +//nolint:gocyclo // will refactor at some point +func (s *store) 
gatewayExistsForResource(object client.Object, nsName types.NamespacedName) *graph.Gateway { + s.lock.RLock() + defer s.lock.RUnlock() + + resourceMatches := func(obj client.Object) bool { + return obj.GetName() == nsName.Name && obj.GetNamespace() == nsName.Namespace + } + + for _, resources := range s.nginxResources { + switch object.(type) { + case *appsv1.Deployment: + if resources.Deployment != nil && resourceMatches(resources.Deployment) { + return resources.Gateway + } + case *corev1.Service: + if resources.Service != nil && resourceMatches(resources.Service) { + return resources.Gateway + } + case *corev1.ServiceAccount: + if resources.ServiceAccount != nil && resourceMatches(resources.ServiceAccount) { + return resources.Gateway + } + case *corev1.ConfigMap: + if resources.BootstrapConfigMap != nil && resourceMatches(resources.BootstrapConfigMap) { + return resources.Gateway + } + if resources.AgentConfigMap != nil && resourceMatches(resources.AgentConfigMap) { + return resources.Gateway + } + } + } + + return nil +} diff --git a/internal/mode/static/provisioner/templates.go b/internal/mode/static/provisioner/templates.go new file mode 100644 index 0000000000..0b4d1ca308 --- /dev/null +++ b/internal/mode/static/provisioner/templates.go @@ -0,0 +1,73 @@ +package provisioner + +import gotemplate "text/template" + +var ( + mainTemplate = gotemplate.Must(gotemplate.New("main").Parse(mainTemplateText)) + // mgmtTemplate = gotemplate.Must(gotemplate.New("mgmt").Parse(mgmtTemplateText)). + agentTemplate = gotemplate.Must(gotemplate.New("agent").Parse(agentTemplateText)) +) + +const mainTemplateText = ` +error_log stderr {{ .ErrorLevel }};` + +// const mgmtTemplateText = `mgmt { +// {{- if .Values.nginx.usage.endpoint }} +// usage_report endpoint={{ .Values.nginx.usage.endpoint }}; +// {{- end }} +// {{- if .Values.nginx.usage.skipVerify }} +// ssl_verify off; +// {{- end }} +// {{- if .Values.nginx.usage.caSecretName }} +// ssl_trusted_certificate /etc/nginx/certs-bootstrap/ca.crt; +// {{- end }} +// {{- if .Values.nginx.usage.clientSSLSecretName }} +// ssl_certificate /etc/nginx/certs-bootstrap/tls.crt; +// ssl_certificate_key /etc/nginx/certs-bootstrap/tls.key; +// {{- end }} +// enforce_initial_report off; +// deployment_context /etc/nginx/main-includes/deployment_ctx.json; +// }` + +const agentTemplateText = `command: + server: + host: {{ .ServiceName }}.{{ .Namespace }}.svc + port: 443 +allowed_directories: +- /etc/nginx +- /usr/share/nginx +- /var/run/nginx +features: +- connection +- configuration +- certificates +{{- if .EnableMetrics }} +- metrics +{{- end }} +{{- if eq true .Plus }} +- api-action +{{- end }} +{{- if .LogLevel }} +log: + level: {{ .LogLevel }} +{{- end }} +{{- if .EnableMetrics }} +collector: + receivers: + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} + processors: + batch: {} + exporters: + prometheus_exporter: + server: + host: "0.0.0.0" + port: {{ .MetricsPort }} +{{- end }}` diff --git a/internal/mode/static/state/change_processor.go b/internal/mode/static/state/change_processor.go index 426feff686..1cd72f7612 100644 --- a/internal/mode/static/state/change_processor.go +++ b/internal/mode/static/state/change_processor.go @@ -69,8 +69,6 @@ type ChangeProcessorConfig struct { EventRecorder record.EventRecorder // MustExtractGVK is a function that extracts schema.GroupVersionKind from a client.Object. 
MustExtractGVK kinds.MustExtractGVK - // ProtectedPorts are the ports that may not be configured by a listener with a descriptive name of the ports. - ProtectedPorts graph.ProtectedPorts // PlusSecrets is a list of secret files used for NGINX Plus reporting (JWT, client SSL, CA). PlusSecrets map[types.NamespacedName][]graph.PlusSecretFile // Logger is the logger for this Change Processor. @@ -285,7 +283,6 @@ func (c *ChangeProcessorImpl) Process() (ChangeType, *graph.Graph) { c.cfg.GatewayClassName, c.cfg.PlusSecrets, c.cfg.Validators, - c.cfg.ProtectedPorts, ) return changeType, c.latestGraph diff --git a/internal/mode/static/state/change_processor_test.go b/internal/mode/static/state/change_processor_test.go index b25f3fbd99..0b1fef6057 100644 --- a/internal/mode/static/state/change_processor_test.go +++ b/internal/mode/static/state/change_processor_test.go @@ -966,6 +966,10 @@ var _ = Describe("ChangeProcessor", func() { refTLSSvc: {}, refGRPCSvc: {}, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-test-class", + }, } }) When("no upsert has occurred", func() { @@ -1574,6 +1578,7 @@ var _ = Describe("ChangeProcessor", func() { ) // gateway 2 takes over; + expGraph.DeploymentName.Name = "gateway-2-test-class" // route 1 has been replaced by route 2 listener80 := getListenerByName(expGraph.Gateway, httpListenerName) listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) @@ -1627,6 +1632,7 @@ var _ = Describe("ChangeProcessor", func() { ) // gateway 2 still in charge; + expGraph.DeploymentName.Name = "gateway-2-test-class" // no HTTP routes remain // GRPCRoute 2 still exists // TLSRoute 2 still exists @@ -1679,6 +1685,7 @@ var _ = Describe("ChangeProcessor", func() { ) // gateway 2 still in charge; + expGraph.DeploymentName.Name = "gateway-2-test-class" // no routes remain listener80 := getListenerByName(expGraph.Gateway, httpListenerName) listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) @@ -1724,6 +1731,7 @@ var _ = Describe("ChangeProcessor", func() { ) // gateway 2 still in charge; + expGraph.DeploymentName.Name = "gateway-2-test-class" // no HTTP or TLS routes remain listener80 := getListenerByName(expGraph.Gateway, httpListenerName) listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) @@ -1769,6 +1777,7 @@ var _ = Describe("ChangeProcessor", func() { Source: gw2, Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), } + expGraph.DeploymentName.Name = "gateway-2-test-class" expGraph.Routes = map[graph.RouteKey]*graph.L7Route{} expGraph.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} expGraph.ReferencedSecrets = nil diff --git a/internal/mode/static/state/graph/gateway.go b/internal/mode/static/state/graph/gateway.go index b6cfd49ebe..d41364288d 100644 --- a/internal/mode/static/state/graph/gateway.go +++ b/internal/mode/static/state/graph/gateway.go @@ -10,6 +10,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ngfsort "github.com/nginx/nginx-gateway-fabric/internal/mode/static/sort" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) @@ -104,7 +105,6 @@ func buildGateway( secretResolver *secretResolver, gc *GatewayClass, refGrantResolver *referenceGrantResolver, - protectedPorts ProtectedPorts, nps map[types.NamespacedName]*NginxProxy, ) *Gateway { 
if gw == nil { @@ -136,6 +136,15 @@ func buildGateway( } } + protectedPorts := make(ProtectedPorts) + if port, enabled := MetricsEnabledForNginxProxy(effectiveNginxProxy); enabled { + metricsPort := config.DefaultNginxMetricsPort + if port != nil { + metricsPort = *port + } + protectedPorts[metricsPort] = "MetricsPort" + } + return &Gateway{ Source: gw, Listeners: buildListeners(gw, secretResolver, refGrantResolver, protectedPorts), diff --git a/internal/mode/static/state/graph/gateway_test.go b/internal/mode/static/state/graph/gateway_test.go index 58c6638abb..02d4ff7f18 100644 --- a/internal/mode/static/state/graph/gateway_test.go +++ b/internal/mode/static/state/graph/gateway_test.go @@ -153,9 +153,6 @@ func TestBuildGateway(t *testing.T) { labelSet := map[string]string{ "key": "value", } - protectedPorts := ProtectedPorts{ - 9113: "MetricsPort", - } listenerAllowedRoutes := v1.Listener{ Name: "listener-with-allowed-routes", Hostname: helpers.GetPointer[v1.Hostname]("foo.example.com"), @@ -1304,7 +1301,7 @@ func TestBuildGateway(t *testing.T) { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) resolver := newReferenceGrantResolver(test.refGrants) - result := buildGateway(test.gateway, secretResolver, test.gatewayClass, resolver, protectedPorts, nginxProxies) + result := buildGateway(test.gateway, secretResolver, test.gatewayClass, resolver, nginxProxies) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) }) } diff --git a/internal/mode/static/state/graph/graph.go b/internal/mode/static/state/graph/graph.go index 4e98bf8402..88d6b4abe8 100644 --- a/internal/mode/static/state/graph/graph.go +++ b/internal/mode/static/state/graph/graph.go @@ -16,6 +16,7 @@ import ( ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/index" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" @@ -84,8 +85,10 @@ type Graph struct { SnippetsFilters map[types.NamespacedName]*SnippetsFilter // PlusSecrets holds the secrets related to NGINX Plus licensing. PlusSecrets map[types.NamespacedName][]PlusSecretFile - + // LatestReloadResult is the latest result of applying config to nginx for this Gateway. LatestReloadResult NginxReloadResult + // DeploymentName is the name of the nginx Deployment for this Gateway. + DeploymentName types.NamespacedName } // NginxReloadResult describes the result of an NGINX reload. 
@@ -208,7 +211,6 @@ func BuildGraph( gcName string, plusSecrets map[types.NamespacedName][]PlusSecretFile, validators validation.Validators, - protectedPorts ProtectedPorts, ) *Graph { processedGwClasses, gcExists := processGatewayClasses(state.GatewayClasses, gcName, controllerName) if gcExists && processedGwClasses.Winner == nil { @@ -240,7 +242,6 @@ func BuildGraph( secretResolver, gc, refGrantResolver, - protectedPorts, processedNginxProxies, ) @@ -307,9 +308,18 @@ func BuildGraph( setPlusSecretContent(state.Secrets, plusSecrets) + var deploymentName types.NamespacedName + if gw != nil { + deploymentName = types.NamespacedName{ + Namespace: gw.Source.Namespace, + Name: controller.CreateNginxResourceName(gw.Source.Name, gcName), + } + } + g := &Graph{ GatewayClass: gc, Gateway: gw, + DeploymentName: deploymentName, Routes: routes, L4Routes: l4routes, IgnoredGatewayClasses: processedGwClasses.Ignored, diff --git a/internal/mode/static/state/graph/graph_test.go b/internal/mode/static/state/graph/graph_test.go index 54e1456124..3b2fcb5a6f 100644 --- a/internal/mode/static/state/graph/graph_test.go +++ b/internal/mode/static/state/graph/graph_test.go @@ -36,11 +36,6 @@ func TestBuildGraph(t *testing.T) { controllerName = "my.controller" ) - protectedPorts := ProtectedPorts{ - 9113: "MetricsPort", - 8081: "HealthPort", - } - cm := &v1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", @@ -1014,6 +1009,10 @@ func TestBuildGraph(t *testing.T) { }, }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-my-class", + }, } } @@ -1083,7 +1082,6 @@ func TestBuildGraph(t *testing.T) { GenericValidator: &validationfakes.FakeGenericValidator{}, PolicyValidator: fakePolicyValidator, }, - protectedPorts, ) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) diff --git a/internal/mode/static/state/graph/nginxproxy.go b/internal/mode/static/state/graph/nginxproxy.go index 3b013b8f39..e9993ab73f 100644 --- a/internal/mode/static/state/graph/nginxproxy.go +++ b/internal/mode/static/state/graph/nginxproxy.go @@ -110,6 +110,19 @@ func telemetryEnabledForNginxProxy(np *EffectiveNginxProxy) bool { return true } +// MetricsEnabledForNginxProxy returns whether metrics is enabled, and the associated port if specified. +// By default, metrics are enabled. 
+func MetricsEnabledForNginxProxy(np *EffectiveNginxProxy) (*int32, bool) { + if np != nil && np.Metrics != nil { + if np.Metrics.Disable != nil && *np.Metrics.Disable { + return nil, false + } + return np.Metrics.Port, true + } + + return nil, true +} + func processNginxProxies( nps map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy, validator validation.GenericValidator, diff --git a/internal/mode/static/state/graph/nginxproxy_test.go b/internal/mode/static/state/graph/nginxproxy_test.go index 80c7ef6401..2289bb7dab 100644 --- a/internal/mode/static/state/graph/nginxproxy_test.go +++ b/internal/mode/static/state/graph/nginxproxy_test.go @@ -373,6 +373,83 @@ func TestTelemetryEnabledForNginxProxy(t *testing.T) { } } +func TestMetricsEnabledForNginxProxy(t *testing.T) { + t.Parallel() + + tests := []struct { + ep *EffectiveNginxProxy + port *int32 + name string + enabled bool + }{ + { + name: "NginxProxy is nil", + port: nil, + enabled: true, + }, + { + name: "metrics struct is nil", + ep: &EffectiveNginxProxy{ + Metrics: nil, + }, + port: nil, + enabled: true, + }, + { + name: "metrics disable is nil", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: nil, + }, + }, + port: nil, + enabled: true, + }, + { + name: "metrics is disabled", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(true), + }, + }, + port: nil, + enabled: false, + }, + { + name: "metrics is enabled with no port specified", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + }, + }, + port: nil, + enabled: true, + }, + { + name: "metrics is enabled with port specified", + ep: &EffectiveNginxProxy{ + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer[int32](8080), + }, + }, + port: helpers.GetPointer[int32](8080), + enabled: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + port, enabled := MetricsEnabledForNginxProxy(test.ep) + g.Expect(port).To(Equal(test.port)) + g.Expect(enabled).To(Equal(test.enabled)) + }) + } +} + func TestProcessNginxProxies(t *testing.T) { t.Parallel() diff --git a/internal/mode/static/status/queue.go b/internal/mode/static/status/queue.go index 5f31bbec6d..991718648b 100644 --- a/internal/mode/static/status/queue.go +++ b/internal/mode/static/status/queue.go @@ -4,13 +4,28 @@ import ( "context" "sync" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) +// UpdateType is the type of status update to perform. +type UpdateType int + +const ( + // UpdateAll means to update statuses of all Gateway API resources. + UpdateAll = iota + // UpdateGateway means to just update the status of the Gateway resource. + UpdateGateway +) + // QueueObject is the object to be passed to the queue for status updates. type QueueObject struct { - Error error - Deployment types.NamespacedName + // GatewayService is the Gateway Service that was updated. When set, UpdateType should be UpdateGateway. + // Set by the provisioner + GatewayService *corev1.Service + Error error + Deployment types.NamespacedName + UpdateType UpdateType } // Queue represents a queue with unlimited size. 
diff --git a/internal/mode/static/status/queue_test.go b/internal/mode/static/status/queue_test.go index 0bed3cee62..8ed8bbb5ab 100644 --- a/internal/mode/static/status/queue_test.go +++ b/internal/mode/static/status/queue_test.go @@ -26,6 +26,7 @@ func TestEnqueue(t *testing.T) { item := &QueueObject{ Error: nil, Deployment: types.NamespacedName{Namespace: "default", Name: "test-object"}, + UpdateType: UpdateAll, } q.Enqueue(item) @@ -41,6 +42,7 @@ func TestDequeue(t *testing.T) { item := &QueueObject{ Error: nil, Deployment: types.NamespacedName{Namespace: "default", Name: "test-object"}, + UpdateType: UpdateAll, } q.Enqueue(item) @@ -73,10 +75,12 @@ func TestDequeueWithMultipleItems(t *testing.T) { item1 := &QueueObject{ Error: nil, Deployment: types.NamespacedName{Namespace: "default", Name: "test-object-1"}, + UpdateType: UpdateAll, } item2 := &QueueObject{ Error: nil, Deployment: types.NamespacedName{Namespace: "default", Name: "test-object-2"}, + UpdateType: UpdateAll, } q.Enqueue(item1) q.Enqueue(item2) diff --git a/scripts/generate-manifests.sh b/scripts/generate-manifests.sh index 731b359272..f52743e382 100755 --- a/scripts/generate-manifests.sh +++ b/scripts/generate-manifests.sh @@ -33,7 +33,7 @@ generate_manifests openshift # FIXME(lucacome): Implement a better way to generate the static deployment file # https://github.com/nginx/nginx-gateway-fabric/issues/2326 -helm template nginx-gateway charts/nginx-gateway-fabric --set nameOverride=nginx-gateway --set metrics.enable=false --set nginxGateway.productTelemetry.enable=false -n nginx-gateway -s templates/deployment.yaml >config/tests/static-deployment.yaml +helm template nginx-gateway charts/nginx-gateway-fabric --set nameOverride=nginx-gateway --set nginxGateway.metrics.enable=false --set nginxGateway.productTelemetry.enable=false -n nginx-gateway -s templates/deployment.yaml >config/tests/static-deployment.yaml sed -i.bak '/app.kubernetes.io\/managed-by: Helm/d' config/tests/static-deployment.yaml sed -i.bak '/helm.sh/d' config/tests/static-deployment.yaml rm -f config/tests/static-deployment.yaml.bak diff --git a/tests/Makefile b/tests/Makefile index c95f337d19..a7e8e5717f 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -14,7 +14,7 @@ NGF_VERSION ?= edge## NGF version to be tested PULL_POLICY = Never## Pull policy for the images NGINX_CONF_DIR = internal/mode/static/nginx/conf PROVISIONER_MANIFEST = conformance/provisioner/provisioner.yaml -SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,HTTPRouteResponseHeaderModification,HTTPRoutePathRedirect,GatewayHTTPListenerIsolation +SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,HTTPRouteResponseHeaderModification,HTTPRoutePathRedirect,GatewayHTTPListenerIsolation,GatewayInfrastructurePropagation STANDARD_CONFORMANCE_PROFILES = GATEWAY-HTTP,GATEWAY-GRPC EXPERIMENTAL_CONFORMANCE_PROFILES = GATEWAY-TLS CONFORMANCE_PROFILES = $(STANDARD_CONFORMANCE_PROFILES) # by default we use the standard conformance profiles. If experimental is enabled we override this and add the experimental profiles. 
@@ -169,7 +169,7 @@ delete-gke-cluster: ## Delete the GKE cluster add-local-ip-to-cluster: ## Add local IP to the GKE cluster master-authorized-networks ./scripts/add-local-ip-auth-networks.sh -HELM_PARAMETERS += --set nameOverride=nginx-gateway --set nginxGateway.kind=skip --set service.create=false --skip-schema-validation +HELM_PARAMETERS += --set nameOverride=nginx-gateway --set nginxGateway.kind=skip --set nginx.service.type=ClusterIP --skip-schema-validation .PHONY: deploy-updated-provisioner deploy-updated-provisioner: ## Update provisioner manifest and deploy to the configured kind cluster diff --git a/tests/framework/ngf.go b/tests/framework/ngf.go index 45a1eeecc9..48cd1b9ed7 100644 --- a/tests/framework/ngf.go +++ b/tests/framework/ngf.go @@ -230,7 +230,7 @@ func setImageArgs(cfg InstallationConfig) []string { } if cfg.ServiceType != "" { - args = append(args, formatValueSet("service.type", cfg.ServiceType)...) + args = append(args, formatValueSet("nginx.service.type", cfg.ServiceType)...) if cfg.ServiceType == "LoadBalancer" && cfg.IsGKEInternalLB { args = append( args, From 1e7f4d963994b21416e75f1a845cac2c440e6789 Mon Sep 17 00:00:00 2001 From: salonichf5 <146118978+salonichf5@users.noreply.github.com> Date: Thu, 13 Feb 2025 17:06:58 -0700 Subject: [PATCH 09/32] Revert "CP/DP split: Add leader election (#3092)" This reverts commit a5c989ebbefb9bd857e6b962488ec05704d14c0a. --- .../control-data-plane-split/graph-conns.png | Bin 23782 -> 22038 bytes internal/framework/runnables/runnables.go | 28 +++--- .../framework/runnables/runnables_test.go | 22 ++--- internal/mode/static/handler.go | 19 +--- internal/mode/static/handler_test.go | 26 +----- internal/mode/static/health.go | 87 +++--------------- internal/mode/static/health_test.go | 86 +---------------- internal/mode/static/manager.go | 25 ++--- 8 files changed, 47 insertions(+), 246 deletions(-) diff --git a/docs/proposals/control-data-plane-split/graph-conns.png b/docs/proposals/control-data-plane-split/graph-conns.png index b383363917f44f6cd478d8d2fdb5175008fa693c..bb41cd488e53fa54f625eef29454e2f44d8c5a5b 100644 GIT binary patch literal 22038 zcmeFZcT`i~wm%vJ2-2%`P*D&N5J5mXA|PGqQk7mq=)L5Nf=W}Wbm>y169^>=f=Va! 
zfs~(W0RRLD0D#c-Ispzsj8Eu`yRcd)Ye4`2FLnSR;57hnf&&Gt0sw9T0KmE#03ekB z0MH^*>NOwX8jdwobrdfzFMs@4iit~lB=?N-p|H6ntg@ zzqPgfIwVvVYEe*JuB@h?oSv-&(MwFp2#t&h3Hx|(a2OUDtNGm2+TPjd1uXnyoR!^6 znWqqaV@prpU^^#wcdtNuxQmmshn=G{!r7yvv!}AUZsEsLZb6Ap;9K9Ix0N+@1A`+i zZC^_(stwI-O3SNNG!4Ie?T}Vbv2k?W+uKi0&k7C+|JL3a_$Jgl0BL6BsHS5qD*c3u zSEQuuGsvr76ek3{7b;3m0dKF}QN}%xAe0SU006q=tA9L@TW|~xNbCxNC=##XU!x+Y zL}T#v0RY1a&{KIG&#A2@lJp-EH8UMu?+c4d?~x$z#7BmNhtKPqY)@eApC1~SJecf{ z`=PjdbScn&js9a>&%{mAmw0KHWzsw}M4o`zCjqPgb`sni|6l*j2YaaRh(A33`73qj zGv55H&;C}C_M>CIJAm!(Ve7rKwL40FUS6{fvL}2jfbGtP-sL^oxqA`2uPWt>D8GK~ z=&f`j;rP89_KfZAdy2;UC&SB4P9%WokG;zrbChc4hK)XqjQ|Ps*}XMV7heFXP3BYG z20<&yHeRnW3qTD)VY__-Km>U|b{QuQpqeH~YLEq_%rg&6AK=Xs9tA9yQUEwm#3CNP z0EvGLT?tUDH2R*|2zP>+$QZH!QeKj+8Wud`9Nm^KjitSi&;~zx&H^ZY30yUN_vLJu z3#xq({L5G}AFjMQx5Y8cvL`miUd+%D0M~K4td6sb(RwA6(89eYU6NE> z9Q(_n)Na>O%QnZ=O4V+bU5l*UPk`yUI2_No@Ng+yBeN{5jfkz&(s72{Kx;Hl*QPiz zKD^K?<3wvXGO7K72uV=X&ikpPR`toC*=)k4H}qQ~MW<~64G6s$q{(G`PvWe zRwh2nDsPI@j{clnGoUs4ZWYIvZBin&uR)Q+$sHi^Sh&UGJn)iYdUbBDN~{|3-aSaC z!z*nLt(6gJ7(=91QQJf@uYHN(aLq^|y*~vy&~1@nfMAAH1H6BzReC48!USHrN>pDy z%r$CFX1tM@n^%!IsivVQF6$p$G_W}VIs;iHy2=f`(6>h9>F|VT`UWTd+{znOdp|7?vBJhwtRSNU~-^x zjJWm1{~PW3pDLai^MGf&_*>FtymPn8;{_-I;AruDIoXLM&vVM>kQXmNdbjG8wb2`& zS2#N$3Wgp4OHZ6XyUFlu!^f(3}Ow+`|j@KJp0Vwe~ zxpU7Q%yZ&b7s$g00FDZZI{?5#LLhN#P%8rP@cRGXZ|?0AWUQ+1KuH0E(S>&koOl6W z`}?6S=bKOx0MEQS6n)-VO9{|>Tp?-KWQ)lmXX{ z-<%zWI#C0PUz>I!T2A!vTEUzHnS^HYe)72HrQg)NU_e}!Ug05b4MsJ%bx+0stzN%F z=6rYnW{M-Wn)qhN7hA8W2zLB^PG&p|@DUMZ&Ed}KX+H7nfV|_H$wzYM`+WGa=bkN- zJ_p8WfRu$(>pg6!)2*Y0?vUQ)RHs`wC$zXM$1cHc7Mo-tZPJu`ut;IS;#s24^|x;dsK!t@2Cct z9HJeG1qyStAtAzw;@n#eiC;koF7$;s^NI4?-vY-U;X6UMzd+K&G#0U!m;9YhoD8 zotFe+%M=~!>g{(huC_r1iN!Aaj_-pe6BxS2v>ffp#uL(qopM0pFk6n?XTycHZYDX} zCyFpXb4$7vGFz>nP-Qmh7%3|W=WxNq0TSIu(O6_BD0f+Kj)MK8R5i^&mqf)wokG>h zGUXa%Q7=tXK~4J+#A0^Y!ca$8D`>G_S~seJ)Itmd#ejEv`FrwgfO$UgYg^ORbMDnY z3`K(&^T;@;>?I%{`Aiv0^%uV-!G489Yr0016>VzDa=bvTn@`B#QP-1V ztY&Hu(~c9}R7j&267|^(N%}}(`gITSFV%HI(OnW*S-qB7pW_L0s|vHT{XX{i%z3iSOk?EznasCw8`pB6h;qTvw- zUfW=*nl5H&6rKKw^LEFfhD5ee_s-LR!^4G{2)+XQ;cV5l16J=7cHRfouYOpfvU`wG z4jNOvTOHcOtzy*~!YwSDp@pBepBaF`X!eAW;|T!g=F&84hcEjg|Ija5DBH-fE5z@M zf=z8+#S0-{Zw3l%6yT1t0Xav9W>=${^ODHVjh}B4G{j9Bo?1v~&ZM=!{|HC>#V$n|P*5SI*mPd*y}8 z!xIF|pgx0ywhqVUjDqH^HNP`S1}1jVVlTb~IQ~+k`c$zL;`sG79%dnw{mOg)dxC$9 z=zmj-l>8#u|3e|AcHMAc0#@yBP3%v0YG#{`&g-?AqueI^qisiydlw#a4BH*?N6G5V zqSGXQG`rtu-{JUhJO1yBXQ$=Z z$(QGP*k7V<`VGzj&oO*?v31MkzCC}lLcy0>WchRNuhZYVLdzW9*lK>OE8l%v7wbkl zHy{F=5(##?bGKsqZCzq{Xj$i~mp#uX#)otmYL(3m>>mJj*RqTkI8tXYNMneciiyJ>G|+0!v+ z>SMJ_xjkOCrx*1+&8=}@pLftX@U*f={pC8e%gwR@NZ34<&hl9`*sZC=2oj8#l^HMCOTZ&x zWzsQOXnWy^WZTKjCDiYSdgi`XbrsPG6YlSC%%}O1t&4lW(;Vk3nb$&F!g} zjA~8~(WkbM_q5N>TTbF`abMEeo_+#WD%O?Uy%?+w+c;hcd1!k}UMP9;c0D*&nc(dbe1R`({(E`lF^1SGfw!vy za$M4E6%!x$lety3^WSyX+WK{bgAyyQK-*$1uV~9A ztprs(K^p!P|9OsCY4|la!%Yq@YPzEw7j)y<3Z$ywsv{B z(?BenF3Kc)-jZGtE%&*(n8H}j-?s<=o~XLn=Kw z0w9W8*{4DHEL0EG9X!?1x?ez-*RDBHGDXBTHpucn>Op7~2t$EgaYjY;S;?LPMpN9c z_~gy2sCqFm=k%pxE2JsT?9i-6nYb!BbnJppTQbvB5bf?NNQxOPq^C zxN8LsyBb-@#tRJZ$V8i(OWe;cmTIW1-|(h{Oht>7dF?rYPiMEwcCtHd)LFYiqdJ+yIMMTIfgh8$5>uSo$KRIfmTVtC53f-UFCJ3R zGRhfNBe}eczc=B>g9OJSo3hg}79t;34VMlR*6&yMlOZeHG*wSCJ*unNCfs0AT`w4! 
z7O`cjr z7k5Me41$1rc98*h#*Glw;5)h-=QR9R~$CicX>2M;=E zvU?-5g2(fbY9zU>Yp-*+IEE}gNpS5Dp&f(K?N5{zxG0wCY)3g1l7;6Edkn)u zP()w$&17qht`L;&6e+A|u&iS!IjPAmrek;`cFKW&wBoQ$7bzSfCb|GBlRS*ZZYJK( zY2|hAs20RCN>=F|V|t=kI&K`*LpHvDD3cSbLFB3?cNa}!Pz~O@B_;wYBp)@T;higVFB<-V>y%-hyvhWQ9-P}Xg2&4*^&7z9SG_)|- zUUZ=Iiw?IXs(s`U`1U_I6(jJnM@5Nn{E(WejaAGmdfms^4$_5c9(GyUOJG867%AA* zEodO$c-L&lVmw`}9;J-7kdil)IoPA#-%yL9uJ`Z` zgG;qMl#X#~B9kVXH)i=80$oAc96%QT96gzp>_Em*GpFz5TL4*Z+)K|T4(Z%mwqmFn| zt8_~!2iY;bn|JAbleDtXCQ=2iQgqIbB2B0$Yz}sUESyZWI}&F=C(}^vYC4r~m};OC!l{x=TQ{uiQGzik`@iOku?`m5K8~n!2EKGVi9P=euicrI@sXMs)w?%jVh!@b=2VSeX8R&Sv754m<48;qcF2*;Cc+hyIM9oUCp@&5kdOT= zV=?i?Q*iouw2@vKw2uJ}>{yP)b~Q1;;~BG*shb#dd+jCG@wE)3QfVnS2B%EW;qj>l zA1PacvF@iy^rf>!Sa6p9M|~vhpN==cvl!lR);rPK29cc;i?W3(oG^#xaF z=P%u&B^7*a!{Z6o!uB|0n6Sns(8ezHA|-7+MKopW33!wTJ!y|J=i$kscyREM(L9`v zf7^N-AzGl)A*`5;QL#FWHW9UGsFRt_M+IXyn|Jk*mE#CZnr1yKPr-Z+ocl)UuLZdK zFNX`UY)nm>Hk5N|o1xFB-L>z;6ETKpr_Hoas>8OMbQPRwl}Nb^kkm@m#dVABs$*!uw&2|iSWf2@x+sd__yvWMan=WWbQ%rQRnkc&suQ7sd@%CY+ zDOh_ATnAA}uH2c}EpZ^q7pP*EhV;Z8HS!9;ia~lgwW3D$Es0)S$%7AaxNOXws8(FR ztaD0v!@$qsPM!(RylgNHsdmB`mwX*IhdW$re!kvSxBM%y42%?x4Wdld?yJ>XIXyK7 zj)h1m6)n5@qZ-EQxTvnGIU1k)95c?zAMG}C9I|@~jMs}JiIqqv$f?S+RMy3K!m+E> zcd9g(az5>;u;;e&Hn=NuCt1wyT(^) zBx8rohk<4)8DrPPaWR$x&f*`MGMXY$&T;#$?|B^^h1S#5*p| zSWxKq&^<2`7ZFzW@DZBZu?0zvYPne-TwRZfC*o|lFkfnD?73?dzFSgO-O;fq#b(!t z-wZLroIdx>tNeN~d%KBpvVy0M5hzZiWVJ*#5+SgVm!(N@IQJ;sLFg^Kk@D7UfTY`@ zo(0iR!Ti#|)K6}!;VH&srQ+FlEUb^t9RX`+jO>!mZ(8YCh!W!+4>BC$QO z=6UMhiV7~0wsjg$M439f)l&r=$dk3L8tl;Y9pSW7N?dCZBL%b8{K)$q1!q4TO)JWoFw6@Kv8)M)71PbEw2*h{?uh~i~VqoXGhwn7LJ2~** zz<&$mzo`s_HYol;Vu#g};|&X%@-k2IUWA;Zn5mjmn>4G!c=`i5F*zR)Yg;O$xP0epFvFwaXnuKd#8Pml2o7tFFhL?TKPSVBa`W(y|9K-J4b3MPR;lSk_ z)n2GQ$E6&Z7W0l)l8GB}r&lq#RLnAQr5-}{BzmX6QLU9&!0HTpzk~8@4P(7`VH6q_ zeZg-k=fH<*J>K=(yXeqI=3xg87#n=Ouim4UEHEvHeT!Y$^RvjkjG8(;^`7Hi9Bp~k zzcB3`#c!IP=1V&)pVngR>ei=x8QI-e+Twq*35BJci1(|P8m5ifYN*aK@SIOh}ZO!(L3ryXOs@V-f%zCnfYbe%HKv~ zn*Zg$3+BBms61h6CY>RWb%^>wlKu0ravj zgGEO6oj*Vwq|0ZUs?nA1I}e&RJa!^3_D!pCslrO)p!>_Yib?hdl>$2Zr8LmucZD-h z`{WsEbN@PTkNv#MMumgr!1+bd-#{jg(R)ws+@^2qu>VOf0WN^{l*)kf6{&n0iP8hJ zETu+183t)Ydgs?KcD8Cx_0{6{{e#qq6<{g@$MpkLNTGBZkg0#N<*+0`ID`2}!>uDa za8Z)fePWdIUpgbD!OG!qkl^b@A*wS@_=iB*hhi4 znl}iMdxSUqQvWIKpfyRse~9M^ztw9(ma;aRzr_Au3-CXa`N!T6k8A{{NfczZ|FYQ* zm%li;YPu54y^&Tj-J=@;Shc{`YaY+Co2d~v4KQ>?Fyh3~;Mr9cM4rU|8#-WsgV39%LF|9PE8Tn(u2@l zjZ7qBZUgyJgyj(a4|DuI!YV5H=_8jXFNnXIDprizdQsC23N%VOX1gd zG%&z<4UoQ)yOr==B6Wk?t1%UjA;3RC!aDxR> zgaw#T%s2{)ag0Ejtt9}h&{6>=A`m`;H-Q)MH*aYHH|T<*PnhXo_{?O$T4Esa|8P;I zFT)DR5dX}IZKi$%RPD#x5Cg zV}4G4WJh0elhxQRk$a7^c-wQF0UK)Fi4V@Uy4CwR9z9~kPO)Jx9KXabH_5X8;v=Ep zBa!>0akY!;)}~K5!G}$8$KtO0+F?CW}f>;PSb-7+? 
zLW29L<5hVlHUAOTOrl)xj}s#>qc<4?#;jSjOhAJX67fGKi%rpKkq~M|4Q`* z#4V|7uU=}quko*!K>cLuCd)9r>$P*|TTxP!0oNz1x%iutxcObWOChi3`AiQx*xVif zZtYfDggTGsK3a)0ji}}{or-B;2NZ|3NYrq8ICiNZVoQ>> zx0RqV?UnwY6iM{D(zy;VDdJ7f+!LXGl4F_EUSc&gOAUEPjL1iFzUK=KwymTixo7AM zMafb1l^F%cF1dB|+&PVT=x&yi&7GC=on}(v%`42>=dQE7-v()xRwut3#;E7=9q&in zWs!3dW6|jN)h2R&v3wXh$LG?`-(eV&WwU6f*^F_JbqQZDOYWtZV`z;qy#tM@@vHyJ z+rsRqY1rRBnt6u8mFr<8(#04{IfALRa2##JCSr8<3i_kD8a<9 zP>WC@&;^bQwFgGe#xhmKlGAYkc3ViV(i~-L#FaRrA2c|O{3>{TX<)<)wm^9_v!%=n z13TWfz0x!EpNxX$QAYGQ2{1{JCNoyE$U2jcbq8SK?1!F~Zdq|JBhgl^h+0bTN4s^J zr^Ca0>;VB1WI&Jm6&?4>7^~A?b!=Sw*$41PE3!m3!bkjGkvD0Qpl6=#|N6f9Y%8zV z8~1XS0ND_}y7+N2e99w(LN*8szklLsNyBQIu=aA9{ABp9w69WC+`$`A6FVWgLh9lu zQpuX$GOr`pFK1*8?^+$@J0^~y6*vL0nZ%A)X1^KYgmkfXm_jIuAL+t`AVAAOp<0YC+FMdX@J$B6V1J$owy+Se?#uO;n<~=-%4ZSxzwYH46A&GyXJa zQ&+H5%)dp1F#%h2fj@Ha6C?LZIZlAPV@_loC3T1PD4pw?;pv0;)UiX>m}k08#$QgB z7~p5$mRb?B2T#-GXHPtk(O`-uR$7B(1HyycAGzczoq7aK_NqDKMdxZ%d8hcAjHOKP zhY9dcW}@Zzeb!eUy=cG!LNXL%Ey7Hq_jLEB5J|FK1@rm$|c*Z%1NjzUe=oF$v zYUQ4g(PRV%x359l)hIoG({7=)WndEHDM9rWAXrkw z^6;-Iv68~UH^IjNK%1%S#_7NlRmau*m2G+>Alxf%$f>lw%5(JbZCavlOSqz~0tTAw<8e^YqbIR=idtO8v?GHeVHVd%1~AXgW)3`6aq^Kp$U`qR!QyX4 zW3{|Poj0#*TGS4ky?3DNYMXtOA)VYi!I86d>HhrpygSP^<#Pho6(71 z#}FgV53~;sD@RJd$xYlUe6g9cc}FSz3!(>GBFBejYhbX~N9!uQbv-NAHHImDN$g%6 za#xz{uf9_qZMkK&5r+P&rz|@0Qkub@J3797;c*ieS2d4IY<$k~liUp|3`{}a|A|3X zd{jXiYG27#!tUu+`n0|xI?e7h^`!|oHJ0HB`+MV^ioX$(QCf99SBTxqN@5tfX-hXg z0CU{=exT4f1lJr=j@C1gxCeiUPF>?*B$vrY`*>ys^ZM>ZpFi~^-hAb`5!sI52Yyy= z7o6c|x#Row0N)D^{s3oByEt~$e{w@}^U_Y@B`K(}?$*6SqUS%!C-#1hv1DLXF`BE# zH{s)@mksN6Ty1?iO?m#Z45cEHaF6tNTMp6aM$zdDlJ_31X(6>2*U82)@@&GvZ_H7C zpALUMr%paJlgnQER7>@~+nh7{g>3eL<%%i?^YX_VC>Rg1CIJOSvCQ*lWev+Y!}HD3V*2^eitN0^SqK;_uFm5g~lX zr@ZlO%}d0nw9sAO2~@aQj1@{EHh-wE6|X5g5mWsD9jhyx7T2@W%ZH?G*d>1+vTNBX z1?({FZ^pWot~p1KmBL_~9N{f-hbp@!9QjK&581h8)Pfy)NOu!oC0aLt!4C0jpLjP* zBKt}qql31L5VdHr*tj0mI5?~uTb=e8Tf%~~b({E4cLK!1YAJCNlM zbEsT7&_lBE9kRGvQ1oE9qU3};tj?~xVh}bvZ_h8AF^^(R|;}CZlT~lJctN?U>o&1 zdr7K2Ft>|AZ9x2>#*V7to!Rt_v-Mp|5}Y&bhoad=d>)PlvRmy9Wjk!zY3X-8{eFwhRq{ZRj#JvT?2vw5*!!F`DM&s=?!;`zlf2Hq#~RHQO#fH@(bK+ui1k6&h4k!wG8DeK1q`t)&O!8Oc=?;H@%D|sDF)8`<>jZocp!s6!viG zuwA^V0I}#zeSfhDIPM!R6Re6))bl=T`z9l&B06zo^RDrqj(w*(0@7jwzvE7;*p(iu#Fq+4VZzc*El z6XBAlkYL}Z7CobC8w*9?2=z}IZri_o3ibbNC2b$}X+=mhF@tXRc!U>gg3hc|jCyB04ZQCf*qflZ6^E|i30ZJCj_Vc1QVR(`0^Z(Ih$LuSxM;K6wi zveJ4Cu`fpTrSzrwcu0YcTo}(%_l5`H5A9Ee)JF|olwoaGA%#=sw$?~SGt>ZXKf@8B zZxJn|LTFGt$ObVILf$`}WM>0flZpRx8OC#HOP)7_KesEhRS?-;P*{4R$q?Sf0)_HSQYqemjb2DtD299L#aa z^K4D5S!t1#7{7adz2Q-tvsc{Je$l;N#-%e$*v=dNU9y6qbku;movm?6L#%M!Wt0&u zT3x{;%COs<5I1e`v_FdESuT#CwTyQm5yPbFH;b_OvKu8>XFJ!t#oqvs1tF0xBO3DL zdfZT@WzfepoDa{&`S7T!B$F7C+oO$9hsU9XDugmqAfr*r5NrY(>R*M-!otHpkJ9r# zPa$)*mRQG~0fGjo;xClR^dlf(nOTX93@MNmnZ7y%OvgtjL%LD6^kh`g4wXIVrmm8q zhO}4liZkTam7bWS7BG_Vq8Zzvp|)C@2&6B<5@jSQs3Nmgel7oy(?-+JbrK>;l__k_ zVOi!VM}wG=Cz8CCPeXx6!3Ef;A=%;ai2KU-n<+YAoV{OAYw=ICBGuiP>t~yzLHwst zaPs}AxP{&y?$Eta)=Twz#Q}AHOAH$E4jmB4vo3+#u;C}V_YF%vXNy4Y@pimJ1Q%9) zT3Ue4U^-vMWoSA^7s-<{$jkS;=v6h60CBH?esR&b{5W{x3?IRXiV)~=x9XD*j8^@F zWql`tOosTOiWe`qi5Ije+Hm2PXPX;w9E}+*G)H~>9CwgRLA(a(pP?#d9K_6jXg7kX z+0RW-<8vEsw$qi;);HLF*y;k>*wk}rJFmeDj~;g?Ohjy}YO;wBeb_S;9kEu4r9Xl- zB-{kjCZ%mq_UQ6eH&vD?T4!dLz-qE}>Z3>KDC;{5H{#+K^yVW+%W}6XQyApXV+f(d zlo7SaWYOfwq2dvZ!eR|yp5b5SJOW0xUZqS8alCRQ`Yl_#-<~gt(c$|?jPt&EO+smS5*_BVBXh=L(4kuso?#e*M*ov*Y+4W6X3amlEb<#pt8RL6iN zWX3rS(UN_^wf4d?TK%b1Rki+LD4OgxeoavlP(RH2HI~oP9+kc0dR_=J2#bG-fk(@~ zZO*(mI&$WaAFmiD9?#UbHOSBx4qht<#;@kqdIaNkWEc54@j~95E){5R8zN$Y%cw+3 zQ-83qyrBNVz5BW}dUUktS~VX}OJLg5^g&$*F5%M%BC?Ur0cxFJ@gfxnZ|3!0K1<{8 
zmJ9`U7x|kDP*+u8JZ#i;%Ti*v8Rj;VZ&auze*3mS%6;FWr*1=P)iBqZ!e)tY=yKr{ zopqL?^SvIcB^$LZhzhCJxM8)UtQhIjWbu8}hJr2#)U+I@COUPScLoG+gtShkWS!T&l4iu{J-kzdNXp5R zb5YEzZ7uY>Arav4(+PR}Xly>&teyAKQ#j*P0n9D7j?$G{B>!0w#rjwCn>VP?ojwBN z`(^#L4?~6?_8gq_ANfx2XLFOPpy@9_=OxVc*(zlEk$2b*nX+q<8y(fID%z)xESjd# zdpCwXZFirF#R||q_N7e11~ELW7ic_i5ltys@w?w|Y=?UJrtjB9%mf2pVu}u@orYF= zRKdE}`WzJ~C%T45C#n{N8wD)5Ig>nFaWZCqqGX9M_uObH!`^r$d};0%bWx2EHi8k) zSK+qd&8nQK>^XJla{KGxLT}MkZV)XQC)(ZwIadKig5&!L*MnI3@8%Lqmf3cR1-GwrXUf|Pj z*E;O@y|p+V^bA-SX}m3+&FVoPE*uhyW$Gx6oyj@pY9tVIJwFnj_T@%pUuX+nwpN@8 z}VNJJLrfNd6anCT*juQv2}1~qnfHm-*E zBYg<2nF#@iYU+mjRFvLx0Wy2}&}mDl{^%(^UjZi4t59Ql+k5{I|DboGT6IvQ6MX&S zRPF=F^m@{Re&JMCDUg{2`&GDs!l5Jv>QBCV=7JtA95*N&%2lDHiy*n0F2dt*1=3~d zI`pZ~U3$*RP7Ca^HAcU)W@pq#hP1%Lg_x09*w=Bei!MYCylZAxtH};I6=7T{?Cb*B zi$-Q>qd%CG-*>;ZeZ%SeFMP-i>6{IH2v{0nUD&zlh$r1N<`XqMn)>sRyCY78LvWEz z-F~}AT~qB$d=tEbaW43ry+T+2SSt_`+A!IhCr2}am<`1BdZ}8jJ;SP2Vx=u+NDelY zaS4V7cH~@z@g0Klr$(QmpU7)F)_m|0Lg6gyDt<$;KPR6|e7SJW%5V3GFQ)CypP(NX zvngCM|B01uo*exhS`D-i|H(xF!Typ!i%YmUe{hJ%n*HfZ$yuK|HyG)k%m!W5g*?wF zgN5$ClRQrYsKiy?zG-PWT8Jy36jkR}{qXNzA1-v>WoU8ZLK2FSO!=baH(hI*`EFcW zSyonw=_Xq95B^Lqz6ts|T3a$h@F!Ly7@C7WOxM|Ku}M*Oiv_WA9l?k*zgS+O!a#=F^uohaiXPYp*yTiplI^_&8qcT```&l%Gw8Zd<<5OLMs=YBUM+rVXIE#^OZU zSc)~oy$C8?sm5Mpp@hTjm~ABCOOhN~=s~vc`#vU^ye4R^XM>)gucpzcdL2GmrWur2 zb8>=rM7z485`*+!4G7ew<9p?r2dYcGz9(?5`fdn8Va1D7VfKc8nwItY5qKhbb&hc{ z4yqmqhsDMk6zzVIHBwqfB8%V2>Pz;n+cA7Jb;+q32;Vt8ag%4-uOz_P*-9Z{4y9Hi z_?~_y+)=!EyC+mgP&P(u1?1#2R4^v9RQsgiV*f*i@#i|3@X-A|{69lrWC046?{d8| zJHxS3_5G$CK_?b(j6L34KSiK+UM7qZ_Ap)PoIT~kybO`xrpmjn3OpzXg)*f5lsVk4 zvMBE18!Yo*XIoK{++nv^<1yX>olEDekI16Q3=t8Sf%Eryb3G~nmn#QYYOfVlhID1h z;Y}j&I3#Oo&fU3#^#04-^w}mvpCmH+i}IGhw3|dzve%SS7X+|70!1VVl2`Vq&z2xo zn9*?q7i<*L&oY#bOVN%*FUSRl7(iNkt~^kkP+NWF@Nrjtmh|`F&9%>t%6721LF0|a zMZteTGZ<+s-D8JueU;m!0D73BuKi(fRoL^N%>H-oNhz7On^@(&?#a__f4s6-qYs}J zz5KLNk~J)pjM$qbV)%eZ%2!Np8>0^v&OR*-!eXWNTkZbt-UupZ>awI8pWqt)Wo$<) z%Y6cq%$6a}mGOPbayZN~`JEOWi|N4qRz-KqlBGnd_yWF6q`4@b@Kq9Y1riioD3y~W z%ax;-$;{%zqib$g>fo{UguP6Uegwt%!8n>b$QxAQVurA*k=>%$z=;{^PS5_yMSk|A@eDSl=W zS)0e_-AC(kGa}a=YeB2X2B@-J&`xsi(P=gzdWN?x6`K@{m-9IJIU)-h1KBXcFU5jDdQ)(^=mcwZb>6nHdEo%>tGBl zGx^Ke*DC#v12v@h40{G+-^jh>mAv%dqt0_J7ZVP%a|MdHHx2wa&hkUG#&Chceom9P znYbvNXEirTkVepg z!Zj|r;ij$6;-Z>zv%hQlFS)iB{o6wT21A{`?t8!bzyCfk^D$HnZ(mqy_eeTK@0a>F zJ$LwxRGIZXo5vUsAD^xH9q<7?E(z^NM|OWGJg!?m(M$E*9sZQULP{No1)9#zyt~}@ zpW(@wLvyxJGxkaJA?$J*KFf^sqdM%!JjDHcCtRxG7QD#Q&#UuGRkD>vuOuxwSWZV& zR${r#QS>eOA&7kkoU zaNk}bgNv=^TlboX(zgS!5_^?e6yACpkYbDZ*7*J-iVjk(+i-PK-VhgBxUQ8L1G$_G zITCX}2VNw1toQOQHIV7|?)LEBIdXXu)qov3jG~_lZjzb(DlW59YPlb_hx;Z=;;SJM z`E-N)t4jcXOmh$74jF!^Q%$S!aghw~j5k@HclrB3f}M2co1sqlkEj(V-3~7jH&1l3 zbq|2RmDHVrTT;m&)(uTu`VnklN$^EdA;-`S5 z`fMM&+Q0q^C{NhqG_3ry2v^PaJcv^+4wtV?MYFeu@VG!zD5tFEJq)Gv!bJD91k(C|xq7!++@neYan^S765?L#PuWt%0{iv0rBNrpdIP-M& z@rp>zIk#^2fH?s{3{UGEX@)*@7GBkm2l`_oV;P7xT5IUXqeM+D-4QucVz{EwE28ca zs#f@}jypBSHnP3mUF>{`CJ}maFop0IHL5ScQu1Q>U`36-AhQ-E)jQ+B&l=xQ%U}8s z?h42BGLd&*2OPT{CCK;@Bs}4iOJfpeDq%3FDFcc(Ahq4M3%E`iMRg5E?T({bWAxPA zGPAGoY!$|`xs}|;9lN>f*=V-Er+Xk5#U~#8nV1@^50Mo&Dsbdq(&2S0T+8nk?EEhA zrpe-gRo?3uOcjH9d<~PTVby1aA?`t#jC+!fyW=FzQl9|uhA-#*FHh$#(=W%*j-^{d zQGl*UF5+$UAU0=^%{TtYbw{q|#`FFm5c$BVdDF&Pf8~w}JdB8}YlsE5h#i3;x>SHpItY?5#Q6JflMi zJOzqUYR#Y+0a5*Uc3v5{r+D1wotTHQRTTciu7p*}^;*qLq~`wCep=SypBGU?8CK>> z!qB`}NuWm*dy7oC(oOL3uf+gt68CVDPi(S32YM~HRP~f!_qbblMk}YXGy?<|p_+Dx zHKg5QRD;1*8kY)-823t!K{H$y5-%MXgn{B{-yf|)ueJQnB+O6_Mv%bLGA&X1h_gXE zhF;y_FE?ns+*1k)EuH-CCO=E!Km8Gl-LxE)tzZ_lv2S>D49c8R`D+=7kfGrO$PHN6 zRC%(A9j@p(;oEQEi=@}uDqP;uo#D)bwkZ#!T&h2cTi}x%R4E%IhpR~MRFw<2ykTas 
zNco6O`d2h64oV9Wk_~sr#-|d+yZj&&Of{sOZ(ph;x=Lxfl~5XQv3;uP&P;gS9!o7H z+IKI{9z_G`3Jt(5bw2h4b`AGy9TlP}b#E~ch8*|2lF}PiN$+#bCHqv&rJ9SIceKnIlzDK7( zLWk4P2x3&v#sj&7uNV>W<-wD$w{@|8&&^QuIO0-_USw$I#fb=pE03o5=#T?FaH&X# z6vG!8R5NQYyAy9t7KqYuAAJlEmlSCD+G;9+h-5oMU#B_ zqu{5Mq`rQod9NvpVHqy3Sk39+L^#ViD#h9ob|ylIbRTF{D{zi3kmToG86hO#X!#~> z7R8V(h}a%=7Xm(Y9Ug&KRD(bkDkm{PFFwh^lJXn9Bx#m?CCM5$H(-yR%g6*LjEQ!} zv@aH_;o}x4up;J3mhIW%tUomB{g z!k2DnC$E*NcJ+>z@+1TucrCcVX` zaZ9>HXUgq0?o~C9p+D#URn3(LHFa+Bgn(2bKwTg#id9(^8X%EK1quqv(y+)T0BVhk(Skv1oZl-BE?R#!WJH9p|j77%Kn-Kg0 z9Lbp7zgEWR%~m?J*l0pcBaKvIF+_~9>b(OLuf>q~H;t~ZSZh|Tp(w3^xmfwf((;mk zmE)Ch$9L<>ao#9}I1_t;#+I?)wNO+vY@oVO3n5met6gc5dK&?R{j5ZZ$$#bc*4nXO zL38ygM#po2l8c>txqZF)d4V&@c(HrL*y;U?Q>+1eut@vSXD`usC>Z}ET&X6<>(S1N zt|;4qM=vv66wem-BpVof%T_!UitVmYCz*Xm%te!4Z64>>*D{Z&6DvDX`6d$e#_)7p z|G^nzVNHvVsebuNz-t%WS-CE%U!av^lgtk0Zq~kW<=^cu4dass1{VIQmF(4crxJ%S ztLXtMx}r8uftSX=7R?(CXSkewdx6W;>}w=4<4mdDg#NgV)^T#n>A|2cwETexi#jj~ z{G!baft}pXsp!lm2hmrnCEGaJz6dErE1V`*oR0})?12p|O*3cvKW)y6uYGcw5U&a2 zq^Bj(zeS^NN|?@^64vtuED9NRgXZUt`BUR59al`3pVlRF$ufJKHG5ZAfxhkvu~4rkQ7z_s=XC$C)KYh?Z|>5`m?}I9x<#U$2oSw$AAlw^mojetwW*{^rrsV?j!OvUpXMTvaryfOyFx0o0b2}sV4Wq#|{Ia zz@z?@Gg7o9pmYahIFOn|NqQ>l2T0!yQ)rQrq?;NsGJob~OS}GC!dLREGDNvRF|bLv z$=I9znZDhkB(4PXlk;QsLgaoYIU8O1Xm#N8NnDXig{vX2Z`pbQ)roHc(|r2?KtDvi zn_<1#_k{NaZG4A(H1gu&b&r-Ox5RfJ78RK~An^f|(L^qJwr7^->sLQyy$K(~wc|~f zMCF?N1oi%po4DE=9e=J#1kkI%r}mw&56AsclR&ixIE?X9#AYZ=0R@&Ymwf;rn*ewv z8h|tb&_5l23G2i?7En)xQ)b&H_C4=}fyFIYPx~IZRe~bGBiV@twV!*v9mLt|{iVBg zj;iW__Ne<+%iCN~&`I@s@*z;q!Z+1$0=6zS<21pL<#(q^vlE|aM1_`bF!0{Uvebe+#?D4X+d)n8brtwfe(#Apob3?Blh{2MPpPw5t2GKx zxg6grY;a3Zy{=qR>b)F#8O6+e|9)D-SL(AOXZGc-cYAo*8BB*!Y?Y!)?#&|t5xcjZ zZRHNZ#(`?U`#fhI0GtZrjpB#SBE(r`zGvTDoeo2-*ST2rzqIbP(20f)3CB!)nvp&> zUyFRVq8Zx|eM==d4%8Ei+(6G&4gdvEE$K7qmXj^dlGR2VK_~M|)$ovXofTz4P_q&dH z_16ehaud{{`R(tGcjn#>#b1BPAyussPE=C|k=kC0*_m^+=qW6U6-mET!u2lI$D!pKe)A+_8quX$v0rJp#;0Z=s}VT+wdA# zKJak=%iAu!{dk@avtt|h_qW{#(VeE@$;P6c`QN)eC~?T1Lx}SGFEFN zu^w4h!i~=bgHpIRq2(lBQr&?XUV#cNUpqKNzUMJh8kB>IE@)s4-918MYvoU;aLr1q zJTS7|9e96tikcxmb>ekxuIth4)lPCiek!rPNv6T!M;Uopj~}YIj?=cDVLY$iK-fig z`i{}8W0#x0)CH!3?)yMoeog>M$x4v*V^SRQZ_^QCkn}x%;%X}aq}&zVF5*~%Ql`F< z1ja<06Z@7vbr^Y%%4BP{5kSQ}xAv4%ijcemcX^J4m}D(LR#zagPeFSJE$K$Yu}KlR zNIus?_>;oTy=U}-pPTsHXWl+ZU(wpiuB)kf;`n-C_)>7rC|9QrO>BX;VY4z6M6w6% z?@7norsyCl3yW-63}?H7&9W2*9%+6Oy|VyUplwI?-r&S%hK`V*IkRi#7!%eb6Ky9e z*NV3wN;+qxij)m&nNrx&IjgI}$a7k9)gJ-Rr|Q2qX;F7)FkR+$a`g8I^C)ymdQqWS znCR)%`hY4#TGR((->Txb4>T0kh6YBoY5Bb=^0{nNAuRARWymAEbI9klg_L{tiXJDb zT4sa#Xx6B4g-sLsOf@+3iOR1Hhq zVe*ksS)f&ghi}ill%C{$KOfuY+S>^$n{zN~+}+KO^FB1#pn_xt_6Pw3nYaF0!M`?$ z#}NeE;GeP%gD2Dox|Q5ltb$>CATTWlzOXgU-Tac%^R0wBv8|<}r0?dySgO1((KP4AS(~^(^%ra+T`=P03 zf(jVaCjiEbk|xI6mVKpxu7c&6^^~?&%UH5M*eegXl2WA3Q}~+43w0)BK<&lx<1h=+ zWGtjD>61V}qpJ*FOb9)YIK95Lz&xWFzxs2;UrlZQyO&9r*zzlziu4x;Z;An~T9Gzh zk(UA@gUy1j1p^O|G29qw05>*(Bi)Qnm>D6=j7*Qh;bw4n=B`QZzYyTAUc!XL{yhQD q)5{ki(EUq7BnB6JJu(1?|2H(K&qAzs?Yu0})y@Wenr!X={XYQYbVcU? diff --git a/internal/framework/runnables/runnables.go b/internal/framework/runnables/runnables.go index 4c8aac5460..d960475008 100644 --- a/internal/framework/runnables/runnables.go +++ b/internal/framework/runnables/runnables.go @@ -34,33 +34,29 @@ func (r *LeaderOrNonLeader) NeedLeaderElection() bool { return false } -// CallFunctionsAfterBecameLeader is a Runnable that will call the given functions when the current instance becomes +// EnableAfterBecameLeader is a Runnable that will call the enable function when the current instance becomes // the leader. 
-type CallFunctionsAfterBecameLeader struct { - enableFunctions []func(context.Context) +type EnableAfterBecameLeader struct { + enable func(context.Context) } var ( - _ manager.LeaderElectionRunnable = &CallFunctionsAfterBecameLeader{} - _ manager.Runnable = &CallFunctionsAfterBecameLeader{} + _ manager.LeaderElectionRunnable = &EnableAfterBecameLeader{} + _ manager.Runnable = &EnableAfterBecameLeader{} ) -// NewCallFunctionsAfterBecameLeader creates a new CallFunctionsAfterBecameLeader Runnable. -func NewCallFunctionsAfterBecameLeader( - enableFunctions []func(context.Context), -) *CallFunctionsAfterBecameLeader { - return &CallFunctionsAfterBecameLeader{ - enableFunctions: enableFunctions, +// NewEnableAfterBecameLeader creates a new EnableAfterBecameLeader Runnable. +func NewEnableAfterBecameLeader(enable func(context.Context)) *EnableAfterBecameLeader { + return &EnableAfterBecameLeader{ + enable: enable, } } -func (j *CallFunctionsAfterBecameLeader) Start(ctx context.Context) error { - for _, f := range j.enableFunctions { - f(ctx) - } +func (j *EnableAfterBecameLeader) Start(ctx context.Context) error { + j.enable(ctx) return nil } -func (j *CallFunctionsAfterBecameLeader) NeedLeaderElection() bool { +func (j *EnableAfterBecameLeader) NeedLeaderElection() bool { return true } diff --git a/internal/framework/runnables/runnables_test.go b/internal/framework/runnables/runnables_test.go index 7a9b8968ba..9f34d9ccba 100644 --- a/internal/framework/runnables/runnables_test.go +++ b/internal/framework/runnables/runnables_test.go @@ -23,25 +23,19 @@ func TestLeaderOrNonLeader(t *testing.T) { g.Expect(leaderOrNonLeader.NeedLeaderElection()).To(BeFalse()) } -func TestCallFunctionsAfterBecameLeader(t *testing.T) { +func TestEnableAfterBecameLeader(t *testing.T) { t.Parallel() - statusUpdaterEnabled := false - healthCheckEnableLeader := false - eventHandlerEnabled := false - - callFunctionsAfterBecameLeader := NewCallFunctionsAfterBecameLeader([]func(ctx context.Context){ - func(_ context.Context) { statusUpdaterEnabled = true }, - func(_ context.Context) { healthCheckEnableLeader = true }, - func(_ context.Context) { eventHandlerEnabled = true }, + enabled := false + enableAfterBecameLeader := NewEnableAfterBecameLeader(func(_ context.Context) { + enabled = true }) g := NewWithT(t) - g.Expect(callFunctionsAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) + g.Expect(enableAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) + g.Expect(enabled).To(BeFalse()) - err := callFunctionsAfterBecameLeader.Start(context.Background()) + err := enableAfterBecameLeader.Start(context.Background()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(statusUpdaterEnabled).To(BeTrue()) - g.Expect(healthCheckEnableLeader).To(BeTrue()) - g.Expect(eventHandlerEnabled).To(BeTrue()) + g.Expect(enabled).To(BeTrue()) } diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index a77db52c3c..2ca73c7568 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -161,27 +161,14 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log changeType, gr := h.cfg.processor.Process() - // Once we've processed resources on startup and built our first graph, mark the Pod as having built the graph. - if !h.cfg.graphBuiltHealthChecker.graphBuilt { - h.cfg.graphBuiltHealthChecker.setGraphBuilt() - } - - // if this Pod is not the leader or does not have the leader lease yet, - // the nginx conf should not be updated. 
- if !h.cfg.graphBuiltHealthChecker.leader { - return + // Once we've processed resources on startup and built our first graph, mark the Pod as ready. + if !h.cfg.graphBuiltHealthChecker.ready { + h.cfg.graphBuiltHealthChecker.setAsReady() } h.sendNginxConfig(ctx, logger, gr, changeType) } -func (h *eventHandlerImpl) eventHandlerEnable(ctx context.Context) { - // Latest graph is guaranteed to not be nil since the leader election process takes longer than - // the initial call to HandleEventBatch when NGF starts up. And GatewayClass will typically always exist which - // triggers an event. - h.sendNginxConfig(ctx, h.cfg.logger, h.cfg.processor.GetLatestGraph(), state.ClusterStateChange) -} - func (h *eventHandlerImpl) sendNginxConfig( ctx context.Context, logger logr.Logger, diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index b479b8b34e..5175195a7f 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -141,9 +141,7 @@ var _ = Describe("eventHandler", func() { metricsCollector: collectors.NewControllerNoopCollector(), updateGatewayClassStatus: true, }) - Expect(handler.cfg.graphBuiltHealthChecker.graphBuilt).To(BeFalse()) - - handler.cfg.graphBuiltHealthChecker.leader = true + Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeFalse()) }) AfterEach(func() { @@ -177,7 +175,7 @@ var _ = Describe("eventHandler", func() { }) AfterEach(func() { - Expect(handler.cfg.graphBuiltHealthChecker.graphBuilt).To(BeTrue()) + Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeTrue()) }) When("a batch has one event", func() { @@ -458,35 +456,21 @@ var _ = Describe("eventHandler", func() { }) It("should update nginx conf only when leader", func() { - ctx := context.Background() - handler.cfg.graphBuiltHealthChecker.leader = false - e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} readyChannel := handler.cfg.graphBuiltHealthChecker.getReadyCh() fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{Gateway: &graph.Gateway{Valid: true}}) - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - // graph is built, but since the graphBuiltHealthChecker.leader is false, configuration isn't created and - // the readyCheck fails - Expect(handler.cfg.graphBuiltHealthChecker.graphBuilt).To(BeTrue()) - Expect(handler.GetLatestConfiguration()).To(BeNil()) Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).ToNot(Succeed()) - Expect(readyChannel).ShouldNot(BeClosed()) - - // Once the pod becomes leader, these two functions will be called through the runnables we set in the manager - handler.cfg.graphBuiltHealthChecker.setAsLeader(ctx) - handler.eventHandlerEnable(ctx) + handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - // nginx conf has been set dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) - // ready check is also set + Expect(readyChannel).To(BeClosed()) + Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).To(Succeed()) - Expect(handler.cfg.graphBuiltHealthChecker.getReadyCh()).To(BeClosed()) }) It("should panic for an unknown event type", func() { diff --git a/internal/mode/static/health.go b/internal/mode/static/health.go index 4993b0b40e..a0fe4e9b59 100644 --- a/internal/mode/static/health.go +++ b/internal/mode/static/health.go @@ -1,17 +1,9 @@ package static import ( - "context" "errors" - "fmt" - "net" "net/http" "sync" - "time" 
- - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ) // newGraphBuiltHealthChecker creates a new graphBuiltHealthChecker. @@ -21,94 +13,37 @@ func newGraphBuiltHealthChecker() *graphBuiltHealthChecker { } } -// graphBuiltHealthChecker is used to check if the NGF Pod is ready. The NGF Pod is ready if the initial graph has -// been built and if it is leader. +// graphBuiltHealthChecker is used to check if the initial graph is built and the NGF Pod is ready. type graphBuiltHealthChecker struct { // readyCh is a channel that is initialized in newGraphBuiltHealthChecker and represents if the NGF Pod is ready. - readyCh chan struct{} - lock sync.RWMutex - graphBuilt bool - leader bool -} - -// createHealthProbe creates a Server runnable to serve as our health and readiness checker. -func createHealthProbe(cfg config.Config, healthChecker *graphBuiltHealthChecker) (manager.Server, error) { - // we chose to create our own health probe server instead of using the controller-runtime one because - // of repetitive log which would flood our logs on non-ready non-leader NGF Pods. This health probe is - // similar to the controller-runtime's health probe. - - mux := http.NewServeMux() - - // copy of controller-runtime sane defaults for new http.Server - s := &http.Server{ - Handler: mux, - MaxHeaderBytes: 1 << 20, - IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout - ReadHeaderTimeout: 32 * time.Second, - } - - mux.HandleFunc(readinessEndpointName, healthChecker.readyHandler) - - ln, err := net.Listen("tcp", fmt.Sprintf(":%d", cfg.HealthConfig.Port)) - if err != nil { - return manager.Server{}, - fmt.Errorf("error listening on %s: %w", fmt.Sprintf(":%d", cfg.HealthConfig.Port), err) - } - - return manager.Server{ - Name: "health probe", - Server: s, - Listener: ln, - }, nil -} - -func (h *graphBuiltHealthChecker) readyHandler(resp http.ResponseWriter, req *http.Request) { - if err := h.readyCheck(req); err != nil { - resp.WriteHeader(http.StatusServiceUnavailable) - } else { - resp.WriteHeader(http.StatusOK) - } + readyCh chan struct{} + lock sync.RWMutex + ready bool } // readyCheck returns the ready-state of the Pod. It satisfies the controller-runtime Checker type. -// We are considered ready after the first graph is built and if the NGF Pod is leader. +// We are considered ready after the first graph is built. func (h *graphBuiltHealthChecker) readyCheck(_ *http.Request) error { h.lock.RLock() defer h.lock.RUnlock() - if !h.leader { - return errors.New("this Pod is not currently leader") - } - - if !h.graphBuilt { - return errors.New("control plane initial graph has not been built") + if !h.ready { + return errors.New("control plane is not yet ready") } return nil } -// setGraphBuilt marks the health check as having the initial graph built. -func (h *graphBuiltHealthChecker) setGraphBuilt() { +// setAsReady marks the health check as ready. +func (h *graphBuiltHealthChecker) setAsReady() { h.lock.Lock() defer h.lock.Unlock() - h.graphBuilt = true + h.ready = true + close(h.readyCh) } // getReadyCh returns a read-only channel, which determines if the NGF Pod is ready. func (h *graphBuiltHealthChecker) getReadyCh() <-chan struct{} { return h.readyCh } - -// setAsLeader marks the health check as leader. 
-func (h *graphBuiltHealthChecker) setAsLeader(_ context.Context) { - h.lock.Lock() - defer h.lock.Unlock() - - h.leader = true - - // setGraphBuilt should already have been called when processing the resources on startup because the leader - // election process takes longer than the initial call to HandleEventBatch. Thus, the NGF Pod should be marked as - // ready and have this channel be closed. - close(h.readyCh) -} diff --git a/internal/mode/static/health_test.go b/internal/mode/static/health_test.go index 3505479d7d..7246283ed9 100644 --- a/internal/mode/static/health_test.go +++ b/internal/mode/static/health_test.go @@ -1,99 +1,17 @@ package static import ( - "context" - "errors" - "net" - "net/http" - "net/http/httptest" "testing" . "github.com/onsi/gomega" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ) func TestReadyCheck(t *testing.T) { t.Parallel() g := NewWithT(t) healthChecker := newGraphBuiltHealthChecker() + g.Expect(healthChecker.readyCheck(nil)).ToNot(Succeed()) - g.Expect(healthChecker.readyCheck(nil)).To(MatchError(errors.New("this Pod is not currently leader"))) - - healthChecker.graphBuilt = true - g.Expect(healthChecker.readyCheck(nil)).To(MatchError(errors.New("this Pod is not currently leader"))) - - healthChecker.graphBuilt = false - healthChecker.leader = true - g.Expect(healthChecker.readyCheck(nil)). - To(MatchError(errors.New("control plane initial graph has not been built"))) - - healthChecker.graphBuilt = true + healthChecker.ready = true g.Expect(healthChecker.readyCheck(nil)).To(Succeed()) } - -func TestSetAsLeader(t *testing.T) { - t.Parallel() - g := NewWithT(t) - healthChecker := newGraphBuiltHealthChecker() - - g.Expect(healthChecker.leader).To(BeFalse()) - g.Expect(healthChecker.readyCh).ShouldNot(BeClosed()) - - healthChecker.setAsLeader(context.Background()) - - g.Expect(healthChecker.leader).To(BeTrue()) - g.Expect(healthChecker.readyCh).To(BeClosed()) -} - -func TestSetGraphBuilt(t *testing.T) { - t.Parallel() - g := NewWithT(t) - healthChecker := newGraphBuiltHealthChecker() - - g.Expect(healthChecker.graphBuilt).To(BeFalse()) - - healthChecker.setGraphBuilt() - - g.Expect(healthChecker.graphBuilt).To(BeTrue()) -} - -func TestReadyHandler(t *testing.T) { - t.Parallel() - g := NewWithT(t) - healthChecker := newGraphBuiltHealthChecker() - - r := httptest.NewRequest(http.MethodGet, "/readyz", nil) - w := httptest.NewRecorder() - - healthChecker.readyHandler(w, r) - g.Expect(w.Result().StatusCode).To(Equal(http.StatusServiceUnavailable)) - - healthChecker.graphBuilt = true - healthChecker.leader = true - - w = httptest.NewRecorder() - healthChecker.readyHandler(w, r) - g.Expect(w.Result().StatusCode).To(Equal(http.StatusOK)) -} - -func TestCreateHealthProbe(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - healthChecker := newGraphBuiltHealthChecker() - - cfg := config.Config{HealthConfig: config.HealthConfig{Port: 100000}} - _, err := createHealthProbe(cfg, healthChecker) - g.Expect(err).To(MatchError("error listening on :100000: listen tcp: address 100000: invalid port")) - - cfg = config.Config{HealthConfig: config.HealthConfig{Port: 8081}} - hp, err := createHealthProbe(cfg, healthChecker) - g.Expect(err).ToNot(HaveOccurred()) - - addr, ok := (hp.Listener.Addr()).(*net.TCPAddr) - g.Expect(ok).To(BeTrue()) - - g.Expect(addr.Port).To(Equal(cfg.HealthConfig.Port)) - g.Expect(hp.Server).ToNot(BeNil()) -} diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 119c49a8c3..c99752a825 
100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -76,8 +76,6 @@ const ( plusClientCertField = "tls.crt" plusClientKeyField = "tls.key" grpcServerPort = 8443 - // defined in our deployment.yaml. - readinessEndpointName = "/readyz" ) var scheme = runtime.NewScheme() @@ -264,15 +262,6 @@ func StartManager(cfg config.Config) error { return fmt.Errorf("cannot register event loop: %w", err) } - if err = mgr.Add(runnables.NewCallFunctionsAfterBecameLeader([]func(context.Context){ - groupStatusUpdater.Enable, - nginxProvisioner.Enable, - healthChecker.setAsLeader, - eventHandler.eventHandlerEnable, - })); err != nil { - return fmt.Errorf("cannot register functions that get called after Pod becomes leader: %w", err) - } - if cfg.ProductTelemetryConfig.Enabled { dataCollector := telemetry.NewDataCollectorImpl(telemetry.DataCollectorConfig{ K8sClientReader: mgr.GetAPIReader(), @@ -298,7 +287,6 @@ func StartManager(cfg config.Config) error { } cfg.Logger.Info("Starting manager") - cfg.Logger.Info("NGINX Gateway Fabric Pod will be marked as unready until it has the leader lease") go func() { <-ctx.Done() cfg.Logger.Info("Shutting down") @@ -351,6 +339,10 @@ func createManager(cfg config.Config, healthChecker *graphBuiltHealthChecker) (m }, } + if cfg.HealthConfig.Enabled { + options.HealthProbeBindAddress = fmt.Sprintf(":%d", cfg.HealthConfig.Port) + } + clusterCfg := ctlr.GetConfigOrDie() clusterCfg.Timeout = clusterTimeout @@ -360,13 +352,8 @@ func createManager(cfg config.Config, healthChecker *graphBuiltHealthChecker) (m } if cfg.HealthConfig.Enabled { - healthProbeServer, err := createHealthProbe(cfg, healthChecker) - if err != nil { - return nil, fmt.Errorf("error creating health probe: %w", err) - } - - if err := mgr.Add(&healthProbeServer); err != nil { - return nil, fmt.Errorf("error adding health probe: %w", err) + if err := mgr.AddReadyzCheck("readyz", healthChecker.readyCheck); err != nil { + return nil, fmt.Errorf("error adding ready check: %w", err) } } From 0bdd7a6d212cd097b36dc7216686747cc11946aa Mon Sep 17 00:00:00 2001 From: salonichf5 <146118978+salonichf5@users.noreply.github.com> Date: Tue, 18 Feb 2025 11:10:13 -0700 Subject: [PATCH 10/32] Fix revert commit for leader election (#3136) * Add back runnables change and call to nginx provisioner enable --------- Co-authored-by: Benjamin Jee --- internal/framework/runnables/runnables.go | 29 +++++++++++-------- .../framework/runnables/runnables_test.go | 19 +++++++----- internal/mode/static/manager.go | 7 +++++ 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/internal/framework/runnables/runnables.go b/internal/framework/runnables/runnables.go index d960475008..8304c326c0 100644 --- a/internal/framework/runnables/runnables.go +++ b/internal/framework/runnables/runnables.go @@ -34,29 +34,34 @@ func (r *LeaderOrNonLeader) NeedLeaderElection() bool { return false } -// EnableAfterBecameLeader is a Runnable that will call the enable function when the current instance becomes +// CallFunctionsAfterBecameLeader is a Runnable that will call the given functions when the current instance becomes // the leader. 
-type EnableAfterBecameLeader struct { - enable func(context.Context) +type CallFunctionsAfterBecameLeader struct { + enableFunctions []func(context.Context) } var ( - _ manager.LeaderElectionRunnable = &EnableAfterBecameLeader{} - _ manager.Runnable = &EnableAfterBecameLeader{} + _ manager.LeaderElectionRunnable = &CallFunctionsAfterBecameLeader{} + _ manager.Runnable = &CallFunctionsAfterBecameLeader{} ) -// NewEnableAfterBecameLeader creates a new EnableAfterBecameLeader Runnable. -func NewEnableAfterBecameLeader(enable func(context.Context)) *EnableAfterBecameLeader { - return &EnableAfterBecameLeader{ - enable: enable, +// NewCallFunctionsAfterBecameLeader creates a new CallFunctionsAfterBecameLeader Runnable. +func NewCallFunctionsAfterBecameLeader( + enableFunctions []func(context.Context), +) *CallFunctionsAfterBecameLeader { + return &CallFunctionsAfterBecameLeader{ + enableFunctions: enableFunctions, } } -func (j *EnableAfterBecameLeader) Start(ctx context.Context) error { - j.enable(ctx) +func (j *CallFunctionsAfterBecameLeader) Start(ctx context.Context) error { + for _, f := range j.enableFunctions { + f(ctx) + } + return nil } -func (j *EnableAfterBecameLeader) NeedLeaderElection() bool { +func (j *CallFunctionsAfterBecameLeader) NeedLeaderElection() bool { return true } diff --git a/internal/framework/runnables/runnables_test.go b/internal/framework/runnables/runnables_test.go index 9f34d9ccba..6da01a0236 100644 --- a/internal/framework/runnables/runnables_test.go +++ b/internal/framework/runnables/runnables_test.go @@ -23,19 +23,22 @@ func TestLeaderOrNonLeader(t *testing.T) { g.Expect(leaderOrNonLeader.NeedLeaderElection()).To(BeFalse()) } -func TestEnableAfterBecameLeader(t *testing.T) { +func TestCallFunctionsAfterBecameLeader(t *testing.T) { t.Parallel() - enabled := false - enableAfterBecameLeader := NewEnableAfterBecameLeader(func(_ context.Context) { - enabled = true + statusUpdaterEnabled := false + provisionerEnabled := false + + callFunctionsAfterBecameLeader := NewCallFunctionsAfterBecameLeader([]func(ctx context.Context){ + func(_ context.Context) { statusUpdaterEnabled = true }, + func(_ context.Context) { provisionerEnabled = true }, }) g := NewWithT(t) - g.Expect(enableAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) - g.Expect(enabled).To(BeFalse()) + g.Expect(callFunctionsAfterBecameLeader.NeedLeaderElection()).To(BeTrue()) - err := enableAfterBecameLeader.Start(context.Background()) + err := callFunctionsAfterBecameLeader.Start(context.Background()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(enabled).To(BeTrue()) + g.Expect(statusUpdaterEnabled).To(BeTrue()) + g.Expect(provisionerEnabled).To(BeTrue()) } diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index c99752a825..5eb31b3462 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -262,6 +262,13 @@ func StartManager(cfg config.Config) error { return fmt.Errorf("cannot register event loop: %w", err) } + if err = mgr.Add(runnables.NewCallFunctionsAfterBecameLeader([]func(context.Context){ + groupStatusUpdater.Enable, + nginxProvisioner.Enable, + })); err != nil { + return fmt.Errorf("cannot register functions that get called after Pod becomes leader: %w", err) + } + if cfg.ProductTelemetryConfig.Enabled { dataCollector := telemetry.NewDataCollectorImpl(telemetry.DataCollectorConfig{ K8sClientReader: mgr.GetAPIReader(), From 38cd73a93e2b0e8dd3d5e719505c90b840f44ad0 Mon Sep 17 00:00:00 2001 From: bjee19 
<139261241+bjee19@users.noreply.github.com> Date: Thu, 20 Feb 2025 12:57:50 -0800 Subject: [PATCH 11/32] CP/DP split: Support nginx debug mode when provisioning Data Plane (#3147) Support nginx debug mode when provisioning the Data Plane. Problem: We want to have the option to provision nginx instances in debug mode. Solution: Add debug field to NginxProxy CRD. Also user can set debug field when installing through Helm by setting the nginx.debug flag. --- apis/v1alpha2/nginxproxy_types.go | 5 +++++ apis/v1alpha2/zz_generated.deepcopy.go | 5 +++++ build/entrypoint.sh | 10 ++++++++-- charts/nginx-gateway-fabric/templates/nginxproxy.yaml | 3 +++ config/crd/bases/gateway.nginx.org_nginxproxies.yaml | 4 ++++ deploy/crds.yaml | 4 ++++ internal/mode/static/provisioner/objects.go | 5 +++++ internal/mode/static/state/graph/nginxproxy_test.go | 10 ++++++++++ 8 files changed, 44 insertions(+), 2 deletions(-) diff --git a/apis/v1alpha2/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go index 8f8a2671e8..bfa21aca59 100644 --- a/apis/v1alpha2/nginxproxy_types.go +++ b/apis/v1alpha2/nginxproxy_types.go @@ -435,6 +435,11 @@ type PodSpec struct { // ContainerSpec defines container fields for the NGINX container. type ContainerSpec struct { + // Debug enables debugging for NGINX by using the nginx-debug binary. + // + // +optional + Debug *bool `json:"debug,omitempty"` + // Image is the NGINX image to use. // // +optional diff --git a/apis/v1alpha2/zz_generated.deepcopy.go b/apis/v1alpha2/zz_generated.deepcopy.go index c6420fc2f2..54e5c760b2 100644 --- a/apis/v1alpha2/zz_generated.deepcopy.go +++ b/apis/v1alpha2/zz_generated.deepcopy.go @@ -14,6 +14,11 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerSpec) DeepCopyInto(out *ContainerSpec) { *out = *in + if in.Debug != nil { + in, out := &in.Debug, &out.Debug + *out = new(bool) + **out = **in + } if in.Image != nil { in, out := &in.Image, &out.Image *out = new(Image) diff --git a/build/entrypoint.sh b/build/entrypoint.sh index 1095831c57..4ccd6be3a2 100755 --- a/build/entrypoint.sh +++ b/build/entrypoint.sh @@ -16,7 +16,13 @@ rm -rf /var/run/nginx/*.sock # Launch nginx echo "starting nginx ..." -/usr/sbin/nginx -g "daemon off;" & + +# if we want to use the nginx-debug binary, we will call this script with an argument "debug" +if [ "${1:-false}" = "debug" ]; then + /usr/sbin/nginx-debug -g "daemon off;" & +else + /usr/sbin/nginx -g "daemon off;" & +fi nginx_pid=$! @@ -31,7 +37,7 @@ done # start nginx-agent, pass args echo "starting nginx-agent ..." -nginx-agent "$@" & +nginx-agent & agent_pid=$! diff --git a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml index 1dd6f44155..f77630fe95 100644 --- a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml +++ b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml @@ -23,6 +23,9 @@ spec: {{- end }} image: {{- toYaml .Values.nginx.image | nindent 10 }} + {{- if .Values.nginx.debug }} + debug: {{ .Values.nginx.debug }} + {{- end }} {{- end }} {{- if .Values.nginx.service }} service: diff --git a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml index ea0f93d9f5..b07a013fd8 100644 --- a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml +++ b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml @@ -79,6 +79,10 @@ spec: description: Container defines container fields for the NGINX container. 
properties: + debug: + description: Debug enables debugging for NGINX by using + the nginx-debug binary. + type: boolean image: description: Image is the NGINX image to use. properties: diff --git a/deploy/crds.yaml b/deploy/crds.yaml index 2d18df8928..c08c007d12 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -664,6 +664,10 @@ spec: description: Container defines container fields for the NGINX container. properties: + debug: + description: Debug enables debugging for NGINX by using + the nginx-debug binary. + type: boolean image: description: Image is the NGINX image to use. properties: diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index 19a24cb832..6c3c6b5c01 100644 --- a/internal/mode/static/provisioner/objects.go +++ b/internal/mode/static/provisioner/objects.go @@ -447,6 +447,11 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( } container.Lifecycle = containerSpec.Lifecycle container.VolumeMounts = append(container.VolumeMounts, containerSpec.VolumeMounts...) + + if containerSpec.Debug != nil && *containerSpec.Debug { + container.Command = append(container.Command, "/agent/entrypoint.sh") + container.Args = append(container.Args, "debug") + } spec.Spec.Containers[0] = container } } diff --git a/internal/mode/static/state/graph/nginxproxy_test.go b/internal/mode/static/state/graph/nginxproxy_test.go index 2289bb7dab..bf074ab562 100644 --- a/internal/mode/static/state/graph/nginxproxy_test.go +++ b/internal/mode/static/state/graph/nginxproxy_test.go @@ -55,6 +55,7 @@ func TestBuildEffectiveNginxProxy(t *testing.T) { logLevel ngfAPIv1alpha2.NginxErrorLogLevel, setIP bool, disableHTTP bool, + nginxDebug bool, ) *ngfAPIv1alpha2.NginxProxy { return &ngfAPIv1alpha2.NginxProxy{ Spec: ngfAPIv1alpha2.NginxProxySpec{ @@ -79,6 +80,13 @@ func TestBuildEffectiveNginxProxy(t *testing.T) { ErrorLevel: &logLevel, }, DisableHTTP2: &disableHTTP, + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Container: ngfAPIv1alpha2.ContainerSpec{ + Debug: &nginxDebug, + }, + }, + }, }, } } @@ -100,6 +108,7 @@ func TestBuildEffectiveNginxProxy(t *testing.T) { ngfAPIv1alpha2.NginxLogLevelAlert, true, false, + false, ) } @@ -120,6 +129,7 @@ func TestBuildEffectiveNginxProxy(t *testing.T) { ngfAPIv1alpha2.NginxLogLevelError, false, true, + true, ) } From a3caad7af814a071a6981c9eed3581ab6e648c60 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 24 Feb 2025 18:10:02 -0700 Subject: [PATCH 12/32] CP/DP Split: provision NGINX Plus (#3148) Continuation from the previous commit to add support for provisioning with NGINX Plus. This adds support for duplicating any NGINX Plus or docker registry secrets into the Gateway namespace. Added unit tests. 
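For illustration, a minimal, hypothetical sketch of the secret-duplication idea (this is not the code added by this patch; the duplicateSecret helper, its signature, and the package name are assumptions): given the name of a Secret in the control plane namespace, a provisioner can read it and recreate its data and type in the Gateway's namespace using a controller-runtime client.

package provisioner

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// duplicateSecret copies a Secret (for example, an NGINX Plus license or docker
// registry Secret) from the control plane namespace into the namespace of a
// provisioned Gateway so that the nginx Pod in that namespace can consume it.
// Hypothetical helper for illustration only; not the implementation in this patch.
func duplicateSecret(
	ctx context.Context,
	k8sClient client.Client,
	source types.NamespacedName,
	targetNamespace string,
) error {
	var src corev1.Secret
	if err := k8sClient.Get(ctx, source, &src); err != nil {
		return fmt.Errorf("getting source secret %s: %w", source, err)
	}

	dst := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      src.Name,
			Namespace: targetNamespace,
			Labels:    src.Labels,
		},
		Type: src.Type,
		Data: src.Data,
	}

	if err := k8sClient.Create(ctx, dst); err != nil && !apierrors.IsAlreadyExists(err) {
		return fmt.Errorf("creating duplicated secret in %s: %w", targetNamespace, err)
	}

	return nil
}

A real implementation would additionally need to keep the copy in sync with the source Secret and remove it when the Gateway is deleted.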
--- .../templates/deployment.yaml | 8 + cmd/gateway/commands.go | 16 +- cmd/gateway/commands_test.go | 27 + cmd/gateway/validating_types.go | 50 ++ deploy/experimental-nginx-plus/deploy.yaml | 1 + deploy/nginx-plus/deploy.yaml | 1 + .../snippets-filters-nginx-plus/deploy.yaml | 1 + internal/framework/controller/resource.go | 4 +- internal/mode/static/config/config.go | 2 + internal/mode/static/handler.go | 15 +- internal/mode/static/manager.go | 17 +- .../agentfakes/fake_deployment_storer.go | 230 +++++++ internal/mode/static/nginx/agent/command.go | 40 +- .../mode/static/nginx/agent/deployment.go | 118 ++-- internal/mode/static/nginx/agent/file.go | 3 +- internal/mode/static/provisioner/eventloop.go | 12 + internal/mode/static/provisioner/handler.go | 6 +- .../mode/static/provisioner/handler_test.go | 174 +++++ internal/mode/static/provisioner/objects.go | 313 ++++++++- .../mode/static/provisioner/objects_test.go | 617 ++++++++++++++++++ .../mode/static/provisioner/provisioner.go | 93 +-- .../static/provisioner/provisioner_test.go | 361 ++++++++++ internal/mode/static/provisioner/setter.go | 9 + internal/mode/static/provisioner/store.go | 181 +++-- .../mode/static/provisioner/store_test.go | 505 ++++++++++++++ internal/mode/static/provisioner/templates.go | 38 +- 26 files changed, 2644 insertions(+), 198 deletions(-) create mode 100644 internal/mode/static/nginx/agent/agentfakes/fake_deployment_storer.go create mode 100644 internal/mode/static/provisioner/handler_test.go create mode 100644 internal/mode/static/provisioner/objects_test.go create mode 100644 internal/mode/static/provisioner/provisioner_test.go create mode 100644 internal/mode/static/provisioner/store_test.go diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index d76df22442..b542aacb57 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -42,6 +42,14 @@ spec: - --gatewayclass={{ .Values.nginxGateway.gatewayClassName }} - --config={{ include "nginx-gateway.config-name" . }} - --service={{ include "nginx-gateway.fullname" . }} + {{- if .Values.nginx.imagePullSecret }} + - --nginx-docker-secret={{ .Values.nginx.imagePullSecret }} + {{- end }} + {{- if .Values.nginx.imagePullSecrets }} + {{- range .Values.nginx.imagePullSecrets }} + - --nginx-docker-secret={{ . 
}} + {{- end }} + {{- end }} {{- if .Values.nginx.plus }} - --nginx-plus {{- if .Values.nginx.usage.secretName }} diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index 0a572c922e..5652a9f2dd 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -69,6 +69,7 @@ func createStaticModeCommand() *cobra.Command { leaderElectionLockNameFlag = "leader-election-lock-name" productTelemetryDisableFlag = "product-telemetry-disable" gwAPIExperimentalFlag = "gateway-api-experimental-features" + nginxDockerSecretFlag = "nginx-docker-secret" //nolint:gosec // not credentials usageReportSecretFlag = "usage-report-secret" usageReportEndpointFlag = "usage-report-endpoint" usageReportResolverFlag = "usage-report-resolver" @@ -120,7 +121,10 @@ func createStaticModeCommand() *cobra.Command { snippetsFilters bool - plus bool + plus bool + nginxDockerSecrets = stringSliceValidatingValue{ + validator: validateResourceName, + } usageReportSkipVerify bool usageReportSecretName = stringValidatingValue{ validator: validateResourceName, @@ -249,7 +253,8 @@ func createStaticModeCommand() *cobra.Command { Names: flagKeys, Values: flagValues, }, - SnippetsFilters: snippetsFilters, + SnippetsFilters: snippetsFilters, + NginxDockerSecretNames: nginxDockerSecrets.values, } if err := static.StartManager(conf); err != nil { @@ -378,6 +383,13 @@ func createStaticModeCommand() *cobra.Command { "Requires the Gateway APIs installed from the experimental channel.", ) + cmd.Flags().Var( + &nginxDockerSecrets, + nginxDockerSecretFlag, + "The name of the NGINX docker registry Secret(s). Must exist in the same namespace "+ + "that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).", + ) + cmd.Flags().Var( &usageReportSecretName, usageReportSecretFlag, diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go index b58fa3331b..e89a5a91dd 100644 --- a/cmd/gateway/commands_test.go +++ b/cmd/gateway/commands_test.go @@ -153,6 +153,8 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { "--leader-election-lock-name=my-lock", "--leader-election-disable=false", "--nginx-plus", + "--nginx-docker-secret=secret1", + "--nginx-docker-secret=secret2", "--usage-report-secret=my-secret", "--usage-report-endpoint=example.com", "--usage-report-resolver=resolver.com", @@ -314,6 +316,31 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { wantErr: true, expectedErrPrefix: `invalid argument "" for "--leader-election-disable" flag: strconv.ParseBool`, }, + { + name: "nginx-docker-secret is set to empty string", + args: []string{ + "--nginx-docker-secret=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--nginx-docker-secret" flag: must be set`, + }, + { + name: "nginx-docker-secret is invalid", + args: []string{ + "--nginx-docker-secret=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--nginx-docker-secret" flag: invalid format: `, + }, + { + name: "one nginx-docker-secret is invalid", + args: []string{ + "--nginx-docker-secret=valid", + "--nginx-docker-secret=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--nginx-docker-secret" flag: invalid format: `, + }, { name: "usage-report-secret is set to empty string", args: []string{ diff --git a/cmd/gateway/validating_types.go b/cmd/gateway/validating_types.go index 42d24782cb..1db3eab8dc 100644 --- a/cmd/gateway/validating_types.go +++ b/cmd/gateway/validating_types.go @@ -1,8 +1,11 @@ package main import ( + "bytes" + "encoding/csv" "fmt" 
"strconv" + "strings" "k8s.io/apimachinery/pkg/types" ) @@ -30,6 +33,53 @@ func (v *stringValidatingValue) Type() string { return "string" } +// stringSliceValidatingValue is a string slice flag value with custom validation logic. +// it implements the pflag.Value interface. +type stringSliceValidatingValue struct { + validator func(v string) error + values []string + changed bool +} + +func (v *stringSliceValidatingValue) String() string { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(v.values) + if err != nil { + return "" + } + + w.Flush() + str := strings.TrimSuffix(b.String(), "\n") + return "[" + str + "]" +} + +func (v *stringSliceValidatingValue) Set(param string) error { + if err := v.validator(param); err != nil { + return err + } + + stringReader := strings.NewReader(param) + csvReader := csv.NewReader(stringReader) + str, err := csvReader.Read() + if err != nil { + return err + } + + if !v.changed { + v.values = str + } else { + v.values = append(v.values, str...) + } + v.changed = true + + return nil +} + +func (v *stringSliceValidatingValue) Type() string { + return "stringSlice" +} + type intValidatingValue struct { validator func(v int) error value int diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index 88f5e771cd..9ac24a81da 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -209,6 +209,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license - --metrics-port=9113 diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index ca6be2dd91..6d6c1ca848 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -205,6 +205,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license - --metrics-port=9113 diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index f452442bef..88b9371440 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -207,6 +207,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license - --metrics-port=9113 diff --git a/internal/framework/controller/resource.go b/internal/framework/controller/resource.go index c238b64924..2fff439a50 100644 --- a/internal/framework/controller/resource.go +++ b/internal/framework/controller/resource.go @@ -4,6 +4,6 @@ import "fmt" // CreateNginxResourceName creates the base resource name for all nginx resources // created by the control plane. 
-func CreateNginxResourceName(gatewayName, gatewayClassName string) string { - return fmt.Sprintf("%s-%s", gatewayName, gatewayClassName) +func CreateNginxResourceName(prefix, suffix string) string { + return fmt.Sprintf("%s-%s", prefix, suffix) } diff --git a/internal/mode/static/config/config.go b/internal/mode/static/config/config.go index 19837a780a..d8556e19f2 100644 --- a/internal/mode/static/config/config.go +++ b/internal/mode/static/config/config.go @@ -32,6 +32,8 @@ type Config struct { ConfigName string // GatewayClassName is the name of the GatewayClass resource that the Gateway will use. GatewayClassName string + // NginxDockerSecretNames are the names of any Docker registry Secrets for the NGINX container. + NginxDockerSecretNames []string // LeaderElection contains the configuration for leader election. LeaderElection LeaderElectionConfig // ProductTelemetryConfig contains the configuration for collecting product telemetry. diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 2ca73c7568..c22b182e8e 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -169,6 +169,12 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.sendNginxConfig(ctx, logger, gr, changeType) } +// enable is called when the pod becomes leader to ensure the provisioner has +// the latest configuration. +func (h *eventHandlerImpl) enable(ctx context.Context) { + h.sendNginxConfig(ctx, h.cfg.logger, h.cfg.processor.GetLatestGraph(), state.ClusterStateChange) +} + func (h *eventHandlerImpl) sendNginxConfig( ctx context.Context, logger logr.Logger, @@ -176,7 +182,6 @@ func (h *eventHandlerImpl) sendNginxConfig( changeType state.ChangeType, ) { if gr == nil { - logger.Info("Handling events didn't result into NGINX configuration changes") return } @@ -246,13 +251,13 @@ func (h *eventHandlerImpl) processStateAndBuildConfig( h.setLatestConfiguration(&cfg) - deployment.Lock.Lock() + deployment.FileLock.Lock() if h.cfg.plus { configApplied = h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, cfg) } else { configApplied = h.updateNginxConf(deployment, cfg) } - deployment.Lock.Unlock() + deployment.FileLock.Unlock() case state.ClusterStateChange: h.version++ cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) @@ -264,9 +269,9 @@ func (h *eventHandlerImpl) processStateAndBuildConfig( h.setLatestConfiguration(&cfg) - deployment.Lock.Lock() + deployment.FileLock.Lock() configApplied = h.updateNginxConf(deployment, cfg) - deployment.Lock.Unlock() + deployment.FileLock.Unlock() } return configApplied diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 5eb31b3462..930be5d01e 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -201,13 +201,15 @@ func StartManager(cfg config.Config) error { ctx, mgr, provisioner.Config{ - DeploymentStore: nginxUpdater.NginxDeployments, - StatusQueue: statusQueue, - Logger: cfg.Logger.WithName("provisioner"), - EventRecorder: recorder, - GatewayPodConfig: cfg.GatewayPodConfig, - GCName: cfg.GatewayClassName, - Plus: cfg.Plus, + DeploymentStore: nginxUpdater.NginxDeployments, + StatusQueue: statusQueue, + Logger: cfg.Logger.WithName("provisioner"), + EventRecorder: recorder, + GatewayPodConfig: &cfg.GatewayPodConfig, + GCName: cfg.GatewayClassName, + Plus: cfg.Plus, + NginxDockerSecretNames: cfg.NginxDockerSecretNames, + PlusUsageConfig: &cfg.UsageReportConfig, }, ) if err != nil { @@ -265,6 
+267,7 @@ func StartManager(cfg config.Config) error { if err = mgr.Add(runnables.NewCallFunctionsAfterBecameLeader([]func(context.Context){ groupStatusUpdater.Enable, nginxProvisioner.Enable, + eventHandler.enable, })); err != nil { return fmt.Errorf("cannot register functions that get called after Pod becomes leader: %w", err) } diff --git a/internal/mode/static/nginx/agent/agentfakes/fake_deployment_storer.go b/internal/mode/static/nginx/agent/agentfakes/fake_deployment_storer.go new file mode 100644 index 0000000000..af87b10a8e --- /dev/null +++ b/internal/mode/static/nginx/agent/agentfakes/fake_deployment_storer.go @@ -0,0 +1,230 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package agentfakes + +import ( + "context" + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "k8s.io/apimachinery/pkg/types" +) + +type FakeDeploymentStorer struct { + GetStub func(types.NamespacedName) *agent.Deployment + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 types.NamespacedName + } + getReturns struct { + result1 *agent.Deployment + } + getReturnsOnCall map[int]struct { + result1 *agent.Deployment + } + GetOrStoreStub func(context.Context, types.NamespacedName, chan struct{}) *agent.Deployment + getOrStoreMutex sync.RWMutex + getOrStoreArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 chan struct{} + } + getOrStoreReturns struct { + result1 *agent.Deployment + } + getOrStoreReturnsOnCall map[int]struct { + result1 *agent.Deployment + } + RemoveStub func(types.NamespacedName) + removeMutex sync.RWMutex + removeArgsForCall []struct { + arg1 types.NamespacedName + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeDeploymentStorer) Get(arg1 types.NamespacedName) *agent.Deployment { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 types.NamespacedName + }{arg1}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeDeploymentStorer) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *FakeDeploymentStorer) GetCalls(stub func(types.NamespacedName) *agent.Deployment) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *FakeDeploymentStorer) GetArgsForCall(i int) types.NamespacedName { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeDeploymentStorer) GetReturns(result1 *agent.Deployment) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) GetReturnsOnCall(i int, result1 *agent.Deployment) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 *agent.Deployment + }) + } + fake.getReturnsOnCall[i] = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) GetOrStore(arg1 context.Context, arg2 types.NamespacedName, arg3 chan struct{}) *agent.Deployment { + 
fake.getOrStoreMutex.Lock() + ret, specificReturn := fake.getOrStoreReturnsOnCall[len(fake.getOrStoreArgsForCall)] + fake.getOrStoreArgsForCall = append(fake.getOrStoreArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 chan struct{} + }{arg1, arg2, arg3}) + stub := fake.GetOrStoreStub + fakeReturns := fake.getOrStoreReturns + fake.recordInvocation("GetOrStore", []interface{}{arg1, arg2, arg3}) + fake.getOrStoreMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeDeploymentStorer) GetOrStoreCallCount() int { + fake.getOrStoreMutex.RLock() + defer fake.getOrStoreMutex.RUnlock() + return len(fake.getOrStoreArgsForCall) +} + +func (fake *FakeDeploymentStorer) GetOrStoreCalls(stub func(context.Context, types.NamespacedName, chan struct{}) *agent.Deployment) { + fake.getOrStoreMutex.Lock() + defer fake.getOrStoreMutex.Unlock() + fake.GetOrStoreStub = stub +} + +func (fake *FakeDeploymentStorer) GetOrStoreArgsForCall(i int) (context.Context, types.NamespacedName, chan struct{}) { + fake.getOrStoreMutex.RLock() + defer fake.getOrStoreMutex.RUnlock() + argsForCall := fake.getOrStoreArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeDeploymentStorer) GetOrStoreReturns(result1 *agent.Deployment) { + fake.getOrStoreMutex.Lock() + defer fake.getOrStoreMutex.Unlock() + fake.GetOrStoreStub = nil + fake.getOrStoreReturns = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) GetOrStoreReturnsOnCall(i int, result1 *agent.Deployment) { + fake.getOrStoreMutex.Lock() + defer fake.getOrStoreMutex.Unlock() + fake.GetOrStoreStub = nil + if fake.getOrStoreReturnsOnCall == nil { + fake.getOrStoreReturnsOnCall = make(map[int]struct { + result1 *agent.Deployment + }) + } + fake.getOrStoreReturnsOnCall[i] = struct { + result1 *agent.Deployment + }{result1} +} + +func (fake *FakeDeploymentStorer) Remove(arg1 types.NamespacedName) { + fake.removeMutex.Lock() + fake.removeArgsForCall = append(fake.removeArgsForCall, struct { + arg1 types.NamespacedName + }{arg1}) + stub := fake.RemoveStub + fake.recordInvocation("Remove", []interface{}{arg1}) + fake.removeMutex.Unlock() + if stub != nil { + fake.RemoveStub(arg1) + } +} + +func (fake *FakeDeploymentStorer) RemoveCallCount() int { + fake.removeMutex.RLock() + defer fake.removeMutex.RUnlock() + return len(fake.removeArgsForCall) +} + +func (fake *FakeDeploymentStorer) RemoveCalls(stub func(types.NamespacedName)) { + fake.removeMutex.Lock() + defer fake.removeMutex.Unlock() + fake.RemoveStub = stub +} + +func (fake *FakeDeploymentStorer) RemoveArgsForCall(i int) types.NamespacedName { + fake.removeMutex.RLock() + defer fake.removeMutex.RUnlock() + argsForCall := fake.removeArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeDeploymentStorer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.getOrStoreMutex.RLock() + defer fake.getOrStoreMutex.RUnlock() + fake.removeMutex.RLock() + defer fake.removeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeDeploymentStorer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer 
fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ agent.DeploymentStorer = new(FakeDeploymentStorer) diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go index 31d96143a7..d5be137cd4 100644 --- a/internal/mode/static/nginx/agent/command.go +++ b/internal/mode/static/nginx/agent/command.go @@ -34,11 +34,10 @@ const connectionWaitTimeout = 30 * time.Second // commandService handles the connection and subscription to the data plane agent. type commandService struct { pb.CommandServiceServer - nginxDeployments *DeploymentStore - statusQueue *status.Queue - connTracker agentgrpc.ConnectionsTracker - k8sReader client.Reader - // TODO(sberman): all logs are at Info level right now. Adjust appropriately. + nginxDeployments *DeploymentStore + statusQueue *status.Queue + connTracker agentgrpc.ConnectionsTracker + k8sReader client.Reader logger logr.Logger connectionTimeout time.Duration } @@ -144,13 +143,9 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error go msgr.Run(ctx) // apply current config before starting event loop - deployment.Lock.RLock() if err := cs.setInitialConfig(ctx, deployment, conn, msgr); err != nil { - deployment.Lock.RUnlock() - return err } - deployment.Lock.RUnlock() // subscribe to the deployment broadcaster to get file updates broadcaster := deployment.GetBroadcaster() @@ -255,13 +250,15 @@ func (cs *commandService) waitForConnection( } // setInitialConfig gets the initial configuration for this connection and applies it. -// The caller MUST lock the deployment before calling this. 
func (cs *commandService) setInitialConfig( ctx context.Context, deployment *Deployment, conn *agentgrpc.Connection, msgr messenger.Messenger, ) error { + deployment.FileLock.Lock() + defer deployment.FileLock.Unlock() + fileOverviews, configVersion := deployment.GetFileOverviews() if err := msgr.Send(ctx, buildRequest(fileOverviews, conn.InstanceID, configVersion)); err != nil { cs.logAndSendErrorStatus(deployment, conn, err) @@ -420,7 +417,7 @@ func buildPlusAPIRequest(action *pb.NGINXPlusAction, instanceID string) *pb.Mana } func (cs *commandService) getPodOwner(podName string) (types.NamespacedName, error) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() var pods v1.PodList @@ -451,12 +448,25 @@ func (cs *commandService) getPodOwner(podName string) (types.NamespacedName, err } var replicaSet appsv1.ReplicaSet - if err := cs.k8sReader.Get( + var replicaSetErr error + if err := wait.PollUntilContextCancel( ctx, - types.NamespacedName{Namespace: pod.Namespace, Name: podOwnerRefs[0].Name}, - &replicaSet, + 500*time.Millisecond, + true, /* poll immediately */ + func(ctx context.Context) (bool, error) { + if err := cs.k8sReader.Get( + ctx, + types.NamespacedName{Namespace: pod.Namespace, Name: podOwnerRefs[0].Name}, + &replicaSet, + ); err != nil { + replicaSetErr = err + return false, nil //nolint:nilerr // error is returned at the end + } + + return true, nil + }, ); err != nil { - return types.NamespacedName{}, fmt.Errorf("failed to get nginx Pod's ReplicaSet: %w", err) + return types.NamespacedName{}, fmt.Errorf("failed to get nginx Pod's ReplicaSet: %w", replicaSetErr) } replicaOwnerRefs := replicaSet.GetOwnerReferences() diff --git a/internal/mode/static/nginx/agent/deployment.go b/internal/mode/static/nginx/agent/deployment.go index bafdc6ad9e..5da82c7fd9 100644 --- a/internal/mode/static/nginx/agent/deployment.go +++ b/internal/mode/static/nginx/agent/deployment.go @@ -56,7 +56,8 @@ type Deployment struct { fileOverviews []*pb.File files []File - Lock sync.RWMutex + FileLock sync.RWMutex + errLock sync.RWMutex } // newDeployment returns a new Deployment object. @@ -72,56 +73,94 @@ func (d *Deployment) GetBroadcaster() broadcast.Broadcaster { return d.broadcaster } -// GetFileOverviews returns the current list of fileOverviews and configVersion for the deployment. -func (d *Deployment) GetFileOverviews() ([]*pb.File, string) { - d.Lock.RLock() - defer d.Lock.RUnlock() +// SetLatestConfigError sets the latest config apply error for the deployment. +func (d *Deployment) SetLatestConfigError(err error) { + d.errLock.Lock() + defer d.errLock.Unlock() - return d.fileOverviews, d.configVersion + d.latestConfigError = err } -// GetNGINXPlusActions returns the current NGINX Plus API Actions for the deployment. -func (d *Deployment) GetNGINXPlusActions() []*pb.NGINXPlusAction { - d.Lock.RLock() - defer d.Lock.RUnlock() +// SetLatestUpstreamError sets the latest upstream update error for the deployment. +func (d *Deployment) SetLatestUpstreamError(err error) { + d.errLock.Lock() + defer d.errLock.Unlock() - return d.nginxPlusActions + d.latestUpstreamError = err } // GetLatestConfigError gets the latest config apply error for the deployment. 
func (d *Deployment) GetLatestConfigError() error { - d.Lock.RLock() - defer d.Lock.RUnlock() + d.errLock.RLock() + defer d.errLock.RUnlock() return d.latestConfigError } // GetLatestUpstreamError gets the latest upstream update error for the deployment. func (d *Deployment) GetLatestUpstreamError() error { - d.Lock.RLock() - defer d.Lock.RUnlock() + d.errLock.RLock() + defer d.errLock.RUnlock() return d.latestUpstreamError } +// SetPodErrorStatus sets the error status of a Pod in this Deployment if applying the config failed. +func (d *Deployment) SetPodErrorStatus(pod string, err error) { + d.errLock.Lock() + defer d.errLock.Unlock() + + d.podStatuses[pod] = err +} + // RemovePodStatus deletes a pod from the pod status map. func (d *Deployment) RemovePodStatus(podName string) { - d.Lock.Lock() - defer d.Lock.Unlock() + d.errLock.Lock() + defer d.errLock.Unlock() delete(d.podStatuses, podName) } +// GetConfigurationStatus returns the current config status for this Deployment. It combines +// the most recent errors (if they exist) for all Pods in the Deployment into a single error. +func (d *Deployment) GetConfigurationStatus() error { + d.errLock.RLock() + defer d.errLock.RUnlock() + + errs := make([]error, 0, len(d.podStatuses)) + for _, err := range d.podStatuses { + errs = append(errs, err) + } + + if len(errs) == 1 { + return errs[0] + } + + return errors.Join(errs...) +} + /* The following functions for the Deployment object are UNLOCKED, meaning that they are unsafe. -Callers of these functions MUST ensure the lock is set before calling. +Callers of these functions MUST ensure the FileLock is set before calling. These functions are called as part of the ConfigApply or APIRequest processes. These entire processes are locked by the caller, hence why the functions themselves do not set the locks. */ +// GetFileOverviews returns the current list of fileOverviews and configVersion for the deployment. +// The deployment FileLock MUST already be locked before calling this function. +func (d *Deployment) GetFileOverviews() ([]*pb.File, string) { + return d.fileOverviews, d.configVersion +} + +// GetNGINXPlusActions returns the current NGINX Plus API Actions for the deployment. +// The deployment FileLock MUST already be locked before calling this function. +func (d *Deployment) GetNGINXPlusActions() []*pb.NGINXPlusAction { + return d.nginxPlusActions +} + // GetFile gets the requested file for the deployment and returns its contents. -// The deployment MUST already be locked before calling this function. +// The deployment FileLock MUST already be locked before calling this function. func (d *Deployment) GetFile(name, hash string) []byte { for _, file := range d.files { if name == file.Meta.GetName() && hash == file.Meta.GetHash() { @@ -133,7 +172,7 @@ func (d *Deployment) GetFile(name, hash string) []byte { } // SetFiles updates the nginx files and fileOverviews for the deployment and returns the message to send. -// The deployment MUST already be locked before calling this function. +// The deployment FileLock MUST already be locked before calling this function. func (d *Deployment) SetFiles(files []File) broadcast.NginxAgentMessage { d.files = files @@ -167,43 +206,18 @@ func (d *Deployment) SetFiles(files []File) broadcast.NginxAgentMessage { // SetNGINXPlusActions updates the deployment's latest NGINX Plus Actions to perform if using NGINX Plus. // Used by a Subscriber when it first connects. -// The deployment MUST already be locked before calling this function. 
+// The deployment FileLock MUST already be locked before calling this function. func (d *Deployment) SetNGINXPlusActions(actions []*pb.NGINXPlusAction) { d.nginxPlusActions = actions } -// SetPodErrorStatus sets the error status of a Pod in this Deployment if applying the config failed. -// The deployment MUST already be locked before calling this function. -func (d *Deployment) SetPodErrorStatus(pod string, err error) { - d.podStatuses[pod] = err -} - -// SetLatestConfigError sets the latest config apply error for the deployment. -// The deployment MUST already be locked before calling this function. -func (d *Deployment) SetLatestConfigError(err error) { - d.latestConfigError = err -} +//counterfeiter:generate . DeploymentStorer -// SetLatestUpstreamError sets the latest upstream update error for the deployment. -// The deployment MUST already be locked before calling this function. -func (d *Deployment) SetLatestUpstreamError(err error) { - d.latestUpstreamError = err -} - -// GetConfigurationStatus returns the current config status for this Deployment. It combines -// the most recent errors (if they exist) for all Pods in the Deployment into a single error. -// The deployment MUST already be locked before calling this function. -func (d *Deployment) GetConfigurationStatus() error { - errs := make([]error, 0, len(d.podStatuses)) - for _, err := range d.podStatuses { - errs = append(errs, err) - } - - if len(errs) == 1 { - return errs[0] - } - - return errors.Join(errs...) +// DeploymentStorer is an interface to store Deployments. +type DeploymentStorer interface { + Get(types.NamespacedName) *Deployment + GetOrStore(context.Context, types.NamespacedName, chan struct{}) *Deployment + Remove(types.NamespacedName) } // DeploymentStore holds a map of all Deployments. diff --git a/internal/mode/static/nginx/agent/file.go b/internal/mode/static/nginx/agent/file.go index 35f26b628c..fa604bc16b 100644 --- a/internal/mode/static/nginx/agent/file.go +++ b/internal/mode/static/nginx/agent/file.go @@ -25,8 +25,7 @@ type fileService struct { pb.FileServiceServer nginxDeployments *DeploymentStore connTracker agentgrpc.ConnectionsTracker - // TODO(sberman): all logs are at Info level right now. Adjust appropriately. 
- logger logr.Logger + logger logr.Logger } func newFileService( diff --git a/internal/mode/static/provisioner/eventloop.go b/internal/mode/static/provisioner/eventloop.go index c4ccc2b2e1..5f080156bd 100644 --- a/internal/mode/static/provisioner/eventloop.go +++ b/internal/mode/static/provisioner/eventloop.go @@ -80,6 +80,17 @@ func newEventLoop( ), }, }, + { + objectType: &corev1.Secret{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, } eventCh := make(chan interface{}) @@ -112,6 +123,7 @@ func newEventLoop( &corev1.ServiceList{}, &corev1.ServiceAccountList{}, &corev1.ConfigMapList{}, + &corev1.SecretList{}, }, ) diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go index 405b670c18..5885373213 100644 --- a/internal/mode/static/provisioner/handler.go +++ b/internal/mode/static/provisioner/handler.go @@ -56,7 +56,7 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, switch obj := e.Resource.(type) { case *gatewayv1.Gateway: h.store.updateGateway(obj) - case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap: + case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap, *corev1.Secret: objLabels := labels.Set(obj.GetLabels()) if h.labelSelector.Matches(objLabels) { gatewayName := objLabels.Get(controller.GatewayLabel) @@ -93,7 +93,7 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, logger.Error(err, "error deprovisioning nginx resources") } h.store.deleteGateway(e.NamespacedName) - case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, *corev1.ConfigMap: + case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, *corev1.ConfigMap, *corev1.Secret: if err := h.reprovisionResources(ctx, e); err != nil { logger.Error(err, "error re-provisioning nginx resources") } @@ -129,9 +129,9 @@ func (h *eventHandler) updateOrDeleteResources( h.store.registerResourceInGatewayConfig(gatewayNSName, obj) - resourceName := controller.CreateNginxResourceName(gatewayNSName.Name, h.gcName) resources := h.store.getNginxResourcesForGateway(gatewayNSName) if resources.Gateway != nil { + resourceName := controller.CreateNginxResourceName(gatewayNSName.Name, h.gcName) if err := h.provisioner.provisionNginx( ctx, resourceName, diff --git a/internal/mode/static/provisioner/handler_test.go b/internal/mode/static/provisioner/handler_test.go new file mode 100644 index 0000000000..bc3aa61d08 --- /dev/null +++ b/internal/mode/static/provisioner/handler_test.go @@ -0,0 +1,174 @@ +package provisioner + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/events" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" +) + +func TestHandleEventBatch_Upsert(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "") + provisioner, fakeClient, _ := defaultNginxProvisioner() + provisioner.cfg.StatusQueue = status.NewQueue() + provisioner.cfg.Plus = false + provisioner.cfg.NginxDockerSecretNames = nil + + labelSelector := metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "nginx"}, + } + gcName := "nginx" + + handler, err := newEventHandler(store, provisioner, labelSelector, gcName) + g.Expect(err).ToNot(HaveOccurred()) + + ctx := context.TODO() + logger := logr.Discard() + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + Labels: map[string]string{"app": "nginx"}, + }, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw-nginx", + Namespace: "default", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, + }, + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "test-gateway"}, + }, + } + + // Test handling Gateway + upsertEvent := &events.UpsertEvent{Resource: gateway} + + batch := events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(Equal(gateway)) + + store.registerResourceInGatewayConfig( + client.ObjectKeyFromObject(gateway), + &graph.Gateway{Source: gateway, Valid: true}, + ) + + // Test handling Deployment + upsertEvent = &events.UpsertEvent{Resource: deployment} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed()) + + // Test handling Service + upsertEvent = &events.UpsertEvent{Resource: service} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(provisioner.cfg.StatusQueue.Dequeue(ctx)).ToNot(BeNil()) + + // remove Gateway from store and verify that Deployment UpsertEvent results in deletion of resource + store.deleteGateway(client.ObjectKeyFromObject(gateway)) + g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(BeNil()) + + upsertEvent = &events.UpsertEvent{Resource: deployment} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).ToNot(Succeed()) + + // do the same thing but when provisioner is not leader. 
+ // non-leader should not delete resources, but instead track them + g.Expect(fakeClient.Create(ctx, deployment)).To(Succeed()) + provisioner.leader = false + + upsertEvent = &events.UpsertEvent{Resource: deployment} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(provisioner.resourcesToDeleteOnStartup).To(HaveLen(1)) + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed()) +} + +func TestHandleEventBatch_Delete(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "") + provisioner, fakeClient, _ := defaultNginxProvisioner() + provisioner.cfg.StatusQueue = status.NewQueue() + provisioner.cfg.Plus = false + provisioner.cfg.NginxDockerSecretNames = nil + + labelSelector := metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "nginx"}, + } + gcName := "nginx" + + handler, err := newEventHandler(store, provisioner, labelSelector, gcName) + g.Expect(err).ToNot(HaveOccurred()) + + ctx := context.TODO() + logger := logr.Discard() + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + Labels: map[string]string{"app": "nginx"}, + }, + } + + store.registerResourceInGatewayConfig( + client.ObjectKeyFromObject(gateway), + &graph.Gateway{Source: gateway, Valid: true}, + ) + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw-nginx", + Namespace: "default", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, + }, + } + + store.registerResourceInGatewayConfig(client.ObjectKeyFromObject(gateway), deployment) + + // if deployment is deleted, it should be re-created since Gateway still exists + deleteEvent := &events.DeleteEvent{Type: deployment, NamespacedName: client.ObjectKeyFromObject(deployment)} + batch := events.EventBatch{deleteEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed()) + + // delete Gateway + deleteEvent = &events.DeleteEvent{Type: gateway, NamespacedName: client.ObjectKeyFromObject(gateway)} + batch = events.EventBatch{deleteEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(BeNil()) + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).ToNot(Succeed()) +} diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index 6c3c6b5c01..dc73164a40 100644 --- a/internal/mode/static/provisioner/objects.go +++ b/internal/mode/static/provisioner/objects.go @@ -1,9 +1,12 @@ package provisioner import ( + "context" + "errors" "fmt" "maps" "strconv" + "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -39,12 +42,28 @@ func (p *NginxProvisioner) buildNginxResourceObjects( resourceName string, gateway *gatewayv1.Gateway, nProxyCfg *graph.EffectiveNginxProxy, -) []client.Object { - // TODO(sberman): handle nginx plus config - +) ([]client.Object, error) { ngxIncludesConfigMapName := controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix) ngxAgentConfigMapName := controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix) + var jwtSecretName, caSecretName, clientSSLSecretName string + if p.cfg.Plus { + jwtSecretName = controller.CreateNginxResourceName(resourceName, p.cfg.PlusUsageConfig.SecretName) + if 
p.cfg.PlusUsageConfig.CASecretName != "" { + caSecretName = controller.CreateNginxResourceName(resourceName, p.cfg.PlusUsageConfig.CASecretName) + } + if p.cfg.PlusUsageConfig.ClientSSLSecretName != "" { + clientSSLSecretName = controller.CreateNginxResourceName(resourceName, p.cfg.PlusUsageConfig.ClientSSLSecretName) + } + } + + // map key is the new name, value is the original name + dockerSecretNames := make(map[string]string) + for _, name := range p.cfg.NginxDockerSecretNames { + newName := controller.CreateNginxResourceName(resourceName, name) + dockerSecretNames[newName] = name + } + selectorLabels := make(map[string]string) maps.Copy(selectorLabels, p.baseLabelSelector.MatchLabels) selectorLabels[controller.GatewayLabel] = gateway.GetName() @@ -72,11 +91,21 @@ func (p *NginxProvisioner) buildNginxResourceObjects( Annotations: annotations, } + secrets, err := p.buildNginxSecrets( + objectMeta, + dockerSecretNames, + jwtSecretName, + caSecretName, + clientSSLSecretName, + ) + configmaps := p.buildNginxConfigMaps( objectMeta, nProxyCfg, ngxIncludesConfigMapName, ngxAgentConfigMapName, + caSecretName != "", + clientSSLSecretName != "", ) serviceAccount := &corev1.ServiceAccount{ @@ -96,6 +125,10 @@ func (p *NginxProvisioner) buildNginxResourceObjects( ngxAgentConfigMapName, ports, selectorLabels, + dockerSecretNames, + jwtSecretName, + caSecretName, + clientSSLSecretName, ) // order to install resources: @@ -106,11 +139,114 @@ func (p *NginxProvisioner) buildNginxResourceObjects( // service // deployment/daemonset - objects := make([]client.Object, 0, len(configmaps)+3) + objects := make([]client.Object, 0, len(configmaps)+len(secrets)+3) + objects = append(objects, secrets...) objects = append(objects, configmaps...) objects = append(objects, serviceAccount, service, deployment) - return objects + return objects, err +} + +func (p *NginxProvisioner) buildNginxSecrets( + objectMeta metav1.ObjectMeta, + dockerSecretNames map[string]string, + jwtSecretName string, + caSecretName string, + clientSSLSecretName string, +) ([]client.Object, error) { + var secrets []client.Object + var errs []error + + for newName, origName := range dockerSecretNames { + newSecret, err := p.getAndUpdateSecret( + origName, + metav1.ObjectMeta{ + Name: newName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + ) + if err != nil { + errs = append(errs, err) + } else { + secrets = append(secrets, newSecret) + } + } + + if jwtSecretName != "" { + newSecret, err := p.getAndUpdateSecret( + p.cfg.PlusUsageConfig.SecretName, + metav1.ObjectMeta{ + Name: jwtSecretName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + ) + if err != nil { + errs = append(errs, err) + } else { + secrets = append(secrets, newSecret) + } + } + + if caSecretName != "" { + newSecret, err := p.getAndUpdateSecret( + p.cfg.PlusUsageConfig.CASecretName, + metav1.ObjectMeta{ + Name: caSecretName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + ) + if err != nil { + errs = append(errs, err) + } else { + secrets = append(secrets, newSecret) + } + } + + if clientSSLSecretName != "" { + newSecret, err := p.getAndUpdateSecret( + p.cfg.PlusUsageConfig.ClientSSLSecretName, + metav1.ObjectMeta{ + Name: clientSSLSecretName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + ) + if err != nil { + errs = append(errs, 
err) + } else { + secrets = append(secrets, newSecret) + } + } + + return secrets, errors.Join(errs...) +} + +func (p *NginxProvisioner) getAndUpdateSecret( + name string, + newObjectMeta metav1.ObjectMeta, +) (*corev1.Secret, error) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + key := types.NamespacedName{Namespace: p.cfg.GatewayPodConfig.Namespace, Name: name} + secret := &corev1.Secret{} + if err := p.k8sClient.Get(ctx, key, secret); err != nil { + return nil, fmt.Errorf("error getting secret: %w", err) + } + + newSecret := &corev1.Secret{ + ObjectMeta: newObjectMeta, + Data: secret.Data, + } + + return newSecret, nil } func (p *NginxProvisioner) buildNginxConfigMaps( @@ -118,6 +254,8 @@ func (p *NginxProvisioner) buildNginxConfigMaps( nProxyCfg *graph.EffectiveNginxProxy, ngxIncludesConfigMapName string, ngxAgentConfigMapName string, + caSecret bool, + clientSSLSecret bool, ) []client.Object { var logging *ngfAPIv1alpha2.NginxLogging if nProxyCfg != nil && nProxyCfg.Logging != nil { @@ -145,6 +283,17 @@ func (p *NginxProvisioner) buildNginxConfigMaps( }, } + if p.cfg.Plus { + mgmtFields := map[string]interface{}{ + "UsageEndpoint": p.cfg.PlusUsageConfig.Endpoint, + "SkipVerify": p.cfg.PlusUsageConfig.SkipVerify, + "UsageCASecret": caSecret, + "UsageClientSSLSecret": clientSSLSecret, + } + + bootstrapCM.Data["mgmt.conf"] = string(helpers.MustExecuteTemplate(mgmtTemplate, mgmtFields)) + } + metricsPort := config.DefaultNginxMetricsPort port, enableMetrics := graph.MetricsEnabledForNginxProxy(nProxyCfg) if port != nil { @@ -236,6 +385,10 @@ func (p *NginxProvisioner) buildNginxDeployment( ngxAgentConfigMapName string, ports map[int32]struct{}, selectorLabels map[string]string, + dockerSecretNames map[string]string, + jwtSecretName string, + caSecretName string, + clientSSLSecretName string, ) client.Object { podTemplateSpec := p.buildNginxPodTemplateSpec( objectMeta, @@ -243,6 +396,10 @@ func (p *NginxProvisioner) buildNginxDeployment( ngxIncludesConfigMapName, ngxAgentConfigMapName, ports, + dockerSecretNames, + jwtSecretName, + caSecretName, + clientSSLSecretName, ) var object client.Object @@ -271,15 +428,18 @@ func (p *NginxProvisioner) buildNginxDeployment( return object } +//nolint:gocyclo // will refactor at some point func (p *NginxProvisioner) buildNginxPodTemplateSpec( objectMeta metav1.ObjectMeta, nProxyCfg *graph.EffectiveNginxProxy, ngxIncludesConfigMapName string, ngxAgentConfigMapName string, ports map[int32]struct{}, + dockerSecretNames map[string]string, + jwtSecretName string, + caSecretName string, + clientSSLSecretName string, ) corev1.PodTemplateSpec { - // TODO(sberman): handle nginx plus; debug - containerPorts := make([]corev1.ContainerPort, 0, len(ports)) for port := range ports { containerPort := corev1.ContainerPort{ @@ -388,6 +548,7 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( }, }, }, + ImagePullSecrets: []corev1.LocalObjectReference{}, ServiceAccountName: objectMeta.Name, Volumes: []corev1.Volume{ {Name: "nginx-agent", VolumeSource: emptyDirVolumeSource}, @@ -456,6 +617,76 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( } } + for name := range dockerSecretNames { + ref := corev1.LocalObjectReference{Name: name} + spec.Spec.ImagePullSecrets = append(spec.Spec.ImagePullSecrets, ref) + } + + if p.cfg.Plus { + initCmd := spec.Spec.InitContainers[0].Command + initCmd = append(initCmd, + "--source", "/includes/mgmt.conf", "--destination", "/etc/nginx/main-includes", "--nginx-plus") + 
spec.Spec.InitContainers[0].Command = initCmd + + volumeMounts := spec.Spec.Containers[0].VolumeMounts + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "nginx-lib", + MountPath: "/var/lib/nginx/state", + }) + spec.Spec.Volumes = append(spec.Spec.Volumes, corev1.Volume{ + Name: "nginx-lib", + VolumeSource: emptyDirVolumeSource, + }) + + if jwtSecretName != "" { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "nginx-plus-license", + MountPath: "/etc/nginx/license.jwt", + SubPath: "license.jwt", + }) + spec.Spec.Volumes = append(spec.Spec.Volumes, corev1.Volume{ + Name: "nginx-plus-license", + VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: jwtSecretName}}, + }) + } + if caSecretName != "" || clientSSLSecretName != "" { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "nginx-plus-usage-certs", + MountPath: "/etc/nginx/certs-bootstrap/", + }) + + sources := []corev1.VolumeProjection{} + + if caSecretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: caSecretName}, + }, + }) + } + + if clientSSLSecretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: clientSSLSecretName}, + }, + }) + } + + spec.Spec.Volumes = append(spec.Spec.Volumes, corev1.Volume{ + Name: "nginx-plus-usage-certs", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + } + + spec.Spec.Containers[0].VolumeMounts = volumeMounts + } + return spec } @@ -489,7 +720,18 @@ func (p *NginxProvisioner) buildImage(nProxyCfg *graph.EffectiveNginxProxy) (str return fmt.Sprintf("%s:%s", image, tag), pullPolicy } +// TODO(sberman): see about how this can be made more elegant. Maybe create some sort of Object factory +// that can better store/build all the objects we need, to reduce the amount of duplicate object lists that we +// have everywhere. 
func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName types.NamespacedName) []client.Object { + // order to delete: + // deployment/daemonset + // service + // serviceaccount + // configmaps + // secrets + // scc (if openshift) + objectMeta := metav1.ObjectMeta{ Name: deploymentNSName.Name, Namespace: deploymentNSName.Namespace, @@ -517,13 +759,54 @@ func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName }, } - // order to delete: - // deployment/daemonset - // service - // serviceaccount - // configmaps - // secrets - // scc (if openshift) + objects := []client.Object{deployment, service, serviceAccount, bootstrapCM, agentCM} + + for _, name := range p.cfg.NginxDockerSecretNames { + newName := controller.CreateNginxResourceName(deploymentNSName.Name, name) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: newName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, secret) + } - return []client.Object{deployment, service, serviceAccount, bootstrapCM, agentCM} + var jwtSecretName, caSecretName, clientSSLSecretName string + if p.cfg.Plus { + if p.cfg.PlusUsageConfig.CASecretName != "" { + caSecretName = controller.CreateNginxResourceName(deploymentNSName.Name, p.cfg.PlusUsageConfig.CASecretName) + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: caSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, caSecret) + } + if p.cfg.PlusUsageConfig.ClientSSLSecretName != "" { + clientSSLSecretName = controller.CreateNginxResourceName( + deploymentNSName.Name, + p.cfg.PlusUsageConfig.ClientSSLSecretName, + ) + clientSSLSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientSSLSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, clientSSLSecret) + } + + jwtSecretName = controller.CreateNginxResourceName(deploymentNSName.Name, p.cfg.PlusUsageConfig.SecretName) + jwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, jwtSecret) + } + + return objects } diff --git a/internal/mode/static/provisioner/objects_test.go b/internal/mode/static/provisioner/objects_test.go new file mode 100644 index 0000000000..27fba0d734 --- /dev/null +++ b/internal/mode/static/provisioner/objects_test.go @@ -0,0 +1,617 @@ +package provisioner + +import ( + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +func TestBuildNginxResourceObjects(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: "default", + Version: "1.0.0", + Image: "ngf-image", + }, + }, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + Spec: gatewayv1.GatewaySpec{ + Infrastructure: &gatewayv1.GatewayInfrastructure{ + Labels: map[gatewayv1.LabelKey]gatewayv1.LabelValue{ + "label": "value", + }, + Annotations: map[gatewayv1.AnnotationKey]gatewayv1.AnnotationValue{ + "annotation": "value", + }, + }, + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + }, + }, + } + + expLabels := map[string]string{ + "label": "value", + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + expAnnotations := map[string]string{ + "annotation": "value", + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(5)) + + validateLabelsAndAnnotations := func(obj client.Object) { + g.Expect(obj.GetLabels()).To(Equal(expLabels)) + g.Expect(obj.GetAnnotations()).To(Equal(expAnnotations)) + } + + validateMeta := func(obj client.Object) { + g.Expect(obj.GetName()).To(Equal(resourceName)) + validateLabelsAndAnnotations(obj) + } + + cmObj := objects[0] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix))) + validateLabelsAndAnnotations(cm) + g.Expect(cm.Data).To(HaveKey("main.conf")) + g.Expect(cm.Data["main.conf"]).To(ContainSubstring("info")) + + cmObj = objects[1] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix))) + validateLabelsAndAnnotations(cm) + g.Expect(cm.Data).To(HaveKey("nginx-agent.conf")) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("command:")) + + svcAcctObj := objects[2] + svcAcct, ok := svcAcctObj.(*corev1.ServiceAccount) + g.Expect(ok).To(BeTrue()) + validateMeta(svcAcct) + + svcObj := objects[3] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + validateMeta(svc) + g.Expect(svc.Spec.Type).To(Equal(defaultServiceType)) + g.Expect(svc.Spec.ExternalTrafficPolicy).To(Equal(defaultServicePolicy)) + g.Expect(svc.Spec.Ports).To(ContainElement(corev1.ServicePort{ + Port: 80, + Name: "port-80", + 
TargetPort: intstr.FromInt(80), + })) + + depObj := objects[4] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + validateMeta(dep) + + template := dep.Spec.Template + g.Expect(template.GetAnnotations()).To(HaveKey("prometheus.io/scrape")) + g.Expect(template.Spec.Containers).To(HaveLen(1)) + container := template.Spec.Containers[0] + + g.Expect(container.Ports).To(ContainElement(corev1.ContainerPort{ + ContainerPort: config.DefaultNginxMetricsPort, + Name: "metrics", + })) + g.Expect(container.Ports).To(ContainElement(corev1.ContainerPort{ + ContainerPort: 80, + Name: "port-80", + })) + + g.Expect(container.Image).To(Equal(fmt.Sprintf("%s:1.0.0", defaultNginxImagePath))) + g.Expect(container.ImagePullPolicy).To(Equal(defaultImagePullPolicy)) + + g.Expect(template.Spec.InitContainers).To(HaveLen(1)) + initContainer := template.Spec.InitContainers[0] + + g.Expect(initContainer.Image).To(Equal("ngf-image")) + g.Expect(initContainer.ImagePullPolicy).To(Equal(defaultImagePullPolicy)) +} + +func TestBuildNginxResourceObjects_NginxProxyConfig(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: "default", + Version: "1.0.0", + }, + }, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + } + + resourceName := "gw-nginx" + nProxyCfg := &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + Metrics: &ngfAPIv1alpha2.Metrics{ + Port: helpers.GetPointer[int32](8080), + }, + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Service: &ngfAPIv1alpha2.ServiceSpec{ + ServiceType: helpers.GetPointer(ngfAPIv1alpha2.ServiceTypeNodePort), + ExternalTrafficPolicy: helpers.GetPointer(ngfAPIv1alpha2.ExternalTrafficPolicyCluster), + LoadBalancerIP: helpers.GetPointer("1.2.3.4"), + LoadBalancerSourceRanges: []string{"5.6.7.8"}, + }, + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer[int32](3), + Pod: ngfAPIv1alpha2.PodSpec{ + TerminationGracePeriodSeconds: helpers.GetPointer[int64](25), + }, + Container: ngfAPIv1alpha2.ContainerSpec{ + Image: &ngfAPIv1alpha2.Image{ + Repository: helpers.GetPointer("nginx-repo"), + Tag: helpers.GetPointer("1.1.1"), + PullPolicy: helpers.GetPointer(ngfAPIv1alpha2.PullAlways), + }, + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.Quantity{Format: "100m"}, + }, + }, + }, + }, + }, + } + + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(5)) + + cmObj := objects[0] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data).To(HaveKey("main.conf")) + g.Expect(cm.Data["main.conf"]).To(ContainSubstring("debug")) + + cmObj = objects[1] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("level: debug")) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("port: 8080")) + + svcObj := objects[3] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + g.Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) + 
g.Expect(svc.Spec.ExternalTrafficPolicy).To(Equal(corev1.ServiceExternalTrafficPolicyTypeCluster)) + g.Expect(svc.Spec.LoadBalancerIP).To(Equal("1.2.3.4")) + g.Expect(svc.Spec.LoadBalancerSourceRanges).To(Equal([]string{"5.6.7.8"})) + + depObj := objects[4] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + + template := dep.Spec.Template + g.Expect(*template.Spec.TerminationGracePeriodSeconds).To(Equal(int64(25))) + + container := template.Spec.Containers[0] + + g.Expect(container.Ports).To(ContainElement(corev1.ContainerPort{ + ContainerPort: 8080, + Name: "metrics", + })) + + g.Expect(container.Image).To(Equal("nginx-repo:1.1.1")) + g.Expect(container.ImagePullPolicy).To(Equal(corev1.PullAlways)) + g.Expect(container.Resources.Limits).To(HaveKey(corev1.ResourceCPU)) + g.Expect(container.Resources.Limits[corev1.ResourceCPU].Format).To(Equal(resource.Format("100m"))) +} + +func TestBuildNginxResourceObjects_Plus(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + jwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"license.jwt": []byte("jwt")}, + } + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: caTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"ca.crt": []byte("ca")}, + } + clientSSLSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + + fakeClient := fake.NewFakeClient(jwtSecret, caSecret, clientSSLSecret) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + }, + Plus: true, + PlusUsageConfig: &config.UsageReportConfig{ + SecretName: jwtTestSecretName, + CASecretName: caTestSecretName, + ClientSSLSecretName: clientTestSecretName, + Endpoint: "test.com", + SkipVerify: true, + }, + }, + k8sClient: fakeClient, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + Spec: gatewayv1.GatewaySpec{ + Infrastructure: &gatewayv1.GatewayInfrastructure{ + Labels: map[gatewayv1.LabelKey]gatewayv1.LabelValue{ + "label": "value", + }, + Annotations: map[gatewayv1.AnnotationKey]gatewayv1.AnnotationValue{ + "annotation": "value", + }, + }, + }, + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(8)) + + expLabels := map[string]string{ + "label": "value", + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + expAnnotations := map[string]string{ + "annotation": "value", + } + + secretObj := objects[0] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, jwtTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("license.jwt")) + g.Expect(secret.Data["license.jwt"]).To(Equal([]byte("jwt"))) + + secretObj = objects[1] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + 
g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, caTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("ca.crt")) + g.Expect(secret.Data["ca.crt"]).To(Equal([]byte("ca"))) + + secretObj = objects[2] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, clientTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("tls.crt")) + g.Expect(secret.Data["tls.crt"]).To(Equal([]byte("tls"))) + + cmObj := objects[3] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data).To(HaveKey("mgmt.conf")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("usage_report endpoint=test.com;")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_verify off;")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_trusted_certificate")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate_key")) + + cmObj = objects[4] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + g.Expect(cm.Data).To(HaveKey("nginx-agent.conf")) + g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("api-action")) + + depObj := objects[7] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + + template := dep.Spec.Template + container := template.Spec.Containers[0] + initContainer := template.Spec.InitContainers[0] + + g.Expect(initContainer.Command).To(ContainElement("/includes/mgmt.conf")) + g.Expect(container.VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "nginx-plus-license", + MountPath: "/etc/nginx/license.jwt", + SubPath: "license.jwt", + })) + g.Expect(container.VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "nginx-plus-usage-certs", + MountPath: "/etc/nginx/certs-bootstrap/", + })) +} + +func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + dockerSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"data": []byte("docker")}, + } + fakeClient := fake.NewFakeClient(dockerSecret) + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + }, + NginxDockerSecretNames: []string{dockerTestSecretName}, + }, + k8sClient: fakeClient, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(6)) + + expLabels := map[string]string{ + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + + secretObj := objects[0] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + depObj := 
objects[5] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + + g.Expect(dep.Spec.Template.Spec.ImagePullSecrets).To(ContainElement(corev1.LocalObjectReference{ + Name: controller.CreateNginxResourceName(resourceName, dockerTestSecretName), + })) +} + +func TestGetAndUpdateSecret_NotFound(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeClient := fake.NewFakeClient() + + provisioner := &NginxProvisioner{ + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: "default", + }, + }, + k8sClient: fakeClient, + } + + _, err := provisioner.getAndUpdateSecret("non-existent-secret", metav1.ObjectMeta{ + Name: "new-secret", + Namespace: "default", + }) + + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("error getting secret")) +} + +func TestBuildNginxResourceObjectsForDeletion(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{} + + deploymentNSName := types.NamespacedName{ + Name: "gw-nginx", + Namespace: "default", + } + + objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) + + g.Expect(objects).To(HaveLen(5)) + + validateMeta := func(obj client.Object, name string) { + g.Expect(obj.GetName()).To(Equal(name)) + g.Expect(obj.GetNamespace()).To(Equal(deploymentNSName.Namespace)) + } + + depObj := objects[0] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + validateMeta(dep, deploymentNSName.Name) + + svcObj := objects[1] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + validateMeta(svc, deploymentNSName.Name) + + svcAcctObj := objects[2] + svcAcct, ok := svcAcctObj.(*corev1.ServiceAccount) + g.Expect(ok).To(BeTrue()) + validateMeta(svcAcct, deploymentNSName.Name) + + cmObj := objects[3] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, nginxIncludesConfigMapNameSuffix)) + + cmObj = objects[4] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, nginxAgentConfigMapNameSuffix)) +} + +func TestBuildNginxResourceObjectsForDeletion_Plus(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{ + cfg: Config{ + Plus: true, + PlusUsageConfig: &config.UsageReportConfig{ + SecretName: jwtTestSecretName, + CASecretName: caTestSecretName, + ClientSSLSecretName: clientTestSecretName, + }, + NginxDockerSecretNames: []string{dockerTestSecretName}, + }, + } + + deploymentNSName := types.NamespacedName{ + Name: "gw-nginx", + Namespace: "default", + } + + objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) + + g.Expect(objects).To(HaveLen(9)) + + validateMeta := func(obj client.Object, name string) { + g.Expect(obj.GetName()).To(Equal(name)) + g.Expect(obj.GetNamespace()).To(Equal(deploymentNSName.Namespace)) + } + + depObj := objects[0] + dep, ok := depObj.(*appsv1.Deployment) + g.Expect(ok).To(BeTrue()) + validateMeta(dep, deploymentNSName.Name) + + svcObj := objects[1] + svc, ok := svcObj.(*corev1.Service) + g.Expect(ok).To(BeTrue()) + validateMeta(svc, deploymentNSName.Name) + + svcAcctObj := objects[2] + svcAcct, ok := svcAcctObj.(*corev1.ServiceAccount) + g.Expect(ok).To(BeTrue()) + validateMeta(svcAcct, deploymentNSName.Name) + + cmObj := objects[3] + cm, ok := cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, 
nginxIncludesConfigMapNameSuffix)) + + cmObj = objects[4] + cm, ok = cmObj.(*corev1.ConfigMap) + g.Expect(ok).To(BeTrue()) + validateMeta(cm, controller.CreateNginxResourceName(deploymentNSName.Name, nginxAgentConfigMapNameSuffix)) + + secretObj := objects[5] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.NginxDockerSecretNames[0], + )) + + secretObj = objects[6] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.PlusUsageConfig.CASecretName, + )) + + secretObj = objects[7] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.PlusUsageConfig.ClientSSLSecretName, + )) +} diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go index a505cf90ab..643fb3c6ff 100644 --- a/internal/mode/static/provisioner/provisioner.go +++ b/internal/mode/static/provisioner/provisioner.go @@ -41,13 +41,17 @@ type Provisioner interface { // Config is the configuration for the Provisioner. type Config struct { - DeploymentStore *agent.DeploymentStore - StatusQueue *status.Queue - Logger logr.Logger - GatewayPodConfig config.GatewayPodConfig - EventRecorder record.EventRecorder - GCName string - Plus bool + GCName string + + DeploymentStore agent.DeploymentStorer + StatusQueue *status.Queue + GatewayPodConfig *config.GatewayPodConfig + PlusUsageConfig *config.UsageReportConfig + EventRecorder record.EventRecorder + Logger logr.Logger + NginxDockerSecretNames []string + + Plus bool } // NginxProvisioner handles provisioning nginx kubernetes resources. 
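A rough sketch of how a caller might populate the reshaped Config shown above — GatewayPodConfig and PlusUsageConfig are now pointers and DeploymentStore is accepted as the agent.DeploymentStorer interface. Every concrete value below, and the deploymentStore, statusQueue, recorder, and logger identifiers, are hypothetical stand-ins for illustration only, not part of this patch:

cfg := provisioner.Config{
    GCName:          "nginx",           // hypothetical GatewayClass name
    DeploymentStore: deploymentStore,   // any agent.DeploymentStorer implementation
    StatusQueue:     statusQueue,
    GatewayPodConfig: &config.GatewayPodConfig{
        Namespace: "nginx-gateway",
    },
    PlusUsageConfig: &config.UsageReportConfig{ // only consulted when Plus is true
        SecretName:          "nplus-license",
        CASecretName:        "usage-ca",
        ClientSSLSecretName: "usage-client-ssl",
    },
    EventRecorder:          recorder,
    Logger:                 logger,
    NginxDockerSecretNames: []string{"regcred"},
    Plus:                   true,
}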
@@ -70,7 +74,13 @@ func NewNginxProvisioner( mgr manager.Manager, cfg Config, ) (*NginxProvisioner, *events.EventLoop, error) { - store := newStore() + var jwtSecretName, caSecretName, clientSSLSecretName string + if cfg.Plus && cfg.PlusUsageConfig != nil { + jwtSecretName = cfg.PlusUsageConfig.SecretName + caSecretName = cfg.PlusUsageConfig.CASecretName + clientSSLSecretName = cfg.PlusUsageConfig.ClientSSLSecretName + } + store := newStore(cfg.NginxDockerSecretNames, jwtSecretName, caSecretName, clientSSLSecretName) selector := metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -150,7 +160,10 @@ func (p *NginxProvisioner) provisionNginx( return nil } - objects := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) + objects, err := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) + if err != nil { + return fmt.Errorf("error provisioning nginx resources :%w", err) + } p.cfg.Logger.Info( "Creating/Updating nginx resources", @@ -194,10 +207,6 @@ func (p *NginxProvisioner) provisionNginx( } cancel() - if res != controllerutil.OperationResultCreated && res != controllerutil.OperationResultUpdated { - continue - } - switch o := obj.(type) { case *appsv1.Deployment: deploymentObj = o @@ -211,6 +220,10 @@ func (p *NginxProvisioner) provisionNginx( } } + if res != controllerutil.OperationResultCreated && res != controllerutil.OperationResultUpdated { + continue + } + result := cases.Title(language.English, cases.Compact).String(string(res)) p.cfg.Logger.V(1).Info( fmt.Sprintf("%s nginx %s", result, obj.GetObjectKind().GroupVersionKind().Kind), @@ -259,7 +272,11 @@ func (p *NginxProvisioner) reprovisionNginx( if !p.isLeader() { return nil } - objects := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) + + objects, err := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) + if err != nil { + return fmt.Errorf("error provisioning nginx resources :%w", err) + } p.cfg.Logger.Info( "Re-creating nginx resources", @@ -287,36 +304,34 @@ func (p *NginxProvisioner) reprovisionNginx( } func (p *NginxProvisioner) deprovisionNginx(ctx context.Context, gatewayNSName types.NamespacedName) error { - if !p.isLeader() { - return nil - } - - p.cfg.Logger.Info( - "Removing nginx resources for Gateway", - "name", gatewayNSName.Name, - "namespace", gatewayNSName.Namespace, - ) - deploymentNSName := types.NamespacedName{ Name: controller.CreateNginxResourceName(gatewayNSName.Name, p.cfg.GCName), Namespace: gatewayNSName.Namespace, } - objects := p.buildNginxResourceObjectsForDeletion(deploymentNSName) + if p.isLeader() { + p.cfg.Logger.Info( + "Removing nginx resources for Gateway", + "name", gatewayNSName.Name, + "namespace", gatewayNSName.Namespace, + ) - createCtx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() + objects := p.buildNginxResourceObjectsForDeletion(deploymentNSName) - for _, obj := range objects { - if err := p.k8sClient.Delete(createCtx, obj); err != nil && !apierrors.IsNotFound(err) { - p.cfg.EventRecorder.Eventf( - obj, - corev1.EventTypeWarning, - "DeleteFailed", - "Failed to delete nginx resource: %s", - err.Error(), - ) - return err + createCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + for _, obj := range objects { + if err := p.k8sClient.Delete(createCtx, obj); err != nil && !apierrors.IsNotFound(err) { + p.cfg.EventRecorder.Eventf( + obj, + corev1.EventTypeWarning, + "DeleteFailed", + "Failed to delete nginx resource: %s", + err.Error(), + ) + return err + } } } @@ -335,6 +350,10 @@ func (p 
*NginxProvisioner) RegisterGateway( gateway *graph.Gateway, resourceName string, ) error { + if !p.isLeader() { + return nil + } + gatewayNSName := client.ObjectKeyFromObject(gateway.Source) if updated := p.store.registerResourceInGatewayConfig(gatewayNSName, gateway); !updated { return nil diff --git a/internal/mode/static/provisioner/provisioner_test.go b/internal/mode/static/provisioner/provisioner_test.go new file mode 100644 index 0000000000..2c611912d8 --- /dev/null +++ b/internal/mode/static/provisioner/provisioner_test.go @@ -0,0 +1,361 @@ +package provisioner + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/manager" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/agentfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +var ( + jwtTestSecretName = "jwt-secret" + caTestSecretName = "ca-secret" + clientTestSecretName = "client-secret" + dockerTestSecretName = "docker-secret" + ngfNamespace = "nginx-gateway" +) + +func createScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + + utilruntime.Must(gatewayv1.Install(scheme)) + utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + + return scheme +} + +func expectResourcesToExist(g *WithT, k8sClient client.Client, nsName types.NamespacedName, plus bool) { + g.Expect(k8sClient.Get(context.TODO(), nsName, &appsv1.Deployment{})).To(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, &corev1.Service{})).To(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, &corev1.ServiceAccount{})).To(Succeed()) + + boostrapCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, nginxIncludesConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), boostrapCM, &corev1.ConfigMap{})).To(Succeed()) + + agentCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, nginxAgentConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentCM, &corev1.ConfigMap{})).To(Succeed()) + + if !plus { + return + } + + jwtSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, jwtTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), jwtSecret, &corev1.Secret{})).To(Succeed()) + + caSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, caTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), caSecret, &corev1.Secret{})).To(Succeed()) + + clientSSLSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, 
clientTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), clientSSLSecret, &corev1.Secret{})).To(Succeed()) + + dockerSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, dockerTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), dockerSecret, &corev1.Secret{})).To(Succeed()) +} + +func expectResourcesToNotExist(g *WithT, k8sClient client.Client, nsName types.NamespacedName) { + g.Expect(k8sClient.Get(context.TODO(), nsName, &appsv1.Deployment{})).ToNot(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, &corev1.Service{})).ToNot(Succeed()) + + g.Expect(k8sClient.Get(context.TODO(), nsName, &corev1.ServiceAccount{})).ToNot(Succeed()) + + boostrapCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, nginxIncludesConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), boostrapCM, &corev1.ConfigMap{})).ToNot(Succeed()) + + agentCM := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, nginxAgentConfigMapNameSuffix), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentCM, &corev1.ConfigMap{})).ToNot(Succeed()) + + jwtSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, jwtTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), jwtSecret, &corev1.Secret{})).ToNot(Succeed()) + + caSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, caTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), caSecret, &corev1.Secret{})).ToNot(Succeed()) + + clientSSLSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, clientTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), clientSSLSecret, &corev1.Secret{})).ToNot(Succeed()) + + dockerSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, dockerTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), dockerSecret, &corev1.Secret{})).ToNot(Succeed()) +} + +func defaultNginxProvisioner( + objects ...client.Object, +) (*NginxProvisioner, client.Client, *agentfakes.FakeDeploymentStorer) { + fakeClient := fake.NewClientBuilder().WithScheme(createScheme()).WithObjects(objects...).Build() + deploymentStore := &agentfakes.FakeDeploymentStorer{} + + return &NginxProvisioner{ + store: newStore([]string{"docker-secret"}, "jwt-secret", "ca-secret", "client-ssl-secret"), + k8sClient: fakeClient, + cfg: Config{ + DeploymentStore: deploymentStore, + GatewayPodConfig: &config.GatewayPodConfig{ + InstanceName: "test-instance", + Namespace: ngfNamespace, + }, + Logger: logr.Discard(), + EventRecorder: &record.FakeRecorder{}, + GCName: "nginx", + Plus: true, + PlusUsageConfig: &config.UsageReportConfig{ + SecretName: jwtTestSecretName, + CASecretName: caTestSecretName, + ClientSSLSecretName: clientTestSecretName, + }, + NginxDockerSecretNames: []string{dockerTestSecretName}, + }, + leader: true, + }, fakeClient, deploymentStore +} + +func TestNewNginxProvisioner(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + mgr, err := manager.New(&rest.Config{}, manager.Options{Scheme: createScheme()}) + g.Expect(err).ToNot(HaveOccurred()) + + cfg := Config{ + GCName: "test-gc", + GatewayPodConfig: &config.GatewayPodConfig{ + InstanceName: "test-instance", 
+ }, + Logger: logr.Discard(), + } + + provisioner, eventLoop, err := NewNginxProvisioner(context.TODO(), mgr, cfg) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(provisioner).NotTo(BeNil()) + g.Expect(eventLoop).NotTo(BeNil()) + + labelSelector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/managed-by": "test-instance-test-gc", + "app.kubernetes.io/instance": "test-instance", + }, + } + g.Expect(provisioner.baseLabelSelector).To(Equal(labelSelector)) +} + +func TestEnable(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + dep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw-nginx", + Namespace: "default", + }, + } + provisioner, fakeClient, _ := defaultNginxProvisioner(dep) + provisioner.setResourceToDelete(types.NamespacedName{Name: "gw", Namespace: "default"}) + provisioner.leader = false + + provisioner.Enable(context.TODO()) + g.Expect(provisioner.isLeader()).To(BeTrue()) + g.Expect(provisioner.resourcesToDeleteOnStartup).To(BeEmpty()) + expectResourcesToNotExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}) +} + +func TestRegisterGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + gateway := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: true, + } + + objects := []client.Object{ + gateway.Source, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtTestSecretName, + Namespace: ngfNamespace, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: caTestSecretName, + Namespace: ngfNamespace, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientTestSecretName, + Namespace: ngfNamespace, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerTestSecretName, + Namespace: ngfNamespace, + }, + }, + } + + provisioner, fakeClient, deploymentStore := defaultNginxProvisioner(objects...) 
+ + g.Expect(provisioner.RegisterGateway(context.TODO(), gateway, "gw-nginx")).To(Succeed()) + expectResourcesToExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}, true) // plus + + // Call again, no updates so nothing should happen + g.Expect(provisioner.RegisterGateway(context.TODO(), gateway, "gw-nginx")).To(Succeed()) + expectResourcesToExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}, true) // plus + + // Now set the Gateway to invalid, and expect a deprovision to occur + invalid := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: false, + } + g.Expect(provisioner.RegisterGateway(context.TODO(), invalid, "gw-nginx")).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}) + + resources := provisioner.store.getNginxResourcesForGateway(types.NamespacedName{Name: "gw", Namespace: "default"}) + g.Expect(resources).To(BeNil()) + + g.Expect(deploymentStore.RemoveCallCount()).To(Equal(1)) +} + +func TestNonLeaderProvisioner(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner, fakeClient, deploymentStore := defaultNginxProvisioner() + provisioner.leader = false + nsName := types.NamespacedName{Name: "gw-nginx", Namespace: "default"} + + g.Expect(provisioner.RegisterGateway(context.TODO(), nil, "gw-nginx")).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + + g.Expect(provisioner.provisionNginx(context.TODO(), "gw-nginx", nil, nil)).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + + g.Expect(provisioner.reprovisionNginx(context.TODO(), "gw-nginx", nil, nil)).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + + g.Expect(provisioner.deprovisionNginx(context.TODO(), nsName)).To(Succeed()) + expectResourcesToNotExist(g, fakeClient, nsName) + g.Expect(deploymentStore.RemoveCallCount()).To(Equal(1)) +} + +func TestProvisionerRestartsDeployment(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + gateway := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: true, + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + }, + } + + // provision everything first + provisioner, fakeClient, _ := defaultNginxProvisioner(gateway.Source) + provisioner.cfg.Plus = false + provisioner.cfg.NginxDockerSecretNames = nil + + g.Expect(provisioner.RegisterGateway(context.TODO(), gateway, "gw-nginx")).To(Succeed()) + expectResourcesToExist(g, fakeClient, types.NamespacedName{Name: "gw-nginx", Namespace: "default"}, false) // not plus + + // update agent config + updatedConfig := &graph.Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + }, + Valid: true, + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelInfo), + }, + }, + } + g.Expect(provisioner.RegisterGateway(context.TODO(), updatedConfig, "gw-nginx")).To(Succeed()) + + // verify deployment was updated with the restart annotation + dep := &appsv1.Deployment{} + key := types.NamespacedName{Name: "gw-nginx", Namespace: "default"} + g.Expect(fakeClient.Get(context.TODO(), key, dep)).To(Succeed()) + + 
g.Expect(dep.Spec.Template.GetAnnotations()).To(HaveKey(controller.RestartedAnnotation)) +} diff --git a/internal/mode/static/provisioner/setter.go b/internal/mode/static/provisioner/setter.go index 4195fd6d2a..dfe42321bc 100644 --- a/internal/mode/static/provisioner/setter.go +++ b/internal/mode/static/provisioner/setter.go @@ -18,6 +18,8 @@ func objectSpecSetter(object client.Object) controllerutil.MutateFn { return func() error { return nil } case *corev1.ConfigMap: return configMapSpecSetter(obj, obj.Data) + case *corev1.Secret: + return secretSpecSetter(obj, obj.Data) } return nil @@ -43,3 +45,10 @@ func configMapSpecSetter(configMap *corev1.ConfigMap, data map[string]string) co return nil } } + +func secretSpecSetter(secret *corev1.Secret, data map[string][]byte) controllerutil.MutateFn { + return func() error { + secret.Data = data + return nil + } +} diff --git a/internal/mode/static/provisioner/store.go b/internal/mode/static/provisioner/store.go index bf78ee21c0..5a57d5cc99 100644 --- a/internal/mode/static/provisioner/store.go +++ b/internal/mode/static/provisioner/store.go @@ -7,6 +7,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" @@ -16,12 +17,16 @@ import ( // NginxResources are all of the NGINX resources deployed in relation to a Gateway. type NginxResources struct { - Gateway *graph.Gateway - Deployment *appsv1.Deployment - Service *corev1.Service - ServiceAccount *corev1.ServiceAccount - BootstrapConfigMap *corev1.ConfigMap - AgentConfigMap *corev1.ConfigMap + Gateway *graph.Gateway + Deployment metav1.ObjectMeta + Service metav1.ObjectMeta + ServiceAccount metav1.ObjectMeta + BootstrapConfigMap metav1.ObjectMeta + AgentConfigMap metav1.ObjectMeta + PlusJWTSecret metav1.ObjectMeta + PlusClientSSLSecret metav1.ObjectMeta + PlusCASecret metav1.ObjectMeta + DockerSecrets []metav1.ObjectMeta } // store stores the cluster state needed by the provisioner and allows to update it from the events. @@ -32,13 +37,33 @@ type store struct { // nginxResources is a map of Gateway NamespacedNames and their associated nginx resources. 
nginxResources map[types.NamespacedName]*NginxResources + dockerSecretNames map[string]struct{} + // NGINX Plus secrets + jwtSecretName string + caSecretName string + clientSSLSecretName string + lock sync.RWMutex } -func newStore() *store { +func newStore( + dockerSecretNames []string, + jwtSecretName, + caSecretName, + clientSSLSecretName string, +) *store { + dockerSecretNamesMap := make(map[string]struct{}) + for _, name := range dockerSecretNames { + dockerSecretNamesMap[name] = struct{}{} + } + return &store{ - gateways: make(map[types.NamespacedName]*gatewayv1.Gateway), - nginxResources: make(map[types.NamespacedName]*NginxResources), + gateways: make(map[types.NamespacedName]*gatewayv1.Gateway), + nginxResources: make(map[types.NamespacedName]*NginxResources), + dockerSecretNames: dockerSecretNamesMap, + jwtSecretName: jwtSecretName, + caSecretName: caSecretName, + clientSSLSecretName: clientSSLSecretName, } } @@ -85,48 +110,105 @@ func (s *store) registerResourceInGatewayConfig(gatewayNSName types.NamespacedNa case *appsv1.Deployment: if cfg, ok := s.nginxResources[gatewayNSName]; !ok { s.nginxResources[gatewayNSName] = &NginxResources{ - Deployment: obj, + Deployment: obj.ObjectMeta, } } else { - cfg.Deployment = obj + cfg.Deployment = obj.ObjectMeta } case *corev1.Service: if cfg, ok := s.nginxResources[gatewayNSName]; !ok { s.nginxResources[gatewayNSName] = &NginxResources{ - Service: obj, + Service: obj.ObjectMeta, } } else { - cfg.Service = obj + cfg.Service = obj.ObjectMeta } case *corev1.ServiceAccount: if cfg, ok := s.nginxResources[gatewayNSName]; !ok { s.nginxResources[gatewayNSName] = &NginxResources{ - ServiceAccount: obj, + ServiceAccount: obj.ObjectMeta, } } else { - cfg.ServiceAccount = obj + cfg.ServiceAccount = obj.ObjectMeta } case *corev1.ConfigMap: - if cfg, ok := s.nginxResources[gatewayNSName]; !ok { - if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { - s.nginxResources[gatewayNSName] = &NginxResources{ - BootstrapConfigMap: obj, - } - } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { + s.registerConfigMapInGatewayConfig(obj, gatewayNSName) + case *corev1.Secret: + s.registerSecretInGatewayConfig(obj, gatewayNSName) + } + + return true +} + +func (s *store) registerConfigMapInGatewayConfig(obj *corev1.ConfigMap, gatewayNSName types.NamespacedName) { + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { + s.nginxResources[gatewayNSName] = &NginxResources{ + BootstrapConfigMap: obj.ObjectMeta, + } + } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { + s.nginxResources[gatewayNSName] = &NginxResources{ + AgentConfigMap: obj.ObjectMeta, + } + } + } else { + if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { + cfg.BootstrapConfigMap = obj.ObjectMeta + } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { + cfg.AgentConfigMap = obj.ObjectMeta + } + } +} + +func (s *store) registerSecretInGatewayConfig(obj *corev1.Secret, gatewayNSName types.NamespacedName) { + hasSuffix := func(str, suffix string) bool { + return suffix != "" && strings.HasSuffix(str, suffix) + } + + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + switch { + case hasSuffix(obj.GetName(), s.jwtSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + PlusJWTSecret: obj.ObjectMeta, + } + case hasSuffix(obj.GetName(), s.caSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + 
PlusCASecret: obj.ObjectMeta, + } + case hasSuffix(obj.GetName(), s.clientSSLSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + PlusClientSSLSecret: obj.ObjectMeta, + } + } + + for secret := range s.dockerSecretNames { + if hasSuffix(obj.GetName(), secret) { s.nginxResources[gatewayNSName] = &NginxResources{ - AgentConfigMap: obj, + DockerSecrets: []metav1.ObjectMeta{obj.ObjectMeta}, } + break } - } else { - if strings.HasSuffix(obj.GetName(), nginxIncludesConfigMapNameSuffix) { - cfg.BootstrapConfigMap = obj - } else if strings.HasSuffix(obj.GetName(), nginxAgentConfigMapNameSuffix) { - cfg.AgentConfigMap = obj + } + } else { + switch { + case hasSuffix(obj.GetName(), s.jwtSecretName): + cfg.PlusJWTSecret = obj.ObjectMeta + case hasSuffix(obj.GetName(), s.caSecretName): + cfg.PlusCASecret = obj.ObjectMeta + case hasSuffix(obj.GetName(), s.clientSSLSecretName): + cfg.PlusClientSSLSecret = obj.ObjectMeta + } + + for secret := range s.dockerSecretNames { + if hasSuffix(obj.GetName(), secret) { + if len(cfg.DockerSecrets) == 0 { + cfg.DockerSecrets = []metav1.ObjectMeta{obj.ObjectMeta} + } else { + cfg.DockerSecrets = append(cfg.DockerSecrets, obj.ObjectMeta) + } } } } - - return true } func gatewayChanged(original, updated *graph.Gateway) bool { @@ -159,34 +241,33 @@ func (s *store) deleteResourcesForGateway(nsName types.NamespacedName) { delete(s.nginxResources, nsName) } -//nolint:gocyclo // will refactor at some point func (s *store) gatewayExistsForResource(object client.Object, nsName types.NamespacedName) *graph.Gateway { s.lock.RLock() defer s.lock.RUnlock() - resourceMatches := func(obj client.Object) bool { - return obj.GetName() == nsName.Name && obj.GetNamespace() == nsName.Namespace - } - for _, resources := range s.nginxResources { switch object.(type) { case *appsv1.Deployment: - if resources.Deployment != nil && resourceMatches(resources.Deployment) { + if resourceMatches(resources.Deployment, nsName) { return resources.Gateway } case *corev1.Service: - if resources.Service != nil && resourceMatches(resources.Service) { + if resourceMatches(resources.Service, nsName) { return resources.Gateway } case *corev1.ServiceAccount: - if resources.ServiceAccount != nil && resourceMatches(resources.ServiceAccount) { + if resourceMatches(resources.ServiceAccount, nsName) { return resources.Gateway } case *corev1.ConfigMap: - if resources.BootstrapConfigMap != nil && resourceMatches(resources.BootstrapConfigMap) { + if resourceMatches(resources.BootstrapConfigMap, nsName) { + return resources.Gateway + } + if resourceMatches(resources.AgentConfigMap, nsName) { return resources.Gateway } - if resources.AgentConfigMap != nil && resourceMatches(resources.AgentConfigMap) { + case *corev1.Secret: + if secretResourceMatches(resources, nsName) { return resources.Gateway } } @@ -194,3 +275,25 @@ func (s *store) gatewayExistsForResource(object client.Object, nsName types.Name return nil } + +func secretResourceMatches(resources *NginxResources, nsName types.NamespacedName) bool { + for _, secret := range resources.DockerSecrets { + if resourceMatches(secret, nsName) { + return true + } + } + + if resourceMatches(resources.PlusJWTSecret, nsName) { + return true + } + + if resourceMatches(resources.PlusClientSSLSecret, nsName) { + return true + } + + return resourceMatches(resources.PlusCASecret, nsName) +} + +func resourceMatches(objMeta metav1.ObjectMeta, nsName types.NamespacedName) bool { + return objMeta.GetName() == nsName.Name && objMeta.GetNamespace() == nsName.Namespace +} 
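Before the new store_test.go below, a short usage sketch of the reworked store to make the suffix-based secret matching concrete. The secret and Gateway names here are illustrative; the calls (newStore, registerResourceInGatewayConfig, gatewayExistsForResource) are the ones exercised by the tests that follow:

// Seed the store with the user-supplied secret names; provisioned copies carry a
// per-Gateway prefix, so the store matches them by name suffix.
s := newStore([]string{"regcred"}, "nplus-license", "usage-ca", "usage-client-ssl")

gwNSName := types.NamespacedName{Name: "gw", Namespace: "default"}

// Registering a provisioned Secret records only its ObjectMeta against the Gateway entry.
secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{
    Name:      "gw-nginx-nplus-license", // suffix matches the JWT secret name
    Namespace: "default",
}}
_ = s.registerResourceInGatewayConfig(gwNSName, secret)

// An event for that Secret can later be mapped back to its owning Gateway
// (nil here, since no graph.Gateway was registered in this sketch).
gw := s.gatewayExistsForResource(secret, client.ObjectKeyFromObject(secret))
_ = gw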
diff --git a/internal/mode/static/provisioner/store_test.go b/internal/mode/static/provisioner/store_test.go new file mode 100644 index 0000000000..0358341f03 --- /dev/null +++ b/internal/mode/static/provisioner/store_test.go @@ -0,0 +1,505 @@ +package provisioner + +import ( + "fmt" + "testing" + + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" +) + +func TestNewStore(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore([]string{"docker-secret"}, "jwt-secret", "ca-secret", "client-ssl-secret") + + g.Expect(store).NotTo(BeNil()) + g.Expect(store.dockerSecretNames).To(HaveKey("docker-secret")) + g.Expect(store.jwtSecretName).To(Equal("jwt-secret")) + g.Expect(store.caSecretName).To(Equal("ca-secret")) + g.Expect(store.clientSSLSecretName).To(Equal("client-ssl-secret")) +} + +func TestUpdateGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "") + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway", + Namespace: "default", + }, + } + nsName := client.ObjectKeyFromObject(gateway) + + store.updateGateway(gateway) + + g.Expect(store.gateways).To(HaveKey(nsName)) + g.Expect(store.getGateway(nsName)).To(Equal(gateway)) +} + +func TestDeleteGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + store.gateways[nsName] = &gatewayv1.Gateway{} + + store.deleteGateway(nsName) + + g.Expect(store.gateways).NotTo(HaveKey(nsName)) + g.Expect(store.getGateway(nsName)).To(BeNil()) +} + +func TestRegisterResourceInGatewayConfig(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore([]string{"docker-secret"}, "jwt-secret", "ca-secret", "client-ssl-secret") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + + registerAndGetResources := func(obj interface{}) *NginxResources { + changed := store.registerResourceInGatewayConfig(nsName, obj) + g.Expect(changed).To(BeTrue(), fmt.Sprintf("failed: %T", obj)) + g.Expect(store.nginxResources).To(HaveKey(nsName), fmt.Sprintf("failed: %T", obj)) + + return store.getNginxResourcesForGateway(nsName) + } + + // Gateway, new config + gw := &graph.Gateway{} + resources := registerAndGetResources(gw) + g.Expect(resources.Gateway).To(Equal(gw)) + + // Gateway, updated config + gw = &graph.Gateway{ + Valid: true, + } + resources = registerAndGetResources(gw) + g.Expect(resources.Gateway).To(Equal(gw)) + + defaultMeta := metav1.ObjectMeta{ + Name: "test-resource", + Namespace: "default", + } + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Deployment + dep := &appsv1.Deployment{ObjectMeta: defaultMeta} + resources = registerAndGetResources(dep) + g.Expect(resources.Deployment).To(Equal(defaultMeta)) + + // Deployment again, already exists + resources = registerAndGetResources(dep) + g.Expect(resources.Deployment).To(Equal(defaultMeta)) + + // clear out resources 
before next test + store.deleteResourcesForGateway(nsName) + + // Service + svc := &corev1.Service{ObjectMeta: defaultMeta} + resources = registerAndGetResources(svc) + g.Expect(resources.Service).To(Equal(defaultMeta)) + + // Service again, already exists + resources = registerAndGetResources(svc) + g.Expect(resources.Service).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // ServiceAccount + svcAcct := &corev1.ServiceAccount{ObjectMeta: defaultMeta} + resources = registerAndGetResources(svcAcct) + g.Expect(resources.ServiceAccount).To(Equal(defaultMeta)) + + // ServiceAccount again, already exists + resources = registerAndGetResources(svcAcct) + g.Expect(resources.ServiceAccount).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // ConfigMap + bootstrapCMMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, nginxIncludesConfigMapNameSuffix), + Namespace: defaultMeta.Namespace, + } + bootstrapCM := &corev1.ConfigMap{ObjectMeta: bootstrapCMMeta} + resources = registerAndGetResources(bootstrapCM) + g.Expect(resources.BootstrapConfigMap).To(Equal(bootstrapCMMeta)) + + // ConfigMap again, already exists + resources = registerAndGetResources(bootstrapCM) + g.Expect(resources.BootstrapConfigMap).To(Equal(bootstrapCMMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // ConfigMap + agentCMMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, nginxAgentConfigMapNameSuffix), + Namespace: defaultMeta.Namespace, + } + agentCM := &corev1.ConfigMap{ObjectMeta: agentCMMeta} + resources = registerAndGetResources(agentCM) + g.Expect(resources.AgentConfigMap).To(Equal(agentCMMeta)) + + // ConfigMap again, already exists + resources = registerAndGetResources(agentCM) + g.Expect(resources.AgentConfigMap).To(Equal(agentCMMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Secret + jwtSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.jwtSecretName), + Namespace: defaultMeta.Namespace, + } + jwtSecret := &corev1.Secret{ObjectMeta: jwtSecretMeta} + resources = registerAndGetResources(jwtSecret) + g.Expect(resources.PlusJWTSecret).To(Equal(jwtSecretMeta)) + + // Secret again, already exists + resources = registerAndGetResources(jwtSecret) + g.Expect(resources.PlusJWTSecret).To(Equal(jwtSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Secret + caSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.caSecretName), + Namespace: defaultMeta.Namespace, + } + caSecret := &corev1.Secret{ObjectMeta: caSecretMeta} + resources = registerAndGetResources(caSecret) + g.Expect(resources.PlusCASecret).To(Equal(caSecretMeta)) + + // Secret again, already exists + resources = registerAndGetResources(caSecret) + g.Expect(resources.PlusCASecret).To(Equal(caSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Secret + clientSSLSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.clientSSLSecretName), + Namespace: defaultMeta.Namespace, + } + clientSSLSecret := &corev1.Secret{ObjectMeta: clientSSLSecretMeta} + resources = registerAndGetResources(clientSSLSecret) + 
g.Expect(resources.PlusClientSSLSecret).To(Equal(clientSSLSecretMeta)) + + // Secret again, already exists + resources = registerAndGetResources(clientSSLSecret) + g.Expect(resources.PlusClientSSLSecret).To(Equal(clientSSLSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // Docker Secret + dockerSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, "docker-secret"), + Namespace: defaultMeta.Namespace, + } + dockerSecret := &corev1.Secret{ObjectMeta: dockerSecretMeta} + resources = registerAndGetResources(dockerSecret) + g.Expect(resources.DockerSecrets).To(ContainElements(dockerSecretMeta)) + + // Docker Secret again, already exists + resources = registerAndGetResources(dockerSecret) + g.Expect(resources.DockerSecrets).To(ContainElement(dockerSecretMeta)) +} + +func TestGatewayChanged(t *testing.T) { + t.Parallel() + + tests := []struct { + original *graph.Gateway + updated *graph.Gateway + name string + changed bool + }{ + { + name: "nil gateway", + original: nil, + changed: true, + }, + { + name: "valid field changes", + original: &graph.Gateway{Valid: true}, + updated: &graph.Gateway{Valid: false}, + changed: true, + }, + { + name: "source changes", + original: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + }, + }, + }}, + updated: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 81, + }, + }, + }, + }}, + changed: true, + }, + { + name: "effective nginx proxy config changes", + original: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer[int32](1), + }, + }, + }, + }, + updated: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer[int32](2), + }, + }, + }, + }, + changed: true, + }, + { + name: "no changes", + original: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + }, + }, + }}, + updated: &graph.Gateway{Source: &gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + Listeners: []gatewayv1.Listener{ + { + Port: 80, + }, + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + g.Expect(gatewayChanged(test.original, test.updated)).To(Equal(test.changed)) + }) + } +} + +func TestDeleteResourcesForGateway(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + store.nginxResources[nsName] = &NginxResources{} + + store.deleteResourcesForGateway(nsName) + + g.Expect(store.nginxResources).NotTo(HaveKey(nsName)) +} + +func TestGatewayExistsForResource(t *testing.T) { + t.Parallel() + + store := newStore(nil, "", "", "") + gateway := &graph.Gateway{} + store.nginxResources[types.NamespacedName{Name: "test-gateway", Namespace: "default"}] = &NginxResources{ + Gateway: gateway, + Deployment: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Service: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + ServiceAccount: metav1.ObjectMeta{ + Name: 
"test-serviceaccount", + Namespace: "default", + }, + BootstrapConfigMap: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + }, + AgentConfigMap: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + }, + PlusJWTSecret: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: "default", + }, + PlusCASecret: metav1.ObjectMeta{ + Name: "test-ca-secret", + Namespace: "default", + }, + PlusClientSSLSecret: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + }, + DockerSecrets: []metav1.ObjectMeta{ + { + Name: "test-docker-secret", + Namespace: "default", + }, + }, + } + + tests := []struct { + expected *graph.Gateway + object client.Object + name string + }{ + { + name: "Deployment exists", + object: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Service exists", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "ServiceAccount exists", + object: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Bootstrap ConfigMap exists", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Agent ConfigMap exists", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "JWT Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "CA Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ca-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Client SSL Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Docker Secret exists", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-docker-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "Resource does not exist", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent-service", + Namespace: "default", + }, + }, + expected: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + result := store.gatewayExistsForResource(test.object, client.ObjectKeyFromObject(test.object)) + g.Expect(result).To(Equal(test.expected)) + }) + } +} diff --git a/internal/mode/static/provisioner/templates.go b/internal/mode/static/provisioner/templates.go index 0b4d1ca308..05e1b96623 100644 --- a/internal/mode/static/provisioner/templates.go +++ b/internal/mode/static/provisioner/templates.go @@ -3,31 +3,31 @@ package provisioner import gotemplate "text/template" var ( - mainTemplate = gotemplate.Must(gotemplate.New("main").Parse(mainTemplateText)) - // mgmtTemplate = gotemplate.Must(gotemplate.New("mgmt").Parse(mgmtTemplateText)). 
+ mainTemplate = gotemplate.Must(gotemplate.New("main").Parse(mainTemplateText)) + mgmtTemplate = gotemplate.Must(gotemplate.New("mgmt").Parse(mgmtTemplateText)) agentTemplate = gotemplate.Must(gotemplate.New("agent").Parse(agentTemplateText)) ) const mainTemplateText = ` error_log stderr {{ .ErrorLevel }};` -// const mgmtTemplateText = `mgmt { -// {{- if .Values.nginx.usage.endpoint }} -// usage_report endpoint={{ .Values.nginx.usage.endpoint }}; -// {{- end }} -// {{- if .Values.nginx.usage.skipVerify }} -// ssl_verify off; -// {{- end }} -// {{- if .Values.nginx.usage.caSecretName }} -// ssl_trusted_certificate /etc/nginx/certs-bootstrap/ca.crt; -// {{- end }} -// {{- if .Values.nginx.usage.clientSSLSecretName }} -// ssl_certificate /etc/nginx/certs-bootstrap/tls.crt; -// ssl_certificate_key /etc/nginx/certs-bootstrap/tls.key; -// {{- end }} -// enforce_initial_report off; -// deployment_context /etc/nginx/main-includes/deployment_ctx.json; -// }` +const mgmtTemplateText = `mgmt { + {{- if .UsageEndpoint }} + usage_report endpoint={{ .UsageEndpoint }}; + {{- end }} + {{- if .SkipVerify }} + ssl_verify off; + {{- end }} + {{- if .UsageCASecret }} + ssl_trusted_certificate /etc/nginx/certs-bootstrap/ca.crt; + {{- end }} + {{- if .UsageClientSSLSecret }} + ssl_certificate /etc/nginx/certs-bootstrap/tls.crt; + ssl_certificate_key /etc/nginx/certs-bootstrap/tls.key; + {{- end }} + enforce_initial_report off; + deployment_context /etc/nginx/main-includes/deployment_ctx.json; +}` const agentTemplateText = `command: server: From 49353cbff02ffc63111d4d6870b39c96bdae5fe0 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 3 Mar 2025 17:04:07 -0700 Subject: [PATCH 13/32] CP/DP Split: remove unneeded provisioner mode (#3180) With the new deployment model, the provisioner mode for conformance tests is no longer needed. This code is removed, and at a later date the conformance tests will be updated to work with the new model. Renamed the "static-mode" to "controller". Also removed some unneeded metrics collection. 
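For context, the mgmt bootstrap template enabled in the templates.go hunk above (previous patch) is executed with a small data value; a sketch of roughly what that looks like — the struct and concrete values here are hypothetical, since only the template placeholders are defined by the patch:

// Hypothetical carrier for the placeholders in mgmtTemplateText above.
type mgmtConfig struct {
    UsageEndpoint        string
    SkipVerify           bool
    UsageCASecret        string
    UsageClientSSLSecret string
}

var buf bytes.Buffer
err := mgmtTemplate.Execute(&buf, mgmtConfig{
    UsageEndpoint: "usage.example.com", // hypothetical reporting endpoint
    UsageCASecret: "usage-ca",          // presence toggles ssl_trusted_certificate
})
// On success, buf holds the mgmt{} block that is written as mgmt.conf into the
// NGINX Plus bootstrap ConfigMap.
_ = err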
--- .github/workflows/conformance.yml | 8 - .../templates/deployment.yaml | 2 +- cmd/gateway/commands.go | 59 +- cmd/gateway/commands_test.go | 26 +- cmd/gateway/main.go | 3 +- config/tests/static-deployment.yaml | 82 --- deploy/aws-nlb/deploy.yaml | 2 +- deploy/azure/deploy.yaml | 2 +- deploy/default/deploy.yaml | 2 +- deploy/experimental-nginx-plus/deploy.yaml | 2 +- deploy/experimental/deploy.yaml | 2 +- deploy/nginx-plus/deploy.yaml | 2 +- deploy/nodeport/deploy.yaml | 2 +- deploy/openshift/deploy.yaml | 2 +- .../snippets-filters-nginx-plus/deploy.yaml | 2 +- deploy/snippets-filters/deploy.yaml | 2 +- docs/developer/release-process.md | 5 +- docs/proposals/nginx-extensions.md | 2 +- embedded.go | 11 - internal/mode/provisioner/deployment.go | 43 -- internal/mode/provisioner/doc.go | 6 - internal/mode/provisioner/handler.go | 186 ------ internal/mode/provisioner/handler_test.go | 570 ------------------ internal/mode/provisioner/manager.go | 152 ----- .../provisioner/provisioner_suite_test.go | 14 - internal/mode/provisioner/store.go | 58 -- internal/mode/static/manager.go | 7 +- .../metrics/collectors/nginx_runtime.go | 108 ---- scripts/generate-manifests.sh | 7 - tests/Makefile | 33 +- tests/README.md | 10 - tests/conformance/provisioner/README.md | 47 -- .../conformance/provisioner/provisioner.yaml | 79 --- 33 files changed, 27 insertions(+), 1511 deletions(-) delete mode 100644 config/tests/static-deployment.yaml delete mode 100644 embedded.go delete mode 100644 internal/mode/provisioner/deployment.go delete mode 100644 internal/mode/provisioner/doc.go delete mode 100644 internal/mode/provisioner/handler.go delete mode 100644 internal/mode/provisioner/handler_test.go delete mode 100644 internal/mode/provisioner/manager.go delete mode 100644 internal/mode/provisioner/provisioner_suite_test.go delete mode 100644 internal/mode/provisioner/store.go delete mode 100644 internal/mode/static/metrics/collectors/nginx_runtime.go delete mode 100644 tests/conformance/provisioner/README.md delete mode 100644 tests/conformance/provisioner/provisioner.yaml diff --git a/.github/workflows/conformance.yml b/.github/workflows/conformance.yml index 9d0fcda739..22b52ac0bd 100644 --- a/.github/workflows/conformance.yml +++ b/.github/workflows/conformance.yml @@ -76,13 +76,6 @@ jobs: type=ref,event=pr type=ref,event=branch,suffix=-rc,enable=${{ startsWith(github.ref, 'refs/heads/release') }} - - name: Generate static deployment - run: | - ngf_prefix=ghcr.io/nginx/nginx-gateway-fabric - ngf_tag=${{ steps.ngf-meta.outputs.version }} - make generate-static-deployment PLUS_ENABLED=${{ inputs.image == 'plus' && 'true' || 'false' }} PREFIX=${ngf_prefix} TAG=${ngf_tag} - working-directory: ./tests - - name: Build binary uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0 with: @@ -151,7 +144,6 @@ jobs: ngf_tag=${{ steps.ngf-meta.outputs.version }} if [ ${{ github.event_name }} == "schedule" ]; then export GW_API_VERSION=main; fi make helm-install-local${{ inputs.image == 'plus' && '-with-plus' || ''}} PREFIX=${ngf_prefix} TAG=${ngf_tag} - make deploy-updated-provisioner PREFIX=${ngf_prefix} TAG=${ngf_tag} working-directory: ./tests - name: Run conformance tests diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index b542aacb57..e29710d5fd 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -37,7 +37,7 @@ spec: spec: containers: - 
args: - - static-mode + - controller - --gateway-ctlr-name={{ .Values.nginxGateway.gatewayControllerName }} - --gatewayclass={{ .Values.nginxGateway.gatewayClassName }} - --config={{ include "nginx-gateway.config-name" . }} diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index 5652a9f2dd..efa990e29d 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -21,7 +21,6 @@ import ( ctlrZap "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/nginx/nginx-gateway-fabric/internal/framework/file" - "github.com/nginx/nginx-gateway-fabric/internal/mode/provisioner" "github.com/nginx/nginx-gateway-fabric/internal/mode/static" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/licensing" @@ -53,7 +52,7 @@ func createRootCommand() *cobra.Command { return rootCmd } -func createStaticModeCommand() *cobra.Command { +func createControllerCommand() *cobra.Command { // flag names const ( gatewayFlag = "gateway" @@ -145,8 +144,8 @@ func createStaticModeCommand() *cobra.Command { ) cmd := &cobra.Command{ - Use: "static-mode", - Short: "Configure NGINX in the scope of a single Gateway resource", + Use: "controller", + Short: "Run the NGINX Gateway Fabric control plane", RunE: func(cmd *cobra.Command, _ []string) error { atom := zap.NewAtomicLevel() @@ -155,7 +154,7 @@ func createStaticModeCommand() *cobra.Command { commit, date, dirty := getBuildInfo() logger.Info( - "Starting NGINX Gateway Fabric in static mode", + "Starting the NGINX Gateway Fabric control plane", "version", version, "commit", commit, "date", date, @@ -443,56 +442,6 @@ func createStaticModeCommand() *cobra.Command { return cmd } -func createProvisionerModeCommand() *cobra.Command { - var ( - gatewayCtlrName = stringValidatingValue{ - validator: validateGatewayControllerName, - } - gatewayClassName = stringValidatingValue{ - validator: validateResourceName, - } - ) - - cmd := &cobra.Command{ - Use: "provisioner-mode", - Short: "Provision a static-mode NGINX Gateway Fabric Deployment per Gateway resource", - Hidden: true, - RunE: func(_ *cobra.Command, _ []string) error { - logger := ctlrZap.New() - commit, date, dirty := getBuildInfo() - logger.Info( - "Starting NGINX Gateway Fabric Provisioner", - "version", version, - "commit", commit, - "date", date, - "dirty", dirty, - ) - - return provisioner.StartManager(provisioner.Config{ - Logger: logger, - GatewayClassName: gatewayClassName.value, - GatewayCtlrName: gatewayCtlrName.value, - }) - }, - } - - cmd.Flags().Var( - &gatewayCtlrName, - gatewayCtlrNameFlag, - fmt.Sprintf(gatewayCtlrNameUsageFmt, domain), - ) - utilruntime.Must(cmd.MarkFlagRequired(gatewayCtlrNameFlag)) - - cmd.Flags().Var( - &gatewayClassName, - gatewayClassFlag, - gatewayClassNameUsage, - ) - utilruntime.Must(cmd.MarkFlagRequired(gatewayClassFlag)) - - return cmd -} - // FIXME(pleshakov): Remove this command once NGF min supported Kubernetes version supports sleep action in // preStop hook. 
// See https://github.com/kubernetes/enhancements/tree/4ec371d92dcd4f56a2ab18c8ba20bb85d8d20efe/keps/sig-node/3960-pod-lifecycle-sleep-action diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go index e89a5a91dd..61459455f6 100644 --- a/cmd/gateway/commands_test.go +++ b/cmd/gateway/commands_test.go @@ -122,13 +122,9 @@ func TestCommonFlagsValidation(t *testing.T) { } for _, test := range tests { - t.Run(test.name+"_static_mode", func(t *testing.T) { + t.Run(test.name+"_controller", func(t *testing.T) { t.Parallel() - testFlag(t, createStaticModeCommand(), test) - }) - t.Run(test.name+"_provisioner_mode", func(t *testing.T) { - t.Parallel() - testFlag(t, createProvisionerModeCommand(), test) + testFlag(t, createControllerCommand(), test) }) } } @@ -439,28 +435,12 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() - cmd := createStaticModeCommand() + cmd := createControllerCommand() testFlag(t, cmd, test) }) } } -func TestProvisionerModeCmdFlagValidation(t *testing.T) { - t.Parallel() - testCase := flagTestCase{ - name: "valid flags", - args: []string{ - "--gateway-ctlr-name=gateway.nginx.org/nginx-gateway", // common and required flag - "--gatewayclass=nginx", // common and required flag - }, - wantErr: false, - } - - // common flags validation is tested separately - - testFlag(t, createProvisionerModeCommand(), testCase) -} - func TestSleepCmdFlagValidation(t *testing.T) { t.Parallel() tests := []flagTestCase{ diff --git a/cmd/gateway/main.go b/cmd/gateway/main.go index fc2a5949c7..203385b732 100644 --- a/cmd/gateway/main.go +++ b/cmd/gateway/main.go @@ -21,8 +21,7 @@ func main() { rootCmd := createRootCommand() rootCmd.AddCommand( - createStaticModeCommand(), - createProvisionerModeCommand(), + createControllerCommand(), createInitializeCommand(), createSleepCommand(), ) diff --git a/config/tests/static-deployment.yaml b/config/tests/static-deployment.yaml deleted file mode 100644 index 7c1b2df7e9..0000000000 --- a/config/tests/static-deployment.yaml +++ /dev/null @@ -1,82 +0,0 @@ ---- -# Source: nginx-gateway-fabric/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-gateway - namespace: nginx-gateway - labels: - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/version: "edge" -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/instance: nginx-gateway - template: - metadata: - labels: - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/instance: nginx-gateway - spec: - containers: - - args: - - static-mode - - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - - --gatewayclass=nginx - - --config=nginx-gateway-config - - --service=nginx-gateway - - --metrics-disable - - --health-port=8081 - - --leader-election-lock-name=nginx-gateway-leader-election - - --product-telemetry-disable - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - - name: INSTANCE_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['app.kubernetes.io/instance'] - - name: IMAGE_NAME - value: ghcr.io/nginx/nginx-gateway-fabric:edge - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: nginx-gateway - ports: - - name: agent-grpc - 
containerPort: 8443 - - name: health - containerPort: 8081 - readinessProbe: - httpGet: - path: /readyz - port: health - initialDelaySeconds: 3 - periodSeconds: 1 - securityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 101 - runAsGroup: 1001 - terminationGracePeriodSeconds: 30 - serviceAccountName: nginx-gateway - securityContext: - fsGroup: 1001 - runAsNonRoot: true diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml index d49067708b..c6309d395e 100644 --- a/deploy/aws-nlb/deploy.yaml +++ b/deploy/aws-nlb/deploy.yaml @@ -200,7 +200,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index e7d0ef976d..88ce668193 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -200,7 +200,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index 42cc36de4a..b73922cdae 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -200,7 +200,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index 9ac24a81da..23d4223234 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -204,7 +204,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index 15311817cc..7ae7821f26 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -204,7 +204,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index 6d6c1ca848..b6b6b1ca58 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -200,7 +200,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index fa29623d9a..206401aac1 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -200,7 +200,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index e2701bf885..0a62309fcc 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -210,7 +210,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - 
--config=nginx-gateway-config diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index 88b9371440..a25c7e1aa3 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -202,7 +202,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index dfe78332b5..288058bac2 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -202,7 +202,7 @@ spec: spec: containers: - args: - - static-mode + - controller - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - --config=nginx-gateway-config diff --git a/docs/developer/release-process.md b/docs/developer/release-process.md index 83278a5e1d..5179d68cc8 100644 --- a/docs/developer/release-process.md +++ b/docs/developer/release-process.md @@ -44,9 +44,8 @@ To create a new release, follow these steps: 1. Kick off the [longevity tests](https://github.com/nginx/nginx-gateway-fabric/blob/main/tests/README.md#longevity-testing) for both OSS and Plus. You'll need to create two clusters and VMs for this. Before running, update your `vars.env` file with the proper image tag and prefixes. NGF and nginx images will be available from `ghcr.io`, and nginx plus will be available in GCP (`us-docker.pkg.dev//nginx-gateway-fabric/nginx-plus`). These tests need to run for 4 days before releasing. The results should be committed to the main branch and then cherry-picked to the release branch. 2. Kick off the [NFR workflow](https://github.com/nginx/nginx-gateway-fabric/actions/workflows/nfr.yml) in the browser. For `image_tag`, use `release-X.X-rc`, and for `version`, use the upcoming `X.Y.Z` NGF version. Run the workflow on the new release branch. This will run all of the NFR tests which are automated and open a PR with the results files when it is complete. Review this PR and make any necessary changes before merging. Once merged, be sure to cherry-pick the commit to the main branch as well (the original PR targets the release branch). 5. Run the [Release PR](https://github.com/nginx/nginx-gateway-fabric/actions/workflows/release-pr.yml) workflow to update the repo files for the release. Then there are a few manual steps to complete: - 1. Update the version tag used in the [provisioner manifest](/tests/conformance/provisioner/provisioner.yaml) and [getting started guide](/site/content/get-started.md). - 2. Update the [README](/README.md) to include information about the release. - 3. Update the [changelog](/CHANGELOG.md). There is going to be a new blank section generated by the automation that needs to be adjusted accordingly. + 1. Update the [README](/README.md) to include information about the release. + 2. Update the [changelog](/CHANGELOG.md). There is going to be a new blank section generated by the automation that needs to be adjusted accordingly. - At the top there will be a list of all PRs that are labeled with `release-notes`. The changelog includes only important (from the user perspective) changes to NGF. 
This is in contrast with the autogenerated full changelog, which is created in the next diff --git a/docs/proposals/nginx-extensions.md b/docs/proposals/nginx-extensions.md index 497de1355f..6bf72fd300 100644 --- a/docs/proposals/nginx-extensions.md +++ b/docs/proposals/nginx-extensions.md @@ -155,7 +155,7 @@ spec: name: my-annotation ``` -Infrastructure labels and annotations should be applied to all resources created in response to the Gateway. This only applies to _automated deployments_ (i.e., provisioner mode), implementations that automatically deploy the data plane based on a Gateway. +Infrastructure labels and annotations should be applied to all resources created in response to the Gateway. Other use cases for this API are Service type, Service IP, CPU memory requests, affinity rules, and Gateway routability (public, private, and cluster). ### TLS Options diff --git a/embedded.go b/embedded.go deleted file mode 100644 index 0147f76f0e..0000000000 --- a/embedded.go +++ /dev/null @@ -1,11 +0,0 @@ -package embeddedfiles - -import _ "embed" - -// StaticModeDeploymentYAML contains the YAML manifest of the Deployment resource for the static mode. -// We put this in the root of the repo because goembed doesn't support relative/absolute paths and symlinks, -// and we want to keep the static mode deployment manifest for the provisioner in the config/tests/ -// directory. -// -//go:embed config/tests/static-deployment.yaml -var StaticModeDeploymentYAML []byte diff --git a/internal/mode/provisioner/deployment.go b/internal/mode/provisioner/deployment.go deleted file mode 100644 index 6de1579595..0000000000 --- a/internal/mode/provisioner/deployment.go +++ /dev/null @@ -1,43 +0,0 @@ -package provisioner - -import ( - "fmt" - "strings" - - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/yaml" -) - -// prepareDeployment prepares a new the static mode Deployment based on the YAML manifest. -// It will use the specified id to set unique parts of the deployment, so it must be unique among all Deployments for -// Gateways. -// It will configure the Deployment to use the Gateway with the given NamespacedName. -func prepareDeployment(depYAML []byte, id string, gwNsName types.NamespacedName) (*v1.Deployment, error) { - dep := &v1.Deployment{} - if err := yaml.Unmarshal(depYAML, dep); err != nil { - return nil, fmt.Errorf("failed to unmarshal deployment: %w", err) - } - - dep.Name = id - dep.Spec.Selector.MatchLabels["app"] = id - dep.Spec.Template.Labels["app"] = id - - finalArgs := []string{ - "--gateway=" + gwNsName.String(), - "--update-gatewayclass-status=false", - } - - for _, arg := range dep.Spec.Template.Spec.Containers[0].Args { - if strings.Contains(arg, "leader-election-lock-name") { - lockNameArg := "--leader-election-lock-name=" + gwNsName.Name - finalArgs = append(finalArgs, lockNameArg) - } else { - finalArgs = append(finalArgs, arg) - } - } - - dep.Spec.Template.Spec.Containers[0].Args = finalArgs - - return dep, nil -} diff --git a/internal/mode/provisioner/doc.go b/internal/mode/provisioner/doc.go deleted file mode 100644 index 589ab68527..0000000000 --- a/internal/mode/provisioner/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/* -Package provisioner contains all the packages that relate to the provisioner-mode implementation of NGF. -Provisioner-mode implements data plane provisioning for NGINX Gateway Fabric (NGF): it creates an NGF static mode -Deployment for each Gateway that belongs to the provisioner GatewayClass. 
-*/ -package provisioner diff --git a/internal/mode/provisioner/handler.go b/internal/mode/provisioner/handler.go deleted file mode 100644 index b31145bd37..0000000000 --- a/internal/mode/provisioner/handler.go +++ /dev/null @@ -1,186 +0,0 @@ -package provisioner - -import ( - "context" - "fmt" - - "github.com/go-logr/logr" - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" - - "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" - "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" -) - -type timeNowFunc func() metav1.Time - -// eventHandler ensures each Gateway for the specific GatewayClass has a corresponding Deployment -// of NGF configured to use that specific Gateway. -// -// eventHandler implements events.Handler interface. -type eventHandler struct { - gcName string - store *store - - // provisions maps NamespacedName of Gateway to its corresponding Deployment - provisions map[types.NamespacedName]*v1.Deployment - - statusUpdater *status.Updater - k8sClient client.Client - timeNow timeNowFunc - - staticModeDeploymentYAML []byte - - gatewayNextID int64 -} - -func newEventHandler( - gcName string, - statusUpdater *status.Updater, - k8sClient client.Client, - staticModeDeploymentYAML []byte, - timeNow timeNowFunc, -) *eventHandler { - return &eventHandler{ - store: newStore(), - provisions: make(map[types.NamespacedName]*v1.Deployment), - statusUpdater: statusUpdater, - gcName: gcName, - k8sClient: k8sClient, - staticModeDeploymentYAML: staticModeDeploymentYAML, - gatewayNextID: 1, - timeNow: timeNow, - } -} - -func (h *eventHandler) setGatewayClassStatuses(ctx context.Context) { - var reqs []status.UpdateRequest - - var gcExists bool - - for nsname, gc := range h.store.gatewayClasses { - // The order of conditions matters. Default conditions are added first so that any additional conditions will - // override them, which is ensured by DeduplicateConditions. - conds := conditions.NewDefaultGatewayClassConditions() - - if gc.Name == h.gcName { - gcExists = true - } else { - conds = append(conds, conditions.NewGatewayClassConflict()) - } - - // We ignore the boolean return value here because the provisioner only sets status, - // it does not generate config. - supportedVersionConds, _ := gatewayclass.ValidateCRDVersions(h.store.crdMetadata) - conds = append(conds, supportedVersionConds...) - - reqs = append(reqs, status.UpdateRequest{ - NsName: nsname, - ResourceType: &gatewayv1.GatewayClass{}, - Setter: func(obj client.Object) bool { - gc := helpers.MustCastObject[*gatewayv1.GatewayClass](obj) - - gcs := gatewayv1.GatewayClassStatus{ - Conditions: conditions.ConvertConditions(conditions.DeduplicateConditions(conds), gc.Generation, h.timeNow()), - } - - if status.ConditionsEqual(gc.Status.Conditions, gcs.Conditions) { - return false - } - - gc.Status = gcs - - return true - }, - }) - } - - if !gcExists { - panic(fmt.Errorf("GatewayClass %s must exist", h.gcName)) - } - - h.statusUpdater.Update(ctx, reqs...) 
-} - -func (h *eventHandler) ensureDeploymentsMatchGateways(ctx context.Context, logger logr.Logger) { - var gwsWithoutDeps, removedGwsWithDeps []types.NamespacedName - - for nsname, gw := range h.store.gateways { - if string(gw.Spec.GatewayClassName) != h.gcName { - continue - } - if _, exist := h.provisions[nsname]; exist { - continue - } - - gwsWithoutDeps = append(gwsWithoutDeps, nsname) - } - - for nsname := range h.provisions { - if _, exist := h.store.gateways[nsname]; exist { - continue - } - - removedGwsWithDeps = append(removedGwsWithDeps, nsname) - } - - // Create new deployments - - for _, nsname := range gwsWithoutDeps { - deployment, err := prepareDeployment(h.staticModeDeploymentYAML, h.generateDeploymentID(), nsname) - if err != nil { - panic(fmt.Errorf("failed to prepare deployment: %w", err)) - } - - if err = h.k8sClient.Create(ctx, deployment); err != nil { - panic(fmt.Errorf("failed to create deployment: %w", err)) - } - - h.provisions[nsname] = deployment - - logger.Info( - "Created deployment", - "deployment", client.ObjectKeyFromObject(deployment), - "gateway", nsname, - ) - } - - // Remove unnecessary deployments - - for _, nsname := range removedGwsWithDeps { - deployment := h.provisions[nsname] - - if err := h.k8sClient.Delete(ctx, deployment); err != nil { - panic(fmt.Errorf("failed to delete deployment: %w", err)) - } - - delete(h.provisions, nsname) - - logger.Info( - "Deleted deployment", - "deployment", client.ObjectKeyFromObject(deployment), - "gateway", nsname, - ) - } -} - -func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, batch events.EventBatch) { - h.store.update(batch) - h.setGatewayClassStatuses(ctx) - h.ensureDeploymentsMatchGateways(ctx, logger) -} - -func (h *eventHandler) generateDeploymentID() string { - // This approach will break if the provisioner is restarted, because the existing Gateways might get - // IDs different from the previous replica of the provisioner. - id := h.gatewayNextID - h.gatewayNextID++ - - return fmt.Sprintf("nginx-gateway-%d", id) -} diff --git a/internal/mode/provisioner/handler_test.go b/internal/mode/provisioner/handler_test.go deleted file mode 100644 index 97c870179e..0000000000 --- a/internal/mode/provisioner/handler_test.go +++ /dev/null @@ -1,570 +0,0 @@ -package provisioner - -import ( - "context" - "fmt" - - "github.com/go-logr/logr" - . "github.com/onsi/ginkgo/v2" - v1 "k8s.io/api/apps/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" - - . 
"github.com/onsi/gomega" - - embeddedfiles "github.com/nginx/nginx-gateway-fabric" - "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" - "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" -) - -var _ = Describe("handler", func() { - const ( - gcName = "test-gc" - ) - var ( - handler *eventHandler - - statusUpdater *status.Updater - k8sclient client.Client - crd *metav1.PartialObjectMetadata - gc *gatewayv1.GatewayClass - - fakeTimeNow timeNowFunc - ) - - BeforeEach(OncePerOrdered, func() { - scheme := runtime.NewScheme() - - Expect(gatewayv1.Install(scheme)).Should(Succeed()) - Expect(v1.AddToScheme(scheme)).Should(Succeed()) - Expect(apiext.AddToScheme(scheme)).Should(Succeed()) - - k8sclient = fake.NewClientBuilder(). - WithScheme(scheme). - WithStatusSubresource( - &gatewayv1.Gateway{}, - &gatewayv1.GatewayClass{}, - ). - Build() - - fakeTime := helpers.PrepareTimeForFakeClient(metav1.Now()) - fakeTimeNow = func() metav1.Time { - return fakeTime - } - - statusUpdater = status.NewUpdater(k8sclient, logr.Discard()) - - // Add GatewayClass CRD to the cluster - crd = &metav1.PartialObjectMetadata{ - TypeMeta: metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "gatewayclasses.gateway.networking.k8s.io", - Annotations: map[string]string{ - gatewayclass.BundleVersionAnnotation: gatewayclass.SupportedVersion, - }, - }, - } - - err := k8sclient.Create(context.Background(), crd) - Expect(err).ToNot(HaveOccurred()) - - gc = &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: gcName, - }, - } - }) - - createGateway := func(gwNsName types.NamespacedName) *gatewayv1.Gateway { - return &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: gwNsName.Namespace, - Name: gwNsName.Name, - }, - Spec: gatewayv1.GatewaySpec{ - GatewayClassName: gcName, - }, - } - } - - itShouldUpsertGatewayClass := func() { - // Add GatewayClass to the cluster - - err := k8sclient.Create(context.Background(), gc) - Expect(err).ToNot(HaveOccurred()) - - // UpsertGatewayClass and CRD - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: gc, - }, - &events.UpsertEvent{ - Resource: crd, - }, - } - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - // Ensure GatewayClass is accepted - - clusterGc := &gatewayv1.GatewayClass{} - err = k8sclient.Get(context.Background(), client.ObjectKeyFromObject(gc), clusterGc) - - Expect(err).ToNot(HaveOccurred()) - - expectedConditions := []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionTrue, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: "Accepted", - Message: "GatewayClass is accepted", - }, - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - Status: metav1.ConditionTrue, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: "SupportedVersion", - Message: "Gateway API CRD versions are supported", - }, - } - - Expect(clusterGc.Status.Conditions).To(Equal(expectedConditions)) - } - - itShouldUpsertGateway := func(gwNsName types.NamespacedName, seqNumber int64) { - batch := []interface{}{ - &events.UpsertEvent{ - Resource: createGateway(gwNsName), - }, - } - 
- handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - depNsName := types.NamespacedName{ - Namespace: "nginx-gateway", - Name: fmt.Sprintf("nginx-gateway-%d", seqNumber), - } - - dep := &v1.Deployment{} - err := k8sclient.Get(context.Background(), depNsName, dep) - - Expect(err).ToNot(HaveOccurred()) - - Expect(dep.ObjectMeta.Namespace).To(Equal("nginx-gateway")) - Expect(dep.ObjectMeta.Name).To(Equal(depNsName.Name)) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement("static-mode")) - expectedGwFlag := fmt.Sprintf("--gateway=%s", gwNsName.String()) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement(expectedGwFlag)) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement("--update-gatewayclass-status=false")) - expectedLockFlag := fmt.Sprintf("--leader-election-lock-name=%s", gwNsName.Name) - Expect(dep.Spec.Template.Spec.Containers[0].Args).To(ContainElement(expectedLockFlag)) - } - - itShouldUpsertCRD := func(version string, accepted bool) { - updatedCRD := crd - updatedCRD.Annotations[gatewayclass.BundleVersionAnnotation] = version - - err := k8sclient.Update(context.Background(), updatedCRD) - Expect(err).ToNot(HaveOccurred()) - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: updatedCRD, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - updatedGC := &gatewayv1.GatewayClass{} - - err = k8sclient.Get(context.Background(), client.ObjectKeyFromObject(gc), updatedGC) - Expect(err).ToNot(HaveOccurred()) - - var expConds []metav1.Condition - if !accepted { - expConds = []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonUnsupportedVersion), - Message: fmt.Sprintf("Gateway API CRD versions are not supported. "+ - "Please install version %s", gatewayclass.SupportedVersion), - }, - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonUnsupportedVersion), - Message: fmt.Sprintf("Gateway API CRD versions are not supported. "+ - "Please install version %s", gatewayclass.SupportedVersion), - }, - } - } else { - expConds = []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionTrue, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonAccepted), - Message: "GatewayClass is accepted", - }, - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(gatewayv1.GatewayClassReasonUnsupportedVersion), - Message: fmt.Sprintf("Gateway API CRD versions are not recommended. 
"+ - "Recommended version is %s", gatewayclass.SupportedVersion), - }, - } - } - - Expect(updatedGC.Status.Conditions).To(Equal(expConds)) - } - - itShouldPanicWhenUpsertingGateway := func(gwNsName types.NamespacedName) { - batch := []interface{}{ - &events.UpsertEvent{ - Resource: createGateway(gwNsName), - }, - } - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - } - - Describe("Core cases", Ordered, func() { - var gwNsName1, gwNsName2 types.NamespacedName - - BeforeAll(func() { - gwNsName1 = types.NamespacedName{ - Namespace: "test-ns-1", - Name: "test-gw-1", - } - gwNsName2 = types.NamespacedName{ - Namespace: "test-ns-2", - Name: "test-gw-2", - } - - handler = newEventHandler( - gcName, - statusUpdater, - k8sclient, - embeddedfiles.StaticModeDeploymentYAML, - fakeTimeNow, - ) - }) - - When("upserting GatewayClass", func() { - It("should make GatewayClass Accepted", func() { - itShouldUpsertGatewayClass() - }) - }) - - When("upserting first Gateway", func() { - It("should create first Deployment", func() { - itShouldUpsertGateway(gwNsName1, 1) - }) - }) - - When("upserting first Gateway again", func() { - It("must retain Deployment", func() { - itShouldUpsertGateway(gwNsName1, 1) - }) - }) - - When("upserting second Gateway", func() { - It("should create second Deployment", func() { - itShouldUpsertGateway(gwNsName2, 2) - }) - }) - - When("deleting first Gateway", func() { - It("should remove first Deployment", func() { - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.Gateway{}, - NamespacedName: gwNsName1, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - deps := &v1.DeploymentList{} - - err := k8sclient.List(context.Background(), deps) - - Expect(err).ToNot(HaveOccurred()) - Expect(deps.Items).To(HaveLen(1)) - Expect(deps.Items[0].ObjectMeta.Name).To(Equal("nginx-gateway-2")) - }) - }) - - When("deleting second Gateway", func() { - It("should remove second Deployment", func() { - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.Gateway{}, - NamespacedName: gwNsName2, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - deps := &v1.DeploymentList{} - - err := k8sclient.List(context.Background(), deps) - - Expect(err).ToNot(HaveOccurred()) - Expect(deps.Items).To(BeEmpty()) - }) - }) - - When("upserting Gateway for a different GatewayClass", func() { - It("should not create Deployment", func() { - gw := &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-gw-3", - Namespace: "test-ns-3", - }, - Spec: gatewayv1.GatewaySpec{ - GatewayClassName: "some-class", - }, - } - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: gw, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - deps := &v1.DeploymentList{} - err := k8sclient.List(context.Background(), deps) - - Expect(err).ToNot(HaveOccurred()) - Expect(deps.Items).To(BeEmpty()) - }) - }) - - When("upserting GatewayClass that is not set in command-line argument", func() { - It("should set the proper status if this controller is referenced", func() { - newGC := &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "unknown-gc", - }, - Spec: gatewayv1.GatewayClassSpec{ - ControllerName: "test.example.com", - }, - } - - err := k8sclient.Create(context.Background(), newGC) - Expect(err).ToNot(HaveOccurred()) - - batch := []interface{}{ - &events.UpsertEvent{ - Resource: newGC, - }, - 
&events.UpsertEvent{ - Resource: crd, - }, - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - unknownGC := &gatewayv1.GatewayClass{} - err = k8sclient.Get(context.Background(), client.ObjectKeyFromObject(newGC), unknownGC) - Expect(err).ToNot(HaveOccurred()) - - expectedConditions := []metav1.Condition{ - { - Type: string(gatewayv1.GatewayClassReasonSupportedVersion), - Status: metav1.ConditionTrue, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: "SupportedVersion", - Message: "Gateway API CRD versions are supported", - }, - { - Type: string(gatewayv1.GatewayClassConditionStatusAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 0, - LastTransitionTime: fakeTimeNow(), - Reason: string(conditions.GatewayClassReasonGatewayClassConflict), - Message: conditions.GatewayClassMessageGatewayClassConflict, - }, - } - Expect(unknownGC.Status.Conditions).To(Equal(expectedConditions)) - }) - }) - - When("upserting Gateway API CRD that is not a supported major version", func() { - It("should set the SupportedVersion and Accepted statuses to false on GatewayClass", func() { - itShouldUpsertCRD("v99.0.0", false /* accepted */) - }) - }) - - When("upserting Gateway API CRD that is not a supported minor version", func() { - It("should set the SupportedVersion status to false and Accepted status to true on GatewayClass", func() { - itShouldUpsertCRD("1.99.0", true /* accepted */) - }) - }) - }) - - Describe("Edge cases", func() { - var gwNsName types.NamespacedName - - BeforeEach(func() { - gwNsName = types.NamespacedName{ - Namespace: "test-ns", - Name: "test-gw", - } - - handler = newEventHandler( - gcName, - statusUpdater, - k8sclient, - embeddedfiles.StaticModeDeploymentYAML, - fakeTimeNow, - ) - }) - - DescribeTable("Edge cases for events", - func(e interface{}) { - batch := []interface{}{e} - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - }, - Entry("should panic for an unknown event type", - &struct{}{}), - Entry("should panic for an unknown type of resource in upsert event", - &events.UpsertEvent{ - Resource: &gatewayv1.HTTPRoute{}, - }), - Entry("should panic for an unknown type of resource in delete event", - &events.DeleteEvent{ - Type: &gatewayv1.HTTPRoute{}, - }), - ) - - When("upserting Gateway when GatewayClass doesn't exist", func() { - It("should panic", func() { - itShouldPanicWhenUpsertingGateway(gwNsName) - }) - }) - - When("upserting Gateway when Deployment can't be created", func() { - It("should panic", func() { - itShouldUpsertGatewayClass() - - // Create a deployment so that the Handler will fail to create it because it already exists. - - dep := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx-gateway-1", - }, - } - - err := k8sclient.Create(context.Background(), dep) - Expect(err).ToNot(HaveOccurred()) - - itShouldPanicWhenUpsertingGateway(gwNsName) - }) - }) - - When("deleting Gateway when Deployment can't be deleted", func() { - It("should panic", func() { - itShouldUpsertGatewayClass() - itShouldUpsertGateway(gwNsName, 1) - - // Delete the deployment so that the Handler will fail to delete it because it doesn't exist. 
- - dep := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "nginx-gateway", - Name: "nginx-gateway-1", - }, - } - - err := k8sclient.Delete(context.Background(), dep) - Expect(err).ToNot(HaveOccurred()) - - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.Gateway{}, - NamespacedName: gwNsName, - }, - } - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - }) - }) - - When("deleting GatewayClass", func() { - It("should panic", func() { - itShouldUpsertGatewayClass() - - batch := []interface{}{ - &events.DeleteEvent{ - Type: &gatewayv1.GatewayClass{}, - NamespacedName: types.NamespacedName{ - Name: gcName, - }, - }, - } - - handle := func() { - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - } - - Expect(handle).Should(Panic()) - }) - }) - - When("upserting Gateway with broken static Deployment YAML", func() { - It("it should panic", func() { - handler = newEventHandler( - gcName, - statusUpdater, - k8sclient, - []byte("broken YAML"), - fakeTimeNow, - ) - - itShouldUpsertGatewayClass() - itShouldPanicWhenUpsertingGateway(types.NamespacedName{Namespace: "test-ns", Name: "test-gw"}) - }) - }) - }) -}) diff --git a/internal/mode/provisioner/manager.go b/internal/mode/provisioner/manager.go deleted file mode 100644 index bb2b93b6e5..0000000000 --- a/internal/mode/provisioner/manager.go +++ /dev/null @@ -1,152 +0,0 @@ -package provisioner - -import ( - "fmt" - - "github.com/go-logr/logr" - v1 "k8s.io/api/apps/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - ctlr "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" - - embeddedfiles "github.com/nginx/nginx-gateway-fabric" - "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" - "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/predicate" - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" - "github.com/nginx/nginx-gateway-fabric/internal/framework/gatewayclass" - "github.com/nginx/nginx-gateway-fabric/internal/framework/status" - ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" -) - -// Config is configuration for the provisioner mode. -type Config struct { - Logger logr.Logger - GatewayClassName string - GatewayCtlrName string -} - -// StartManager starts a Manager for the provisioner mode, which provisions -// a Deployment of NGF (static mode) for each Gateway of the provisioner GatewayClass. -// -// The provisioner mode is introduced to allow running Gateway API conformance tests for NGF, which expects -// an independent data plane instance being provisioned for each Gateway. -// -// The provisioner mode is not intended to be used in production (in the short term), as it lacks support for -// many important features. See https://github.com/nginx/nginx-gateway-fabric/issues/634 for more details. 
-func StartManager(cfg Config) error { - scheme := runtime.NewScheme() - utilruntime.Must(gatewayv1.Install(scheme)) - utilruntime.Must(v1.AddToScheme(scheme)) - utilruntime.Must(apiext.AddToScheme(scheme)) - - options := manager.Options{ - Scheme: scheme, - Logger: cfg.Logger, - } - clusterCfg := ctlr.GetConfigOrDie() - - mgr, err := manager.New(clusterCfg, options) - if err != nil { - return fmt.Errorf("cannot build runtime manager: %w", err) - } - - crdWithGVK := apiext.CustomResourceDefinition{} - crdWithGVK.SetGroupVersionKind( - schema.GroupVersionKind{Group: apiext.GroupName, Version: "v1", Kind: "CustomResourceDefinition"}, - ) - - // Note: for any new object type or a change to the existing one, - // make sure to also update firstBatchPreparer creation below - controllerRegCfgs := []struct { - objectType ngftypes.ObjectType - options []controller.Option - }{ - { - objectType: &gatewayv1.GatewayClass{}, - options: []controller.Option{ - controller.WithK8sPredicate(predicate.GatewayClassPredicate{ControllerName: cfg.GatewayCtlrName}), - }, - }, - { - objectType: &gatewayv1.Gateway{}, - }, - { - objectType: &crdWithGVK, - options: []controller.Option{ - controller.WithOnlyMetadata(), - controller.WithK8sPredicate( - predicate.AnnotationPredicate{Annotation: gatewayclass.BundleVersionAnnotation}, - ), - }, - }, - } - - ctx := ctlr.SetupSignalHandler() - eventCh := make(chan interface{}) - - for _, regCfg := range controllerRegCfgs { - if err := controller.Register( - ctx, - regCfg.objectType, - regCfg.objectType.GetObjectKind().GroupVersionKind().Kind, - mgr, - eventCh, - regCfg.options..., - ); err != nil { - return fmt.Errorf("cannot register controller for %T: %w", regCfg.objectType, err) - } - } - - partialObjectMetadataList := &metav1.PartialObjectMetadataList{} - partialObjectMetadataList.SetGroupVersionKind( - schema.GroupVersionKind{ - Group: apiext.GroupName, - Version: "v1", - Kind: "CustomResourceDefinition", - }, - ) - - firstBatchPreparer := events.NewFirstEventBatchPreparerImpl( - mgr.GetCache(), - []client.Object{ - &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: cfg.GatewayClassName}}, - }, - []client.ObjectList{ - &gatewayv1.GatewayList{}, - partialObjectMetadataList, - }, - ) - - statusUpdater := status.NewUpdater( - mgr.GetClient(), - cfg.Logger.WithName("statusUpdater"), - ) - - handler := newEventHandler( - cfg.GatewayClassName, - statusUpdater, - mgr.GetClient(), - embeddedfiles.StaticModeDeploymentYAML, - metav1.Now, - ) - - eventLoop := events.NewEventLoop( - eventCh, - cfg.Logger.WithName("eventLoop"), - handler, - firstBatchPreparer, - ) - - if err := mgr.Add(eventLoop); err != nil { - return fmt.Errorf("cannot register event loop: %w", err) - } - - cfg.Logger.Info("Starting manager") - return mgr.Start(ctx) -} diff --git a/internal/mode/provisioner/provisioner_suite_test.go b/internal/mode/provisioner/provisioner_suite_test.go deleted file mode 100644 index 1435a2230e..0000000000 --- a/internal/mode/provisioner/provisioner_suite_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package provisioner - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -func TestProvisioner(t *testing.T) { - t.Parallel() - RegisterFailHandler(Fail) - RunSpecs(t, "Provisioner Suite") -} diff --git a/internal/mode/provisioner/store.go b/internal/mode/provisioner/store.go deleted file mode 100644 index ebc6afcc17..0000000000 --- a/internal/mode/provisioner/store.go +++ /dev/null @@ -1,58 +0,0 @@ -package provisioner - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - v1 "sigs.k8s.io/gateway-api/apis/v1" - - "github.com/nginx/nginx-gateway-fabric/internal/framework/events" -) - -// store stores the cluster state needed by the provisioner and allows to update it from the events. -type store struct { - gatewayClasses map[types.NamespacedName]*v1.GatewayClass - gateways map[types.NamespacedName]*v1.Gateway - crdMetadata map[types.NamespacedName]*metav1.PartialObjectMetadata -} - -func newStore() *store { - return &store{ - gatewayClasses: make(map[types.NamespacedName]*v1.GatewayClass), - gateways: make(map[types.NamespacedName]*v1.Gateway), - crdMetadata: make(map[types.NamespacedName]*metav1.PartialObjectMetadata), - } -} - -func (s *store) update(batch events.EventBatch) { - for _, event := range batch { - switch e := event.(type) { - case *events.UpsertEvent: - switch obj := e.Resource.(type) { - case *v1.GatewayClass: - s.gatewayClasses[client.ObjectKeyFromObject(obj)] = obj - case *v1.Gateway: - s.gateways[client.ObjectKeyFromObject(obj)] = obj - case *metav1.PartialObjectMetadata: - s.crdMetadata[client.ObjectKeyFromObject(obj)] = obj - default: - panic(fmt.Errorf("unknown resource type %T", e.Resource)) - } - case *events.DeleteEvent: - switch e.Type.(type) { - case *v1.GatewayClass: - delete(s.gatewayClasses, e.NamespacedName) - case *v1.Gateway: - delete(s.gateways, e.NamespacedName) - case *metav1.PartialObjectMetadata: - delete(s.crdMetadata, e.NamespacedName) - default: - panic(fmt.Errorf("unknown resource type %T", e.Type)) - } - default: - panic(fmt.Errorf("unknown event type %T", e)) - } - } -} diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 930be5d01e..3459a7134f 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -150,18 +150,13 @@ func StartManager(cfg config.Config) error { if cfg.MetricsConfig.Enabled { constLabels := map[string]string{"class": cfg.GatewayClassName} - ngxruntimeCollector := collectors.NewManagerMetricsCollector(constLabels) handlerCollector = collectors.NewControllerCollector(constLabels) - handlerCollector, ok := handlerCollector.(prometheus.Collector) if !ok { return fmt.Errorf("handlerCollector is not a prometheus.Collector: %w", frameworkStatus.ErrFailedAssert) } - metrics.Registry.MustRegister( - ngxruntimeCollector, - handlerCollector, - ) + metrics.Registry.MustRegister(handlerCollector) } statusUpdater := frameworkStatus.NewUpdater( diff --git a/internal/mode/static/metrics/collectors/nginx_runtime.go b/internal/mode/static/metrics/collectors/nginx_runtime.go deleted file mode 100644 index c762171d76..0000000000 --- a/internal/mode/static/metrics/collectors/nginx_runtime.go +++ /dev/null @@ -1,108 +0,0 @@ -package collectors - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/metrics" -) - -// MetricsCollector is an interface for the metrics of the NGINX runtime manager. -// -//counterfeiter:generate . 
MetricsCollector -type MetricsCollector interface { - IncReloadCount() - IncReloadErrors() - ObserveLastReloadTime(ms time.Duration) -} - -// NginxRuntimeCollector implements runtime.Collector interface and prometheus.Collector interface. -type NginxRuntimeCollector struct { - // Metrics - reloadsTotal prometheus.Counter - reloadsError prometheus.Counter - configStale prometheus.Gauge - reloadsDuration prometheus.Histogram -} - -// NewManagerMetricsCollector creates a new NginxRuntimeCollector. -func NewManagerMetricsCollector(constLabels map[string]string) *NginxRuntimeCollector { - nc := &NginxRuntimeCollector{ - reloadsTotal: prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "nginx_reloads_total", - Namespace: metrics.Namespace, - Help: "Number of successful NGINX reloads", - ConstLabels: constLabels, - }), - reloadsError: prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "nginx_reload_errors_total", - Namespace: metrics.Namespace, - Help: "Number of unsuccessful NGINX reloads", - ConstLabels: constLabels, - }, - ), - configStale: prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "nginx_stale_config", - Namespace: metrics.Namespace, - Help: "Indicates if NGINX is not serving the latest configuration.", - ConstLabels: constLabels, - }, - ), - reloadsDuration: prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "nginx_reloads_milliseconds", - Namespace: metrics.Namespace, - Help: "Duration in milliseconds of NGINX reloads", - ConstLabels: constLabels, - Buckets: []float64{500, 1000, 5000, 10000, 30000}, - }, - ), - } - return nc -} - -// IncReloadCount increments the counter of successful NGINX reloads and sets the stale config status to false. -func (c *NginxRuntimeCollector) IncReloadCount() { - c.reloadsTotal.Inc() - c.updateConfigStaleStatus(false) -} - -// IncReloadErrors increments the counter of NGINX reload errors and sets the stale config status to true. -func (c *NginxRuntimeCollector) IncReloadErrors() { - c.reloadsError.Inc() - c.updateConfigStaleStatus(true) -} - -// updateConfigStaleStatus updates the last NGINX reload status metric. -func (c *NginxRuntimeCollector) updateConfigStaleStatus(stale bool) { - var status float64 - if stale { - status = 1.0 - } - c.configStale.Set(status) -} - -// ObserveLastReloadTime adds the last NGINX reload time to the histogram. -func (c *NginxRuntimeCollector) ObserveLastReloadTime(duration time.Duration) { - c.reloadsDuration.Observe(float64(duration / time.Millisecond)) -} - -// Describe implements prometheus.Collector interface Describe method. -func (c *NginxRuntimeCollector) Describe(ch chan<- *prometheus.Desc) { - c.reloadsTotal.Describe(ch) - c.reloadsError.Describe(ch) - c.configStale.Describe(ch) - c.reloadsDuration.Describe(ch) -} - -// Collect implements the prometheus.Collector interface Collect method. 
-func (c *NginxRuntimeCollector) Collect(ch chan<- prometheus.Metric) { - c.reloadsTotal.Collect(ch) - c.reloadsError.Collect(ch) - c.configStale.Collect(ch) - c.reloadsDuration.Collect(ch) -} diff --git a/scripts/generate-manifests.sh b/scripts/generate-manifests.sh index f52743e382..ca3e9c2041 100755 --- a/scripts/generate-manifests.sh +++ b/scripts/generate-manifests.sh @@ -30,10 +30,3 @@ done # For OpenShift, we don't need a Helm example so we generate the manifests from the default values.yaml generate_manifests openshift - -# FIXME(lucacome): Implement a better way to generate the static deployment file -# https://github.com/nginx/nginx-gateway-fabric/issues/2326 -helm template nginx-gateway charts/nginx-gateway-fabric --set nameOverride=nginx-gateway --set nginxGateway.metrics.enable=false --set nginxGateway.productTelemetry.enable=false -n nginx-gateway -s templates/deployment.yaml >config/tests/static-deployment.yaml -sed -i.bak '/app.kubernetes.io\/managed-by: Helm/d' config/tests/static-deployment.yaml -sed -i.bak '/helm.sh/d' config/tests/static-deployment.yaml -rm -f config/tests/static-deployment.yaml.bak diff --git a/tests/Makefile b/tests/Makefile index a7e8e5717f..dd82633d76 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -13,7 +13,6 @@ GW_SVC_GKE_INTERNAL = false NGF_VERSION ?= edge## NGF version to be tested PULL_POLICY = Never## Pull policy for the images NGINX_CONF_DIR = internal/mode/static/nginx/conf -PROVISIONER_MANIFEST = conformance/provisioner/provisioner.yaml SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,HTTPRouteResponseHeaderModification,HTTPRoutePathRedirect,GatewayHTTPListenerIsolation,GatewayInfrastructurePropagation STANDARD_CONFORMANCE_PROFILES = GATEWAY-HTTP,GATEWAY-GRPC EXPERIMENTAL_CONFORMANCE_PROFILES = GATEWAY-TLS @@ -74,9 +73,6 @@ cleanup-conformance-tests: ## Clean up conformance tests fixtures kubectl delete pod conformance kubectl delete -f conformance/conformance-rbac.yaml -.PHONY: build -build: generate-static-deployment - .PHONY: reset-go-modules reset-go-modules: ## Reset the go modules changes git checkout -- ../go.mod ../go.sum @@ -169,16 +165,7 @@ delete-gke-cluster: ## Delete the GKE cluster add-local-ip-to-cluster: ## Add local IP to the GKE cluster master-authorized-networks ./scripts/add-local-ip-auth-networks.sh -HELM_PARAMETERS += --set nameOverride=nginx-gateway --set nginxGateway.kind=skip --set nginx.service.type=ClusterIP --skip-schema-validation - -.PHONY: deploy-updated-provisioner -deploy-updated-provisioner: ## Update provisioner manifest and deploy to the configured kind cluster - yq '(select(di != 3))' $(PROVISIONER_MANIFEST) | kubectl apply -f - - yq '(select(.spec.template.spec.containers[].image) | .spec.template.spec.containers[].image="$(PREFIX):$(TAG)" | .spec.template.spec.containers[].imagePullPolicy = "Never")' $(PROVISIONER_MANIFEST) | kubectl apply -f - - -.PHONY: generate-static-deployment -generate-static-deployment: - helm template nginx-gateway $(CHART_DIR) --set nameOverride=nginx-gateway --set metrics.enable=false --set nginxGateway.productTelemetry.enable=false -n nginx-gateway -s templates/deployment.yaml --set nginxGateway.image.repository=$(PREFIX) --set nginxGateway.image.tag=$(TAG) --set nginxGateway.image.pullPolicy=Never --set nginx.image.repository=$(NGINX_PREFIX) --set nginx.image.tag=$(TAG) --set nginx.image.pullPolicy=Never --set 
nginxGateway.gwAPIExperimentalFeatures.enable=$(ENABLE_EXPERIMENTAL) --set nginx.plus=$(PLUS_ENABLED) --set nginx.usage.endpoint=$(PLUS_USAGE_ENDPOINT) > $(SELF_DIR)config/tests/static-deployment.yaml +HELM_PARAMETERS += --set nameOverride=nginx-gateway --set nginx.service.type=ClusterIP --skip-schema-validation # this target is used to install the gateway-api CRDs from the main branch (only used in the nightly CI job) # it overrides the target in the main Makefile when the GW_API_VERSION is set to main @@ -187,27 +174,15 @@ install-gateway-crds: kubectl kustomize "https://github.com/kubernetes-sigs/gateway-api/config/crd/$(if $(filter true,$(ENABLE_EXPERIMENTAL)),experimental,)?timeout=120&ref=main" | kubectl apply -f - endif -.PHONY: install-ngf-local-build -install-ngf-local-build: deploy-updated-provisioner - .PHONY: install-ngf-local-no-build -install-ngf-local-no-build: load-images helm-install-local deploy-updated-provisioner ## Install NGF from local build with provisioner on configured kind cluster but do not build the NGF image - -.PHONY: install-ngf-local-build-with-plus -install-ngf-local-build-with-plus: deploy-updated-provisioner +install-ngf-local-no-build: load-images helm-install-local ## Install NGF from local build on configured kind cluster but do not build the NGF image .PHONY: install-ngf-local-no-build-with-plus -install-ngf-local-no-build-with-plus: load-images-with-plus helm-install-local-with-plus deploy-updated-provisioner ## Install NGF with Plus from local build with provisioner on configured kind cluster but do not build the NGF image - -.PHONY: install-ngf-edge -install-ngf-edge: load-images helm-install-local ## Install NGF with provisioner from edge on configured kind cluster - kubectl apply -f $(PROVISIONER_MANIFEST) +install-ngf-local-no-build-with-plus: load-images-with-plus helm-install-local-with-plus ## Install NGF with Plus from local build on configured kind cluster but do not build the NGF image .PHONY: uninstall-ngf -uninstall-ngf: ## Uninstall NGF on configured kind cluster and undo manifest changes +uninstall-ngf: ## Uninstall NGF on configured kind cluster -helm uninstall nginx-gateway -n nginx-gateway -make uninstall-gateway-crds - -kubectl delete clusterrole nginx-gateway-provisioner - -kubectl delete clusterrolebinding nginx-gateway-provisioner -kubectl delete namespace nginx-gateway -kubectl kustomize ../config/crd | kubectl delete -f - diff --git a/tests/README.md b/tests/README.md index 1db86ebc3b..16df0bc36a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -19,7 +19,6 @@ This directory contains the tests for NGINX Gateway Fabric. 
The tests are divide - [Step 1 - Install NGINX Gateway Fabric to configured kind cluster](#step-1---install-nginx-gateway-fabric-to-configured-kind-cluster) - [Option 1 - Build and install NGINX Gateway Fabric from local to configured kind cluster](#option-1---build-and-install-nginx-gateway-fabric-from-local-to-configured-kind-cluster) - [Option 2 - Install NGINX Gateway Fabric from local already built image to configured kind cluster](#option-2---install-nginx-gateway-fabric-from-local-already-built-image-to-configured-kind-cluster) - - [Option 3 - Install NGINX Gateway Fabric from edge to configured kind cluster](#option-3---install-nginx-gateway-fabric-from-edge-to-configured-kind-cluster) - [Step 2 - Build conformance test runner image](#step-2---build-conformance-test-runner-image) - [Step 3 - Run Gateway conformance tests](#step-3---run-gateway-conformance-tests) - [Step 4 - Cleanup the conformance test fixtures and uninstall NGINX Gateway Fabric](#step-4---cleanup-the-conformance-test-fixtures-and-uninstall-nginx-gateway-fabric) @@ -158,15 +157,6 @@ Or, to install NGF with NGINX Plus enabled: make install-ngf-local-no-build-with-plus ``` -#### Option 3 - Install NGINX Gateway Fabric from edge to configured kind cluster - -You can also skip the build NGF image step and prepare the environment to instead use the `edge` image. Note that this -option does not currently support installing with NGINX Plus enabled. - -```makefile -make install-ngf-edge -``` - ### Step 2 - Build conformance test runner image > Note: If you want to run the latest conformance tests from the Gateway API `main` branch, run the following diff --git a/tests/conformance/provisioner/README.md b/tests/conformance/provisioner/README.md deleted file mode 100644 index c9f5bc36ae..0000000000 --- a/tests/conformance/provisioner/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Provisioner - -Provisioner implements data plane provisioning for NGINX Gateway Fabric (NGF): it creates an NGF static mode -Deployment for each Gateway that belongs to the provisioner GatewayClass. - -```text -Usage: - gateway provisioner-mode [flags] - -Flags: - -h, --help help for provisioner-mode - -Global Flags: - --gateway-ctlr-name string The name of the Gateway controller. The controller name must be of the form: DOMAIN/PATH. The controller's domain is 'gateway.nginx.org' (default "") - --gatewayclass string The name of the GatewayClass resource. Every NGINX Gateway Fabric must have a unique corresponding GatewayClass resource. (default "") -``` - -> Note: Provisioner is not ready for production yet (see this issue for more details -https://github.com/nginx/nginx-gateway-fabric/issues/634). However, it can be used in the Gateway API conformance -tests, which expect a Gateway API implementation to provision an independent data plane per Gateway. -> -> Note: Provisioner uses [this manifest](https://github.com/nginx/nginx-gateway-fabric/blob/main/config/tests/static-deployment.yaml) -to create an NGF static mode Deployment. -> This manifest gets included into the NGF binary during the NGF build. To customize the Deployment, modify the -manifest and **re-build** NGF. - -How to deploy: - -1. Follow the [installation](https://docs.nginx.com/nginx-gateway-fabric/installation/) instructions up until the Deploy the NGINX Gateway Fabric step - to deploy prerequisites for both the static mode Deployments and the provisioner. -1. Deploy provisioner: - - ```shell - kubectl apply -f provisioner.yaml - ``` - -1. 
Confirm the provisioner is running in nginx-gateway namespace: - - ```shell - kubectl get pods -n nginx-gateway - ``` - - ```text - - NAME READY STATUS RESTARTS AGE - nginx-gateway-provisioner-6c9d9fdcb8-b2pf8 1/1 Running 0 11m - ``` diff --git a/tests/conformance/provisioner/provisioner.yaml b/tests/conformance/provisioner/provisioner.yaml deleted file mode 100644 index 07ebae4a45..0000000000 --- a/tests/conformance/provisioner/provisioner.yaml +++ /dev/null @@ -1,79 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nginx-gateway-provisioner - namespace: nginx-gateway ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nginx-gateway-provisioner -rules: -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete -- apiGroups: - - gateway.networking.k8s.io - resources: - - gatewayclasses - - gateways - verbs: - - list - - watch -- apiGroups: - - gateway.networking.k8s.io - resources: - - gatewayclasses/status - verbs: - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nginx-gateway-provisioner -subjects: -- kind: ServiceAccount - name: nginx-gateway-provisioner - namespace: nginx-gateway -roleRef: - kind: ClusterRole - name: nginx-gateway-provisioner - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-gateway-provisioner - namespace: nginx-gateway -spec: - replicas: 1 - selector: - matchLabels: - app: nginx-gateway-provisioner - template: - metadata: - labels: - app: nginx-gateway-provisioner - spec: - serviceAccountName: nginx-gateway-provisioner - containers: - - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: nginx-gateway-provisioner - securityContext: - runAsUser: 1001 - args: - - provisioner-mode - - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - - --gatewayclass=nginx From 64955f12ff5fd4d8cd2407b2191d4a6763ad9fb4 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Tue, 4 Mar 2025 15:30:45 -0700 Subject: [PATCH 14/32] Update counterfeiter commands --- internal/mode/static/nginx/agent/agent.go | 2 +- internal/mode/static/nginx/agent/broadcast/broadcast.go | 2 +- internal/mode/static/nginx/agent/grpc/connections.go | 2 +- internal/mode/static/nginx/agent/grpc/messenger/messenger.go | 2 +- internal/mode/static/provisioner/provisioner.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index 3d2ff78b4a..1d839c3bc9 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -21,7 +21,7 @@ import ( const retryUpstreamTimeout = 5 * time.Second -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate +//go:generate go tool counterfeiter -generate //counterfeiter:generate . NginxUpdater diff --git a/internal/mode/static/nginx/agent/broadcast/broadcast.go b/internal/mode/static/nginx/agent/broadcast/broadcast.go index 2b21ae1117..ddc0854b3d 100644 --- a/internal/mode/static/nginx/agent/broadcast/broadcast.go +++ b/internal/mode/static/nginx/agent/broadcast/broadcast.go @@ -8,7 +8,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" ) -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate +//go:generate go tool counterfeiter -generate //counterfeiter:generate . 
Broadcaster diff --git a/internal/mode/static/nginx/agent/grpc/connections.go b/internal/mode/static/nginx/agent/grpc/connections.go index 6b30ce4b59..e0534a78f9 100644 --- a/internal/mode/static/nginx/agent/grpc/connections.go +++ b/internal/mode/static/nginx/agent/grpc/connections.go @@ -6,7 +6,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate +//go:generate go tool counterfeiter -generate //counterfeiter:generate . ConnectionsTracker diff --git a/internal/mode/static/nginx/agent/grpc/messenger/messenger.go b/internal/mode/static/nginx/agent/grpc/messenger/messenger.go index dde16c74f3..7e7fbd2b4c 100644 --- a/internal/mode/static/nginx/agent/grpc/messenger/messenger.go +++ b/internal/mode/static/nginx/agent/grpc/messenger/messenger.go @@ -7,7 +7,7 @@ import ( pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" ) -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate +//go:generate go tool counterfeiter -generate //counterfeiter:generate . Messenger diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go index 643fb3c6ff..7165805a44 100644 --- a/internal/mode/static/provisioner/provisioner.go +++ b/internal/mode/static/provisioner/provisioner.go @@ -30,7 +30,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" ) -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate +//go:generate go tool counterfeiter -generate //counterfeiter:generate . Provisioner From cab059e53c7561713c89b4136a64c52d048d3a7e Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Wed, 5 Mar 2025 13:14:50 -0700 Subject: [PATCH 15/32] CP/DP split: update/delete user secrets (#3193) Problem: When a user updates or deletes their docker registry or NGINX Plus secrets, those changes need to be propagated to all duplicate secrets that we've provisioned for the Gateway resources. Solution: If updated, update the provisioned secret. If deleted, delete the provisioned secret. --- .../controller/predicate/annotation.go | 12 +- .../framework/controller/predicate/secret.go | 77 +++++++ .../controller/predicate/secret_test.go | 194 ++++++++++++++++++ internal/framework/controller/resource.go | 15 +- internal/mode/static/provisioner/eventloop.go | 29 ++- internal/mode/static/provisioner/handler.go | 106 +++++++++- .../mode/static/provisioner/handler_test.go | 170 ++++++++++++++- .../mode/static/provisioner/provisioner.go | 51 ++++- .../static/provisioner/provisioner_test.go | 2 +- internal/mode/static/provisioner/store.go | 7 + .../mode/static/provisioner/store_test.go | 31 +++ 11 files changed, 669 insertions(+), 25 deletions(-) create mode 100644 internal/framework/controller/predicate/secret.go create mode 100644 internal/framework/controller/predicate/secret_test.go diff --git a/internal/framework/controller/predicate/annotation.go b/internal/framework/controller/predicate/annotation.go index 46b48660de..c6c34585f6 100644 --- a/internal/framework/controller/predicate/annotation.go +++ b/internal/framework/controller/predicate/annotation.go @@ -19,24 +19,24 @@ type AnnotationPredicate struct { } // Create filters CreateEvents based on the Annotation. 
-func (cp AnnotationPredicate) Create(e event.CreateEvent) bool { +func (ap AnnotationPredicate) Create(e event.CreateEvent) bool { if e.Object == nil { return false } - _, ok := e.Object.GetAnnotations()[cp.Annotation] + _, ok := e.Object.GetAnnotations()[ap.Annotation] return ok } // Update filters UpdateEvents based on the Annotation. -func (cp AnnotationPredicate) Update(e event.UpdateEvent) bool { +func (ap AnnotationPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil || e.ObjectNew == nil { // this case should not happen return false } - oldAnnotationVal := e.ObjectOld.GetAnnotations()[cp.Annotation] - newAnnotationVal := e.ObjectNew.GetAnnotations()[cp.Annotation] + oldAnnotationVal := e.ObjectOld.GetAnnotations()[ap.Annotation] + newAnnotationVal := e.ObjectNew.GetAnnotations()[ap.Annotation] return oldAnnotationVal != newAnnotationVal } @@ -52,7 +52,7 @@ type RestartDeploymentAnnotationPredicate struct { } // Update filters UpdateEvents based on if the annotation is present or changed. -func (cp RestartDeploymentAnnotationPredicate) Update(e event.UpdateEvent) bool { +func (RestartDeploymentAnnotationPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil || e.ObjectNew == nil { // this case should not happen return false diff --git a/internal/framework/controller/predicate/secret.go b/internal/framework/controller/predicate/secret.go new file mode 100644 index 0000000000..0e28679d89 --- /dev/null +++ b/internal/framework/controller/predicate/secret.go @@ -0,0 +1,77 @@ +package predicate + +import ( + "slices" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// SecretNamePredicate implements a predicate function that returns true if the Secret matches the expected +// namespace and one of the expected names. +type SecretNamePredicate struct { + predicate.Funcs + Namespace string + SecretNames []string +} + +// Create filters CreateEvents based on the Secret name. +func (sp SecretNamePredicate) Create(e event.CreateEvent) bool { + if e.Object == nil { + return false + } + + if secret, ok := e.Object.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +// Update filters UpdateEvents based on the Secret name. +func (sp SecretNamePredicate) Update(e event.UpdateEvent) bool { + if e.ObjectNew == nil { + return false + } + + if secret, ok := e.ObjectNew.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +// Delete filters DeleteEvents based on the Secret name. +func (sp SecretNamePredicate) Delete(e event.DeleteEvent) bool { + if e.Object == nil { + return false + } + + if secret, ok := e.Object.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +// Generic filters GenericEvents based on the Secret name. 
+func (sp SecretNamePredicate) Generic(e event.GenericEvent) bool { + if e.Object == nil { + return false + } + + if secret, ok := e.Object.(*corev1.Secret); ok { + return secretMatches(secret, sp.Namespace, sp.SecretNames) + } + + return false +} + +func secretMatches(secret *corev1.Secret, namespace string, names []string) bool { + if secret.GetNamespace() != namespace { + return false + } + + return slices.Contains(names, secret.GetName()) +} diff --git a/internal/framework/controller/predicate/secret_test.go b/internal/framework/controller/predicate/secret_test.go new file mode 100644 index 0000000000..a9574dbb8c --- /dev/null +++ b/internal/framework/controller/predicate/secret_test.go @@ -0,0 +1,194 @@ +package predicate + +import ( + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestSecretNamePredicate(t *testing.T) { + t.Parallel() + + pred := SecretNamePredicate{ + Namespace: "test-namespace", + SecretNames: []string{"secret1", "secret2"}, + } + + tests := []struct { + createEvent *event.CreateEvent + updateEvent *event.UpdateEvent + deleteEvent *event.DeleteEvent + genericEvent *event.GenericEvent + name string + expUpdate bool + }{ + { + name: "Create event with matching secret", + createEvent: &event.CreateEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Create event with non-matching secret", + createEvent: &event.CreateEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Create event with non-matching namespace", + createEvent: &event.CreateEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Update event with matching secret", + updateEvent: &event.UpdateEvent{ + ObjectNew: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret2", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Update event with non-matching secret", + updateEvent: &event.UpdateEvent{ + ObjectNew: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Update event with non-matching namespace", + updateEvent: &event.UpdateEvent{ + ObjectNew: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Delete event with matching secret", + deleteEvent: &event.DeleteEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Delete event with non-matching secret", + deleteEvent: &event.DeleteEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Delete event with non-matching namespace", + deleteEvent: &event.DeleteEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Generic event with matching secret", + genericEvent: &event.GenericEvent{ + Object: &corev1.Secret{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: true, + }, + { + name: "Generic event with non-matching secret", + genericEvent: &event.GenericEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "test-namespace", + }, + }, + }, + expUpdate: false, + }, + { + name: "Generic event with non-matching namespace", + genericEvent: &event.GenericEvent{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "other-namespace", + }, + }, + }, + expUpdate: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + var result bool + switch { + case test.createEvent != nil: + result = pred.Create(*test.createEvent) + case test.updateEvent != nil: + result = pred.Update(*test.updateEvent) + case test.deleteEvent != nil: + result = pred.Delete(*test.deleteEvent) + default: + result = pred.Generic(*test.genericEvent) + } + + g.Expect(test.expUpdate).To(Equal(result)) + }) + } +} diff --git a/internal/framework/controller/resource.go b/internal/framework/controller/resource.go index 2fff439a50..464d2ee90f 100644 --- a/internal/framework/controller/resource.go +++ b/internal/framework/controller/resource.go @@ -1,9 +1,22 @@ package controller -import "fmt" +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) // CreateNginxResourceName creates the base resource name for all nginx resources // created by the control plane. func CreateNginxResourceName(prefix, suffix string) string { return fmt.Sprintf("%s-%s", prefix, suffix) } + +// ObjectMetaToNamespacedName converts ObjectMeta to NamespacedName. +func ObjectMetaToNamespacedName(meta metav1.ObjectMeta) types.NamespacedName { + return types.NamespacedName{ + Namespace: meta.Namespace, + Name: meta.Name, + } +} diff --git a/internal/mode/static/provisioner/eventloop.go b/internal/mode/static/provisioner/eventloop.go index 5f080156bd..6b50e79e38 100644 --- a/internal/mode/static/provisioner/eventloop.go +++ b/internal/mode/static/provisioner/eventloop.go @@ -18,6 +18,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/predicate" "github.com/nginx/nginx-gateway-fabric/internal/framework/events" ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ) func newEventLoop( @@ -26,11 +27,30 @@ func newEventLoop( handler *eventHandler, logger logr.Logger, selector metav1.LabelSelector, + ngfNamespace string, + dockerSecrets []string, + usageConfig *config.UsageReportConfig, ) (*events.EventLoop, error) { nginxResourceLabelPredicate := predicate.NginxLabelPredicate(selector) + secretsToWatch := make([]string, 0, len(dockerSecrets)+3) + secretsToWatch = append(secretsToWatch, dockerSecrets...) 
+ + if usageConfig != nil { + if usageConfig.SecretName != "" { + secretsToWatch = append(secretsToWatch, usageConfig.SecretName) + } + if usageConfig.CASecretName != "" { + secretsToWatch = append(secretsToWatch, usageConfig.CASecretName) + } + if usageConfig.ClientSSLSecretName != "" { + secretsToWatch = append(secretsToWatch, usageConfig.ClientSSLSecretName) + } + } + controllerRegCfgs := []struct { objectType ngftypes.ObjectType + name string options []controller.Option }{ { @@ -85,15 +105,18 @@ func newEventLoop( options: []controller.Option{ controller.WithK8sPredicate( k8spredicate.And( - k8spredicate.GenerationChangedPredicate{}, - nginxResourceLabelPredicate, + k8spredicate.ResourceVersionChangedPredicate{}, + k8spredicate.Or( + nginxResourceLabelPredicate, + predicate.SecretNamePredicate{Namespace: ngfNamespace, SecretNames: secretsToWatch}, + ), ), ), }, }, } - eventCh := make(chan interface{}) + eventCh := make(chan any) for _, regCfg := range controllerRegCfgs { gvk, err := apiutil.GVKForObject(regCfg.objectType, mgr.GetScheme()) if err != nil { diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go index 5885373213..b3d7fe5efd 100644 --- a/internal/mode/static/provisioner/handler.go +++ b/internal/mode/static/provisioner/handler.go @@ -2,7 +2,9 @@ package provisioner import ( "context" + "errors" "fmt" + "strings" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" @@ -49,6 +51,7 @@ func newEventHandler( }, nil } +//nolint:gocyclo // will refactor at some point func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, batch events.EventBatch) { for _, event := range batch { switch e := event.(type) { @@ -56,7 +59,7 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, switch obj := e.Resource.(type) { case *gatewayv1.Gateway: h.store.updateGateway(obj) - case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap, *corev1.Secret: + case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap: objLabels := labels.Set(obj.GetLabels()) if h.labelSelector.Matches(objLabels) { gatewayName := objLabels.Get(controller.GatewayLabel) @@ -83,6 +86,20 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, } h.provisioner.cfg.StatusQueue.Enqueue(statusUpdate) } + case *corev1.Secret: + objLabels := labels.Set(obj.GetLabels()) + if h.labelSelector.Matches(objLabels) { + gatewayName := objLabels.Get(controller.GatewayLabel) + gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} + + if err := h.updateOrDeleteResources(ctx, obj, gatewayNSName); err != nil { + logger.Error(err, "error handling resource update") + } + } else if h.provisioner.isUserSecret(obj.GetName()) { + if err := h.provisionResourcesForAllGateways(ctx); err != nil { + logger.Error(err, "error updating resources") + } + } default: panic(fmt.Errorf("unknown resource type %T", e.Resource)) } @@ -93,10 +110,20 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, logger.Error(err, "error deprovisioning nginx resources") } h.store.deleteGateway(e.NamespacedName) - case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, *corev1.ConfigMap, *corev1.Secret: + case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, *corev1.ConfigMap: if err := h.reprovisionResources(ctx, e); err != nil { logger.Error(err, "error re-provisioning nginx resources") } + case *corev1.Secret: + if 
h.provisioner.isUserSecret(e.NamespacedName.Name) { + if err := h.deprovisionSecretsForAllGateways(ctx, e.NamespacedName.Name); err != nil { + logger.Error(err, "error removing secrets") + } + } else { + if err := h.reprovisionResources(ctx, e); err != nil { + logger.Error(err, "error re-provisioning nginx resources") + } + } default: panic(fmt.Errorf("unknown resource type %T", e.Type)) } @@ -128,7 +155,17 @@ func (h *eventHandler) updateOrDeleteResources( } h.store.registerResourceInGatewayConfig(gatewayNSName, obj) + if err := h.provisionResources(ctx, gatewayNSName); err != nil { + return fmt.Errorf("error updating nginx resource: %w", err) + } + + return nil +} +func (h *eventHandler) provisionResources( + ctx context.Context, + gatewayNSName types.NamespacedName, +) error { resources := h.store.getNginxResourcesForGateway(gatewayNSName) if resources.Gateway != nil { resourceName := controller.CreateNginxResourceName(gatewayNSName.Name, h.gcName) @@ -160,3 +197,68 @@ func (h *eventHandler) reprovisionResources(ctx context.Context, event *events.D } return nil } + +// provisionResourcesForAllGateways is called when a resource is updated that needs to be applied +// to all Gateway deployments. For example, NGINX Plus secrets. +func (h *eventHandler) provisionResourcesForAllGateways(ctx context.Context) error { + var allErrs []error + gateways := h.store.getGateways() + for gateway := range gateways { + if err := h.provisionResources(ctx, gateway); err != nil { + allErrs = append(allErrs, err) + } + } + + return errors.Join(allErrs...) +} + +// deprovisionSecretsForAllGateways cleans up any secrets that a user deleted that were duplicated +// for all Gateways. For example, NGINX Plus secrets. +func (h *eventHandler) deprovisionSecretsForAllGateways(ctx context.Context, secret string) error { + var allErrs []error + + gateways := h.store.getGateways() + for gateway := range gateways { + resources := h.store.getNginxResourcesForGateway(gateway) + if resources == nil { + continue + } + + switch { + case strings.HasSuffix(resources.PlusJWTSecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.PlusJWTSecret), + ); err != nil { + allErrs = append(allErrs, err) + } + case strings.HasSuffix(resources.PlusCASecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.PlusCASecret), + ); err != nil { + allErrs = append(allErrs, err) + } + case strings.HasSuffix(resources.PlusClientSSLSecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.PlusClientSSLSecret), + ); err != nil { + allErrs = append(allErrs, err) + } + default: + for _, dockerSecret := range resources.DockerSecrets { + if strings.HasSuffix(dockerSecret.Name, secret) { + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(dockerSecret), + ); err != nil { + allErrs = append(allErrs, err) + } + } + } + } + } + + return errors.Join(allErrs...) 
+} diff --git a/internal/mode/static/provisioner/handler_test.go b/internal/mode/static/provisioner/handler_test.go index bc3aa61d08..720690b972 100644 --- a/internal/mode/static/provisioner/handler_test.go +++ b/internal/mode/static/provisioner/handler_test.go @@ -9,6 +9,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" @@ -22,11 +23,9 @@ func TestHandleEventBatch_Upsert(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore(nil, "", "", "") + store := newStore([]string{dockerTestSecretName}, jwtTestSecretName, "", "") provisioner, fakeClient, _ := defaultNginxProvisioner() provisioner.cfg.StatusQueue = status.NewQueue() - provisioner.cfg.Plus = false - provisioner.cfg.NginxDockerSecretNames = nil labelSelector := metav1.LabelSelector{ MatchLabels: map[string]string{"app": "nginx"}, @@ -57,15 +56,57 @@ func TestHandleEventBatch_Upsert(t *testing.T) { service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-service", + Name: "gw-nginx", + Namespace: "default", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "test-gateway"}, + }, + } + + jwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw-nginx-" + jwtTestSecretName, Namespace: "default", Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "test-gateway"}, }, + Data: map[string][]byte{ + "data": []byte("oldData"), + }, + } + + userJwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{ + "data": []byte("oldData"), + }, + } + g.Expect(fakeClient.Create(ctx, userJwtSecret)).To(Succeed()) + + dockerSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw-nginx-" + dockerTestSecretName, + Namespace: "default", + }, + Data: map[string][]byte{ + "data": []byte("oldDockerData"), + }, } + userDockerSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{ + "data": []byte("oldDockerData"), + }, + } + g.Expect(fakeClient.Create(ctx, userDockerSecret)).To(Succeed()) + // Test handling Gateway upsertEvent := &events.UpsertEvent{Resource: gateway} - batch := events.EventBatch{upsertEvent} handler.HandleEventBatch(ctx, logger, batch) @@ -90,6 +131,44 @@ func TestHandleEventBatch_Upsert(t *testing.T) { g.Expect(provisioner.cfg.StatusQueue.Dequeue(ctx)).ToNot(BeNil()) + // Test handling provisioned Secret + upsertEvent = &events.UpsertEvent{Resource: jwtSecret} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), &corev1.Secret{})).To(Succeed()) + + // Test handling user Plus Secret + secret := &corev1.Secret{} + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), secret)).To(Succeed()) + g.Expect(secret.Data).To(HaveKey("data")) + g.Expect(secret.Data["data"]).To(Equal([]byte("oldData"))) + + userJwtSecret.Data["data"] = []byte("newData") + g.Expect(fakeClient.Update(ctx, userJwtSecret)).To(Succeed()) + upsertEvent = &events.UpsertEvent{Resource: userJwtSecret} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), secret)).To(Succeed()) + 
g.Expect(secret.Data).To(HaveKey("data")) + g.Expect(secret.Data["data"]).To(Equal([]byte("newData"))) + + // Test handling user Docker Secret + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(dockerSecret), secret)).To(Succeed()) + g.Expect(secret.Data).To(HaveKey("data")) + g.Expect(secret.Data["data"]).To(Equal([]byte("oldDockerData"))) + + userDockerSecret.Data["data"] = []byte("newDockerData") + g.Expect(fakeClient.Update(ctx, userDockerSecret)).To(Succeed()) + upsertEvent = &events.UpsertEvent{Resource: userDockerSecret} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(dockerSecret), secret)).To(Succeed()) + g.Expect(secret.Data).To(HaveKey("data")) + g.Expect(secret.Data["data"]).To(Equal([]byte("newDockerData"))) + // remove Gateway from store and verify that Deployment UpsertEvent results in deletion of resource store.deleteGateway(client.ObjectKeyFromObject(gateway)) g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(BeNil()) @@ -117,11 +196,9 @@ func TestHandleEventBatch_Delete(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore(nil, "", "", "") + store := newStore([]string{dockerTestSecretName}, jwtTestSecretName, caTestSecretName, clientTestSecretName) provisioner, fakeClient, _ := defaultNginxProvisioner() provisioner.cfg.StatusQueue = status.NewQueue() - provisioner.cfg.Plus = false - provisioner.cfg.NginxDockerSecretNames = nil labelSelector := metav1.LabelSelector{ MatchLabels: map[string]string{"app": "nginx"}, @@ -134,6 +211,7 @@ func TestHandleEventBatch_Delete(t *testing.T) { ctx := context.TODO() logger := logr.Discard() + // initialize resources gateway := &gatewayv1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Name: "gw", @@ -155,15 +233,89 @@ func TestHandleEventBatch_Delete(t *testing.T) { }, } + jwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw-nginx-" + jwtTestSecretName, + Namespace: "default", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, + }, + } + + userJwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: jwtTestSecretName, + Namespace: ngfNamespace, + }, + } + g.Expect(fakeClient.Create(ctx, userJwtSecret)).To(Succeed()) + + userCASecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: caTestSecretName, + Namespace: ngfNamespace, + }, + } + g.Expect(fakeClient.Create(ctx, userCASecret)).To(Succeed()) + + userClientSSLSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientTestSecretName, + Namespace: ngfNamespace, + }, + } + g.Expect(fakeClient.Create(ctx, userClientSSLSecret)).To(Succeed()) + + userDockerSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dockerTestSecretName, + Namespace: ngfNamespace, + }, + } + g.Expect(fakeClient.Create(ctx, userDockerSecret)).To(Succeed()) + + upsertEvent := &events.UpsertEvent{Resource: gateway} + batch := events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) store.registerResourceInGatewayConfig(client.ObjectKeyFromObject(gateway), deployment) // if deployment is deleted, it should be re-created since Gateway still exists deleteEvent := &events.DeleteEvent{Type: deployment, NamespacedName: client.ObjectKeyFromObject(deployment)} - batch := events.EventBatch{deleteEvent} + batch = events.EventBatch{deleteEvent} handler.HandleEventBatch(ctx, logger, batch) g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), 
&appsv1.Deployment{})).To(Succeed()) + // if provisioned secret is deleted, it should be re-created + deleteEvent = &events.DeleteEvent{Type: jwtSecret, NamespacedName: client.ObjectKeyFromObject(jwtSecret)} + batch = events.EventBatch{deleteEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(jwtSecret), &corev1.Secret{})).To(Succeed()) + + // if user-provided secrets are deleted, then delete the duplicates of them + verifySecret := func(name string, userSecret *corev1.Secret) { + key := types.NamespacedName{ + Name: "gw-nginx-" + name, + Namespace: "default", + } + + secret := &corev1.Secret{} + g.Expect(fakeClient.Get(ctx, key, secret)).To(Succeed()) + store.registerResourceInGatewayConfig(client.ObjectKeyFromObject(gateway), secret) + + g.Expect(fakeClient.Delete(ctx, userSecret)).To(Succeed()) + deleteEvent = &events.DeleteEvent{Type: userSecret, NamespacedName: client.ObjectKeyFromObject(userSecret)} + batch = events.EventBatch{deleteEvent} + handler.HandleEventBatch(ctx, logger, batch) + + g.Expect(fakeClient.Get(ctx, key, &corev1.Secret{})).ToNot(Succeed()) + } + + verifySecret(jwtTestSecretName, userJwtSecret) + verifySecret(caTestSecretName, userCASecret) + verifySecret(clientTestSecretName, userClientSSLSecret) + verifySecret(dockerTestSecretName, userDockerSecret) + // delete Gateway deleteEvent = &events.DeleteEvent{Type: gateway, NamespacedName: client.ObjectKeyFromObject(gateway)} batch = events.EventBatch{deleteEvent} diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go index 7165805a44..71439f4b4e 100644 --- a/internal/mode/static/provisioner/provisioner.go +++ b/internal/mode/static/provisioner/provisioner.go @@ -3,6 +3,7 @@ package provisioner import ( "context" "fmt" + "slices" "strings" "sync" "time" @@ -105,7 +106,16 @@ func NewNginxProvisioner( return nil, nil, fmt.Errorf("error initializing eventHandler: %w", err) } - eventLoop, err := newEventLoop(ctx, mgr, handler, cfg.Logger, selector) + eventLoop, err := newEventLoop( + ctx, + mgr, + handler, + cfg.Logger, + selector, + cfg.GatewayPodConfig.Namespace, + cfg.NginxDockerSecretNames, + cfg.PlusUsageConfig, + ) if err != nil { return nil, nil, err } @@ -162,7 +172,7 @@ func (p *NginxProvisioner) provisionNginx( objects, err := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) if err != nil { - return fmt.Errorf("error provisioning nginx resources :%w", err) + p.cfg.Logger.Error(err, "error provisioning some nginx resources") } p.cfg.Logger.Info( @@ -275,7 +285,7 @@ func (p *NginxProvisioner) reprovisionNginx( objects, err := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) if err != nil { - return fmt.Errorf("error provisioning nginx resources :%w", err) + p.cfg.Logger.Error(err, "error provisioning some nginx resources") } p.cfg.Logger.Info( @@ -341,6 +351,41 @@ func (p *NginxProvisioner) deprovisionNginx(ctx context.Context, gatewayNSName t return nil } +// isUserSecret determines if the provided secret name is a special user secret, +// for example an NGINX docker registry secret or NGINX Plus secret. 
+func (p *NginxProvisioner) isUserSecret(name string) bool { + if slices.Contains(p.cfg.NginxDockerSecretNames, name) { + return true + } + + if p.cfg.PlusUsageConfig != nil { + return name == p.cfg.PlusUsageConfig.SecretName || + name == p.cfg.PlusUsageConfig.CASecretName || + name == p.cfg.PlusUsageConfig.ClientSSLSecretName + } + + return false +} + +func (p *NginxProvisioner) deleteSecret(ctx context.Context, secretNSName types.NamespacedName) error { + if !p.isLeader() { + return nil + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNSName.Name, + Namespace: secretNSName.Namespace, + }, + } + + if err := p.k8sClient.Delete(ctx, secret); err != nil && !apierrors.IsNotFound(err) { + return err + } + + return nil +} + // RegisterGateway is called by the main event handler when a Gateway API resource event occurs // and the graph is built. The provisioner updates the Gateway config in the store and then: // - If it's a valid Gateway, create or update nginx resources associated with the Gateway, if necessary. diff --git a/internal/mode/static/provisioner/provisioner_test.go b/internal/mode/static/provisioner/provisioner_test.go index 2c611912d8..8ef7873386 100644 --- a/internal/mode/static/provisioner/provisioner_test.go +++ b/internal/mode/static/provisioner/provisioner_test.go @@ -144,7 +144,7 @@ func defaultNginxProvisioner( deploymentStore := &agentfakes.FakeDeploymentStorer{} return &NginxProvisioner{ - store: newStore([]string{"docker-secret"}, "jwt-secret", "ca-secret", "client-ssl-secret"), + store: newStore([]string{dockerTestSecretName}, jwtTestSecretName, caTestSecretName, clientTestSecretName), k8sClient: fakeClient, cfg: Config{ DeploymentStore: deploymentStore, diff --git a/internal/mode/static/provisioner/store.go b/internal/mode/static/provisioner/store.go index 5a57d5cc99..ac63beb907 100644 --- a/internal/mode/static/provisioner/store.go +++ b/internal/mode/static/provisioner/store.go @@ -88,6 +88,13 @@ func (s *store) getGateway(nsName types.NamespacedName) *gatewayv1.Gateway { return s.gateways[nsName] } +func (s *store) getGateways() map[types.NamespacedName]*gatewayv1.Gateway { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.gateways +} + // registerResourceInGatewayConfig adds or updates the provided resource in the tracking map. // If the object being updated is the Gateway, check if anything that we care about changed. 
This ensures that // we don't attempt to update nginx resources when the main event handler triggers this call with an unrelated event diff --git a/internal/mode/static/provisioner/store_test.go b/internal/mode/static/provisioner/store_test.go index 0358341f03..079736fde0 100644 --- a/internal/mode/static/provisioner/store_test.go +++ b/internal/mode/static/provisioner/store_test.go @@ -64,6 +64,37 @@ func TestDeleteGateway(t *testing.T) { g.Expect(store.getGateway(nsName)).To(BeNil()) } +func TestGetGateways(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + store := newStore(nil, "", "", "") + gateway1 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway-1", + Namespace: "default", + }, + } + gateway2 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway-2", + Namespace: "default", + }, + } + nsName1 := client.ObjectKeyFromObject(gateway1) + nsName2 := client.ObjectKeyFromObject(gateway2) + + store.updateGateway(gateway1) + store.updateGateway(gateway2) + + gateways := store.getGateways() + + g.Expect(gateways).To(HaveKey(nsName1)) + g.Expect(gateways).To(HaveKey(nsName2)) + g.Expect(gateways[nsName1]).To(Equal(gateway1)) + g.Expect(gateways[nsName2]).To(Equal(gateway2)) +} + func TestRegisterResourceInGatewayConfig(t *testing.T) { t.Parallel() g := NewWithT(t) From 01bb4160fd2f42ed58dd29f0a559677e8b7d9279 Mon Sep 17 00:00:00 2001 From: bjee19 <139261241+bjee19@users.noreply.github.com> Date: Sat, 22 Mar 2025 19:14:59 -0700 Subject: [PATCH 16/32] CP/DP Split: Update functional tests (#3207) Update functional tests for the control plane data plane split. Problem: The functional tests do not pass with the current architecture. Solution: Add updates to functional tests. --- internal/mode/static/provisioner/handler.go | 4 + .../mode/static/provisioner/handler_test.go | 20 +- internal/mode/static/provisioner/objects.go | 29 + .../mode/static/provisioner/objects_test.go | 106 ++- internal/mode/static/telemetry/collector.go | 21 +- .../mode/static/telemetry/collector_test.go | 8 - tests/framework/collector.go | 8 + tests/framework/crossplane.go | 4 +- tests/framework/portforward.go | 4 +- tests/framework/resourcemanager.go | 90 +- tests/suite/advanced_routing_test.go | 8 + tests/suite/client_settings_test.go | 21 +- tests/suite/dataplane_perf_test.go | 85 +- tests/suite/graceful_recovery_test.go | 795 ++++++++++-------- tests/suite/manifests/tracing/nginxproxy.yaml | 12 - tests/suite/reconfig_test.go | 7 +- tests/suite/sample_test.go | 36 +- tests/suite/snippets_filter_test.go | 174 ++-- tests/suite/system_suite_test.go | 26 +- tests/suite/telemetry_test.go | 13 +- tests/suite/tracing_test.go | 103 ++- tests/suite/upstream_settings_test.go | 30 +- 22 files changed, 942 insertions(+), 662 deletions(-) delete mode 100644 tests/suite/manifests/tracing/nginxproxy.yaml diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go index b3d7fe5efd..7757cec043 100644 --- a/internal/mode/static/provisioner/handler.go +++ b/internal/mode/static/provisioner/handler.go @@ -106,6 +106,10 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, case *events.DeleteEvent: switch e.Type.(type) { case *gatewayv1.Gateway: + if !h.provisioner.isLeader() { + h.provisioner.setResourceToDelete(e.NamespacedName) + } + if err := h.provisioner.deprovisionNginx(ctx, e.NamespacedName); err != nil { logger.Error(err, "error deprovisioning nginx resources") } diff --git 
a/internal/mode/static/provisioner/handler_test.go b/internal/mode/static/provisioner/handler_test.go index 720690b972..fe1d63e9be 100644
--- a/internal/mode/static/provisioner/handler_test.go
+++ b/internal/mode/static/provisioner/handler_test.go
@@ -316,7 +316,25 @@ func TestHandleEventBatch_Delete(t *testing.T) { verifySecret(clientTestSecretName, userClientSSLSecret) verifySecret(dockerTestSecretName, userDockerSecret)
- // delete Gateway
+ // delete Gateway when provisioner is not leader
+ provisioner.leader = false
+
+ deleteEvent = &events.DeleteEvent{Type: gateway, NamespacedName: client.ObjectKeyFromObject(gateway)}
+ batch = events.EventBatch{deleteEvent}
+ handler.HandleEventBatch(ctx, logger, batch)
+
+ g.Expect(provisioner.resourcesToDeleteOnStartup).To(Equal([]types.NamespacedName{
+ {
+ Namespace: "default",
+ Name: "gw",
+ },
+ }))
+ g.Expect(store.getGateway(client.ObjectKeyFromObject(gateway))).To(BeNil())
+ g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(deployment), &appsv1.Deployment{})).To(Succeed())
+
+ // delete Gateway when provisioner is leader
+ provisioner.leader = true
+
 deleteEvent = &events.DeleteEvent{Type: gateway, NamespacedName: client.ObjectKeyFromObject(gateway)} batch = events.EventBatch{deleteEvent} handler.HandleEventBatch(ctx, logger, batch)
diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index dc73164a40..5e9710efd5 100644
--- a/internal/mode/static/provisioner/objects.go
+++ b/internal/mode/static/provisioner/objects.go
@@ -5,6 +5,7 @@ import ( "errors" "fmt" "maps"
+ "sort"
 "strconv" "time"
@@ -43,6 +44,10 @@ func (p *NginxProvisioner) buildNginxResourceObjects( gateway *gatewayv1.Gateway, nProxyCfg *graph.EffectiveNginxProxy, ) ([]client.Object, error) {
+ // Need to ensure nginx resource objects are generated deterministically. Specifically, when generating
+ // an object's field by ranging over a map, since ranging over a map is done in random order, we need to
+ // do some processing to ensure the generated results are the same each time.
+
 ngxIncludesConfigMapName := controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix) ngxAgentConfigMapName := controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix)
@@ -174,6 +179,12 @@ func (p *NginxProvisioner) buildNginxSecrets( } }
+ // need to sort secrets so every time buildNginxSecrets is called it will generate the exact same
+ // array of secrets. This is needed to keep the method's results deterministic.
+ sort.Slice(secrets, func(i, j int) bool {
+ return secrets[i].GetName() < secrets[j].GetName()
+ })
+
 if jwtSecretName != "" { newSecret, err := p.getAndUpdateSecret( p.cfg.PlusUsageConfig.SecretName,
@@ -358,6 +369,12 @@ func buildNginxService( servicePorts = append(servicePorts, servicePort) }
+ // need to sort ports so every time buildNginxService is called it will generate the exact same
+ // array of ports. This is needed to keep the method's results deterministic.
+ sort.Slice(servicePorts, func(i, j int) bool {
+ return servicePorts[i].Port < servicePorts[j].Port
+ })
+
 svc := &corev1.Service{ ObjectMeta: objectMeta, Spec: corev1.ServiceSpec{
@@ -467,6 +484,12 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( podAnnotations["prometheus.io/port"] = strconv.Itoa(int(metricsPort)) }
+ // need to sort ports so every time buildNginxPodTemplateSpec is called it will generate the exact same
+ // array of ports. This is needed to keep the method's results deterministic.
+ sort.Slice(containerPorts, func(i, j int) bool {
+ return containerPorts[i].ContainerPort < containerPorts[j].ContainerPort
+ })
+
 image, pullPolicy := p.buildImage(nProxyCfg) spec := corev1.PodTemplateSpec{
@@ -622,6 +645,12 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( spec.Spec.ImagePullSecrets = append(spec.Spec.ImagePullSecrets, ref) }
+ // need to sort secret names so every time buildNginxPodTemplateSpec is called it will generate the exact same
+ // array of secrets. This is needed to keep the method's results deterministic.
+ sort.Slice(spec.Spec.ImagePullSecrets, func(i, j int) bool {
+ return spec.Spec.ImagePullSecrets[i].Name < spec.Spec.ImagePullSecrets[j].Name
+ })
+
 if p.cfg.Plus { initCmd := spec.Spec.InitContainers[0].Command initCmd = append(initCmd,
diff --git a/internal/mode/static/provisioner/objects_test.go b/internal/mode/static/provisioner/objects_test.go index 27fba0d734..f59f4ce253 100644
--- a/internal/mode/static/provisioner/objects_test.go
+++ b/internal/mode/static/provisioner/objects_test.go
@@ -59,6 +59,12 @@ func TestBuildNginxResourceObjects(t *testing.T) { { Port: 80, },
+ {
+ Port: 8888,
+ },
+ {
+ Port: 9999,
+ },
 }, }, }
@@ -116,10 +122,24 @@ func TestBuildNginxResourceObjects(t *testing.T) { validateMeta(svc) g.Expect(svc.Spec.Type).To(Equal(defaultServiceType)) g.Expect(svc.Spec.ExternalTrafficPolicy).To(Equal(defaultServicePolicy))
- g.Expect(svc.Spec.Ports).To(ContainElement(corev1.ServicePort{
- Port: 80,
- Name: "port-80",
- TargetPort: intstr.FromInt(80),
+
+ // service ports are sorted in ascending order by port number when we make the nginx object
+ g.Expect(svc.Spec.Ports).To(Equal([]corev1.ServicePort{
+ {
+ Port: 80,
+ Name: "port-80",
+ TargetPort: intstr.FromInt(80),
+ },
+ {
+ Port: 8888,
+ Name: "port-8888",
+ TargetPort: intstr.FromInt(8888),
+ },
+ {
+ Port: 9999,
+ Name: "port-9999",
+ TargetPort: intstr.FromInt(9999),
+ },
 }))
 depObj := objects[4]
@@ -132,13 +152,24 @@ func TestBuildNginxResourceObjects(t *testing.T) { g.Expect(template.Spec.Containers).To(HaveLen(1)) container := template.Spec.Containers[0]
- g.Expect(container.Ports).To(ContainElement(corev1.ContainerPort{
- ContainerPort: config.DefaultNginxMetricsPort,
- Name: "metrics",
- }))
- g.Expect(container.Ports).To(ContainElement(corev1.ContainerPort{
- ContainerPort: 80,
- Name: "port-80",
+ // container ports are sorted in ascending order by port number when we make the nginx object
+ g.Expect(container.Ports).To(Equal([]corev1.ContainerPort{
+ {
+ ContainerPort: 80,
+ Name: "port-80",
+ },
+ {
+ ContainerPort: 8888,
+ Name: "port-8888",
+ },
+ {
+ ContainerPort: config.DefaultNginxMetricsPort,
+ Name: "metrics",
+ },
+ {
+ ContainerPort: 9999,
+ Name: "port-9999",
+ },
 }))
 g.Expect(container.Image).To(Equal(fmt.Sprintf("%s:1.0.0", defaultNginxImagePath)))
@@ -415,14 +446,32 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { }, Data: map[string][]byte{"data": []byte("docker")}, }
- fakeClient := fake.NewFakeClient(dockerSecret)
+
+ dockerSecretRegistry1Name := dockerTestSecretName + "-registry1"
+ dockerSecretRegistry1 := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: dockerSecretRegistry1Name,
+ Namespace: ngfNamespace,
+ },
+ Data: map[string][]byte{"data": []byte("docker-registry1")},
+ }
+
+ dockerSecretRegistry2Name := dockerTestSecretName + "-registry2"
+ dockerSecretRegistry2 := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name:
dockerSecretRegistry2Name, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"data": []byte("docker-registry2")}, + } + fakeClient := fake.NewFakeClient(dockerSecret, dockerSecretRegistry1, dockerSecretRegistry2) provisioner := &NginxProvisioner{ cfg: Config{ GatewayPodConfig: &config.GatewayPodConfig{ Namespace: ngfNamespace, }, - NginxDockerSecretNames: []string{dockerTestSecretName}, + NginxDockerSecretNames: []string{dockerTestSecretName, dockerSecretRegistry1Name, dockerSecretRegistry2Name}, }, k8sClient: fakeClient, baseLabelSelector: metav1.LabelSelector{ @@ -443,7 +492,7 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(objects).To(HaveLen(6)) + g.Expect(objects).To(HaveLen(8)) expLabels := map[string]string{ "app": "nginx", @@ -451,18 +500,41 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { "app.kubernetes.io/name": "gw-nginx", } + // the (docker-only) secret order in the object list is sorted by secret name + secretObj := objects[0] secret, ok := secretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerTestSecretName))) g.Expect(secret.GetLabels()).To(Equal(expLabels)) - depObj := objects[5] + registry1SecretObj := objects[1] + secret, ok = registry1SecretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerSecretRegistry1Name))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + registry2SecretObj := objects[2] + secret, ok = registry2SecretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerSecretRegistry2Name))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + depObj := objects[7] dep, ok := depObj.(*appsv1.Deployment) g.Expect(ok).To(BeTrue()) - g.Expect(dep.Spec.Template.Spec.ImagePullSecrets).To(ContainElement(corev1.LocalObjectReference{ - Name: controller.CreateNginxResourceName(resourceName, dockerTestSecretName), + // imagePullSecrets is sorted by name when we make the nginx object + g.Expect(dep.Spec.Template.Spec.ImagePullSecrets).To(Equal([]corev1.LocalObjectReference{ + { + Name: controller.CreateNginxResourceName(resourceName, dockerTestSecretName), + }, + { + Name: controller.CreateNginxResourceName(resourceName, dockerSecretRegistry1Name), + }, + { + Name: controller.CreateNginxResourceName(resourceName, dockerSecretRegistry2Name), + }, })) } diff --git a/internal/mode/static/telemetry/collector.go b/internal/mode/static/telemetry/collector.go index facc7dc56a..1be3accf14 100644 --- a/internal/mode/static/telemetry/collector.go +++ b/internal/mode/static/telemetry/collector.go @@ -145,10 +145,7 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { return Data{}, fmt.Errorf("failed to collect cluster information: %w", err) } - graphResourceCount, err := collectGraphResourceCount(g, c.cfg.ConfigurationGetter) - if err != nil { - return Data{}, fmt.Errorf("failed to collect NGF resource counts: %w", err) - } + graphResourceCount := collectGraphResourceCount(g, c.cfg.ConfigurationGetter) replicaSet, err := getPodReplicaSet(ctx, c.cfg.K8sClientReader, c.cfg.PodNSName) if err != nil { @@ -193,14 +190,10 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { 
func collectGraphResourceCount( g *graph.Graph, configurationGetter ConfigurationGetter, -) (NGFResourceCounts, error) { +) NGFResourceCounts { ngfResourceCounts := NGFResourceCounts{} cfg := configurationGetter.GetLatestConfiguration() - if cfg == nil { - return ngfResourceCounts, errors.New("latest configuration cannot be nil") - } - ngfResourceCounts.GatewayClassCount = int64(len(g.IgnoredGatewayClasses)) if g.GatewayClass != nil { ngfResourceCounts.GatewayClassCount++ @@ -219,9 +212,11 @@ func collectGraphResourceCount( ngfResourceCounts.SecretCount = int64(len(g.ReferencedSecrets)) ngfResourceCounts.ServiceCount = int64(len(g.ReferencedServices)) - for _, upstream := range cfg.Upstreams { - if upstream.ErrorMsg == "" { - ngfResourceCounts.EndpointCount += int64(len(upstream.Endpoints)) + if cfg != nil { + for _, upstream := range cfg.Upstreams { + if upstream.ErrorMsg == "" { + ngfResourceCounts.EndpointCount += int64(len(upstream.Endpoints)) + } } } @@ -249,7 +244,7 @@ func collectGraphResourceCount( ngfResourceCounts.NginxProxyCount = int64(len(g.ReferencedNginxProxies)) ngfResourceCounts.SnippetsFilterCount = int64(len(g.SnippetsFilters)) - return ngfResourceCounts, nil + return ngfResourceCounts } type RouteCounts struct { diff --git a/internal/mode/static/telemetry/collector_test.go b/internal/mode/static/telemetry/collector_test.go index 2d71dedf90..e216ac3b5a 100644 --- a/internal/mode/static/telemetry/collector_test.go +++ b/internal/mode/static/telemetry/collector_test.go @@ -728,14 +728,6 @@ var _ = Describe("Collector", Ordered, func() { _, err := dataCollector.Collect(ctx) Expect(err).To(MatchError(expectedError)) }) - - It("should error on nil latest configuration", func(ctx SpecContext) { - expectedError := errors.New("latest configuration cannot be nil") - fakeConfigurationGetter.GetLatestConfigurationReturns(nil) - - _, err := dataCollector.Collect(ctx) - Expect(err).To(MatchError(expectedError)) - }) }) }) }) diff --git a/tests/framework/collector.go b/tests/framework/collector.go index e5e4d3f377..61e24ba770 100644 --- a/tests/framework/collector.go +++ b/tests/framework/collector.go @@ -28,6 +28,14 @@ func InstallCollector() ([]byte, error) { return output, err } + if output, err := exec.Command( + "helm", + "repo", + "update", + ).CombinedOutput(); err != nil { + return output, fmt.Errorf("failed to update helm repos: %w; output: %s", err, string(output)) + } + args := []string{ "install", collectorChartReleaseName, diff --git a/tests/framework/crossplane.go b/tests/framework/crossplane.go index 81f47e3567..f2ada703c5 100644 --- a/tests/framework/crossplane.go +++ b/tests/framework/crossplane.go @@ -203,7 +203,7 @@ func injectCrossplaneContainer( func createCrossplaneExecutor( k8sClient kubernetes.Interface, k8sConfig *rest.Config, - ngfPodName, + nginxPodName, namespace string, ) (remotecommand.Executor, error) { cmd := []string{"./crossplane", "/etc/nginx/nginx.conf"} @@ -217,7 +217,7 @@ func createCrossplaneExecutor( req := k8sClient.CoreV1().RESTClient().Post(). Resource("pods"). SubResource("exec"). - Name(ngfPodName). + Name(nginxPodName). Namespace(namespace). 
VersionedParams(opts, scheme.ParameterCodec) diff --git a/tests/framework/portforward.go b/tests/framework/portforward.go index 26cd4b3cfb..500dc354aa 100644 --- a/tests/framework/portforward.go +++ b/tests/framework/portforward.go @@ -52,13 +52,13 @@ func PortForward(config *rest.Config, namespace, podName string, ports []string, for { if err := forward(); err != nil { slog.Error("error forwarding ports", "error", err) - slog.Info("retrying port forward in 100ms...") + slog.Info("retrying port forward in 1s...") } select { case <-stopCh: return - case <-time.After(100 * time.Millisecond): + case <-time.After(1 * time.Second): // retrying } } diff --git a/tests/framework/resourcemanager.go b/tests/framework/resourcemanager.go index 434a5ecaed..50aab0653e 100644 --- a/tests/framework/resourcemanager.go +++ b/tests/framework/resourcemanager.go @@ -701,22 +701,57 @@ func GetReadyNGFPodNames( "app.kubernetes.io/instance": releaseName, }, ); err != nil { - return nil, fmt.Errorf("error getting list of Pods: %w", err) + return nil, fmt.Errorf("error getting list of NGF Pods: %w", err) } - if len(podList.Items) > 0 { - var names []string - for _, pod := range podList.Items { - for _, cond := range pod.Status.Conditions { - if cond.Type == core.PodReady && cond.Status == core.ConditionTrue { - names = append(names, pod.Name) - } + if len(podList.Items) == 0 { + return nil, errors.New("unable to find NGF Pod(s)") + } + + names := getReadyPodNames(podList) + + return names, nil +} + +// GetReadyNginxPodNames returns the name(s) of the NGINX Pod(s). +func GetReadyNginxPodNames( + k8sClient client.Client, + namespace string, + timeout time.Duration, +) ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + var podList core.PodList + if err := k8sClient.List( + ctx, + &podList, + client.InNamespace(namespace), + client.HasLabels{"gateway.networking.k8s.io/gateway-name"}, + ); err != nil { + return nil, fmt.Errorf("error getting list of NGINX Pods: %w", err) + } + + if len(podList.Items) == 0 { + return nil, errors.New("unable to find NGINX Pod(s)") + } + + names := getReadyPodNames(podList) + + return names, nil +} + +func getReadyPodNames(podList core.PodList) []string { + var names []string + for _, pod := range podList.Items { + for _, cond := range pod.Status.Conditions { + if cond.Type == core.PodReady && cond.Status == core.ConditionTrue { + names = append(names, pod.Name) } } - return names, nil } - return nil, errors.New("unable to find NGF Pod(s)") + return names } func countNumberOfReadyParents(parents []v1.RouteParentStatus) int { @@ -733,34 +768,7 @@ func countNumberOfReadyParents(parents []v1.RouteParentStatus) int { return readyCount } -func (rm *ResourceManager) WaitForAppsToBeReadyWithPodCount(namespace string, podCount int) error { - ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.CreateTimeout) - defer cancel() - - return rm.WaitForAppsToBeReadyWithCtxWithPodCount(ctx, namespace, podCount) -} - -func (rm *ResourceManager) WaitForAppsToBeReadyWithCtxWithPodCount( - ctx context.Context, - namespace string, - podCount int, -) error { - if err := rm.WaitForPodsToBeReadyWithCount(ctx, namespace, podCount); err != nil { - return err - } - - if err := rm.waitForHTTPRoutesToBeReady(ctx, namespace); err != nil { - return err - } - - if err := rm.waitForGRPCRoutesToBeReady(ctx, namespace); err != nil { - return err - } - - return rm.waitForGatewaysToBeReady(ctx, namespace) -} - -// WaitForPodsToBeReady waits for all 
Pods in the specified namespace to be ready or +// WaitForPodsToBeReadyWithCount waits for all Pods in the specified namespace to be ready or // until the provided context is canceled. func (rm *ResourceManager) WaitForPodsToBeReadyWithCount(ctx context.Context, namespace string, count int) error { return wait.PollUntilContextCancel( @@ -817,17 +825,17 @@ func (rm *ResourceManager) WaitForGatewayObservedGeneration( } // GetNginxConfig uses crossplane to get the nginx configuration and convert it to JSON. -func (rm *ResourceManager) GetNginxConfig(ngfPodName, namespace string) (*Payload, error) { +func (rm *ResourceManager) GetNginxConfig(nginxPodName, namespace string) (*Payload, error) { if err := injectCrossplaneContainer( rm.ClientGoClient, rm.TimeoutConfig.UpdateTimeout, - ngfPodName, + nginxPodName, namespace, ); err != nil { return nil, err } - exec, err := createCrossplaneExecutor(rm.ClientGoClient, rm.K8sConfig, ngfPodName, namespace) + exec, err := createCrossplaneExecutor(rm.ClientGoClient, rm.K8sConfig, nginxPodName, namespace) if err != nil { return nil, err } diff --git a/tests/suite/advanced_routing_test.go b/tests/suite/advanced_routing_test.go index a58c9a7f7e..844e1db02c 100644 --- a/tests/suite/advanced_routing_test.go +++ b/tests/suite/advanced_routing_test.go @@ -39,9 +39,17 @@ var _ = Describe("AdvancedRouting", Ordered, Label("functional", "routing"), fun Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) }) AfterAll(func() { + cleanUpPortForward() + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) diff --git a/tests/suite/client_settings_test.go b/tests/suite/client_settings_test.go index f1f12304ee..835f3a9896 100644 --- a/tests/suite/client_settings_test.go +++ b/tests/suite/client_settings_test.go @@ -32,6 +32,8 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" } namespace = "clientsettings" + + nginxPodName string ) BeforeAll(func() { @@ -44,9 +46,19 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName = nginxPodNames[0] + + setUpPortForward(nginxPodName, namespace) }) AfterAll(func() { + cleanUpPortForward() + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) @@ -96,13 +108,8 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" filePrefix := fmt.Sprintf("/etc/nginx/includes/ClientSettingsPolicy_%s", namespace) BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = 
resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/suite/dataplane_perf_test.go b/tests/suite/dataplane_perf_test.go index 5604b0188f..aa34131db1 100644 --- a/tests/suite/dataplane_perf_test.go +++ b/tests/suite/dataplane_perf_test.go @@ -18,44 +18,47 @@ import ( ) var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), func() { - files := []string{ - "dp-perf/coffee.yaml", - "dp-perf/gateway.yaml", - "dp-perf/cafe-routes.yaml", - } - - var ns core.Namespace - - var addr string - targetURL := "http://cafe.example.com" - var outFile *os.File - - t1 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/latte"), - } - t2 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/coffee"), - Header: http.Header{"version": []string{"v2"}}, - } - t3 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/coffee?TEST=v2"), - } - t4 := framework.Target{ - Method: "GET", - URL: fmt.Sprintf("%s%s", targetURL, "/tea"), - } - t5 := framework.Target{ - Method: "POST", - URL: fmt.Sprintf("%s%s", targetURL, "/tea"), - } + var ( + files = []string{ + "dp-perf/coffee.yaml", + "dp-perf/gateway.yaml", + "dp-perf/cafe-routes.yaml", + } + + namespace = "dp-perf" + + targetURL = "http://cafe.example.com" + + t1 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/latte"), + } + t2 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/coffee"), + Header: http.Header{"version": []string{"v2"}}, + } + t3 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/coffee?TEST=v2"), + } + t4 = framework.Target{ + Method: "GET", + URL: fmt.Sprintf("%s%s", targetURL, "/tea"), + } + t5 = framework.Target{ + Method: "POST", + URL: fmt.Sprintf("%s%s", targetURL, "/tea"), + } + + outFile *os.File + addr string + ) BeforeAll(func() { - ns = core.Namespace{ + ns := core.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: "dp-perf", + Name: namespace, }, } @@ -63,6 +66,12 @@ var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) + port := ":80" if portFwdPort != 0 { port = fmt.Sprintf(":%s", strconv.Itoa(portFwdPort)) @@ -79,8 +88,10 @@ var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), }) AfterAll(func() { - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) + cleanUpPortForward() + + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) outFile.Close() }) diff --git a/tests/suite/graceful_recovery_test.go b/tests/suite/graceful_recovery_test.go index 33c3c447d0..2e844f46e6 100644 --- a/tests/suite/graceful_recovery_test.go +++ b/tests/suite/graceful_recovery_test.go @@ -17,7 +17,6 @@ import ( core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - ctlr "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -29,56 +28,162 @@ const ( ngfContainerName = "nginx-gateway" ) -// Since checkNGFContainerLogsForErrors may experience interference from previous tests (as explained in the function -// documentation), this test is recommended to be run separate from other tests. +// Since this test involves restarting of the test node, it is recommended to be run separate from other tests +// such that any issues in this test do not interfere with other tests. var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), func() { - files := []string{ - "graceful-recovery/cafe.yaml", - "graceful-recovery/cafe-secret.yaml", - "graceful-recovery/gateway.yaml", - "graceful-recovery/cafe-routes.yaml", + var ( + files = []string{ + "graceful-recovery/cafe.yaml", + "graceful-recovery/cafe-secret.yaml", + "graceful-recovery/gateway.yaml", + "graceful-recovery/cafe-routes.yaml", + } + + ns core.Namespace + + baseHTTPURL = "http://cafe.example.com" + baseHTTPSURL = "https://cafe.example.com" + teaURL = baseHTTPSURL + "/tea" + coffeeURL = baseHTTPURL + "/coffee" + + activeNGFPodName, activeNginxPodName string + ) + + checkForWorkingTraffic := func(teaURL, coffeeURL string) error { + if err := expectRequestToSucceed(teaURL, address, "URI: /tea"); err != nil { + return err + } + if err := expectRequestToSucceed(coffeeURL, address, "URI: /coffee"); err != nil { + return err + } + return nil } - var ns core.Namespace + checkForFailingTraffic := func(teaURL, coffeeURL string) error { + if err := expectRequestToFail(teaURL, address); err != nil { + return err + } + if err := expectRequestToFail(coffeeURL, address); err != nil { + return err + } + return nil + } - baseHTTPURL := "http://cafe.example.com" - baseHTTPSURL := "https://cafe.example.com" - teaURL := baseHTTPSURL + "/tea" - coffeeURL := baseHTTPURL + "/coffee" + getContainerRestartCount := func(podName, namespace, containerName string) (int, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + defer cancel() - var ngfPodName string + var pod core.Pod + if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: podName}, &pod); err != nil { + return 0, fmt.Errorf("error retrieving Pod: %w", err) + } - BeforeEach(func() { - // this test is unique in that it will check the entire log of both ngf and nginx containers - // for any errors, so in order to avoid errors generated in previous tests we will uninstall - // NGF installed at the suite level, then re-deploy our own. We will also uninstall and re-install - // NGF between each graceful-recovery test for the same reason. 
- teardown(releaseName) + var restartCount int + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.Name == containerName { + restartCount = int(containerStatus.RestartCount) + } + } - setup(getDefaultSetupCfg()) + return restartCount, nil + } - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) + checkContainerRestart := func(podName, containerName, namespace string, currentRestartCount int) error { + restartCount, err := getContainerRestartCount(podName, namespace, containerName) + if err != nil { + return err + } - ngfPodName = podNames[0] - if portFwdPort != 0 { - coffeeURL = fmt.Sprintf("%s:%d/coffee", baseHTTPURL, portFwdPort) + if restartCount != currentRestartCount+1 { + return fmt.Errorf("expected current restart count: %d to match incremented restart count: %d", + restartCount, currentRestartCount+1) } - if portFwdHTTPSPort != 0 { - teaURL = fmt.Sprintf("%s:%d/tea", baseHTTPSURL, portFwdHTTPSPort) + + return nil + } + + getNodeNames := func() ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + defer cancel() + var nodes core.NodeList + + if err := k8sClient.List(ctx, &nodes); err != nil { + return nil, fmt.Errorf("error listing nodes: %w", err) } - ns = core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "graceful-recovery", - }, + names := make([]string, 0, len(nodes.Items)) + + for _, node := range nodes.Items { + names = append(names, node.Name) } - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReadyWithPodCount(ns.Name, 2)).To(Succeed()) + return names, nil + } + + runNodeDebuggerJob := func(nginxPodName, jobScript string) (*v1.Job, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + defer cancel() + var nginxPod core.Pod + if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: nginxPodName}, &nginxPod); err != nil { + return nil, fmt.Errorf("error retrieving NGF Pod: %w", err) + } + + b, err := resourceManager.GetFileContents("graceful-recovery/node-debugger-job.yaml") + if err != nil { + return nil, fmt.Errorf("error processing node debugger job file: %w", err) + } + + job := &v1.Job{} + if err = yaml.Unmarshal(b.Bytes(), job); err != nil { + return nil, fmt.Errorf("error with yaml unmarshal: %w", err) + } + + job.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"] = nginxPod.Spec.NodeName + if len(job.Spec.Template.Spec.Containers) != 1 { + return nil, fmt.Errorf( + "expected node debugger job to contain one container, actual number: %d", + len(job.Spec.Template.Spec.Containers), + ) + } + job.Spec.Template.Spec.Containers[0].Args = []string{jobScript} + job.Namespace = ns.Name + + if err = resourceManager.Apply([]client.Object{job}); err != nil { + return nil, fmt.Errorf("error in applying job: %w", err) + } + + return job, nil + } + + restartNginxContainer := func(nginxPodName, namespace, containerName string) { + jobScript := "PID=$(pgrep -f \"nginx-agent\") && kill -9 $PID" + + restartCount, err := getContainerRestartCount(nginxPodName, namespace, containerName) + Expect(err).ToNot(HaveOccurred()) + + cleanUpPortForward() + job, err := runNodeDebuggerJob(nginxPodName, jobScript) + Expect(err).ToNot(HaveOccurred()) + + Eventually( + func() error { + 
return checkContainerRestart(nginxPodName, containerName, namespace, restartCount) + }). + WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) + + // default propagation policy is metav1.DeletePropagationOrphan which does not delete the underlying + // pod created through the job after the job is deleted. Setting it to metav1.DeletePropagationBackground + // deletes the underlying pod after the job is deleted. + Expect(resourceManager.Delete( + []client.Object{job}, + client.PropagationPolicy(metav1.DeletePropagationBackground), + )).To(Succeed()) + } + + checkNGFFunctionality := func(teaURL, coffeeURL string, files []string, ns *core.Namespace) { Eventually( func() error { return checkForWorkingTraffic(teaURL, coffeeURL) @@ -86,212 +191,333 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), WithTimeout(timeoutConfig.TestForTrafficTimeout). WithPolling(500 * time.Millisecond). Should(Succeed()) - }) - AfterAll(func() { + cleanUpPortForward() Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) - }) - It("recovers when NGF container is restarted", func() { - runRecoveryTest(teaURL, coffeeURL, ngfPodName, ngfContainerName, files, &ns) - }) + Eventually( + func() error { + return checkForFailingTraffic(teaURL, coffeeURL) + }). + WithTimeout(timeoutConfig.TestForTrafficTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) - It("recovers when nginx container is restarted", func() { - runRecoveryTest(teaURL, coffeeURL, ngfPodName, nginxContainerName, files, &ns) - }) + Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - It("recovers when drained node is restarted", func() { - runRestartNodeWithDrainingTest(teaURL, coffeeURL, files, &ns) - }) + var nginxPodNames []string + var err error + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(10). + Should(BeTrue()) - It("recovers when node is restarted abruptly", func() { - runRestartNodeAbruptlyTest(teaURL, coffeeURL, files, &ns) - }) -}) + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + activeNginxPodName = nginxPodName -func runRestartNodeWithDrainingTest(teaURL, coffeeURL string, files []string, ns *core.Namespace) { - runRestartNodeTest(teaURL, coffeeURL, files, ns, true) -} + setUpPortForward(activeNginxPodName, ns.Name) -func runRestartNodeAbruptlyTest(teaURL, coffeeURL string, files []string, ns *core.Namespace) { - runRestartNodeTest(teaURL, coffeeURL, files, ns, false) -} + Eventually( + func() error { + return checkForWorkingTraffic(teaURL, coffeeURL) + }). + WithTimeout(timeoutConfig.TestForTrafficTimeout). + WithPolling(500 * time.Millisecond). 
+ Should(Succeed()) + } -func runRestartNodeTest(teaURL, coffeeURL string, files []string, ns *core.Namespace, drain bool) { - nodeNames, err := getNodeNames() - Expect(err).ToNot(HaveOccurred()) - Expect(nodeNames).To(HaveLen(1)) + runRestartNodeTest := func(teaURL, coffeeURL string, files []string, ns *core.Namespace, drain bool) { + nodeNames, err := getNodeNames() + Expect(err).ToNot(HaveOccurred()) + Expect(nodeNames).To(HaveLen(1)) - kindNodeName := nodeNames[0] + kindNodeName := nodeNames[0] - Expect(clusterName).ToNot(BeNil(), "clusterName variable not set") - Expect(*clusterName).ToNot(BeEmpty()) - containerName := *clusterName + "-control-plane" + Expect(clusterName).ToNot(BeNil(), "clusterName variable not set") + Expect(*clusterName).ToNot(BeEmpty()) + containerName := *clusterName + "-control-plane" - if portFwdPort != 0 { - close(portForwardStopCh) - } + cleanUpPortForward() + + if drain { + output, err := exec.Command( + "kubectl", + "drain", + kindNodeName, + "--ignore-daemonsets", + "--delete-emptydir-data", + ).CombinedOutput() - if drain { - output, err := exec.Command( - "kubectl", - "drain", - kindNodeName, - "--ignore-daemonsets", - "--delete-emptydir-data", - ).CombinedOutput() + Expect(err).ToNot(HaveOccurred(), string(output)) - Expect(err).ToNot(HaveOccurred(), string(output)) + output, err = exec.Command("kubectl", "delete", "node", kindNodeName).CombinedOutput() + Expect(err).ToNot(HaveOccurred(), string(output)) + } - output, err = exec.Command("kubectl", "delete", "node", kindNodeName).CombinedOutput() - Expect(err).ToNot(HaveOccurred(), string(output)) + _, err = exec.Command("docker", "restart", containerName).CombinedOutput() + Expect(err).ToNot(HaveOccurred()) + + // need to wait for docker container to restart and be running before polling for ready NGF Pods or else we will error + Eventually( + func() bool { + output, err := exec.Command( + "docker", + "inspect", + "-f", + "{{.State.Running}}", + containerName, + ).CombinedOutput() + return strings.TrimSpace(string(output)) == "true" && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). + Should(BeTrue()) + + // ngf can often oscillate between ready and error, so we wait for a stable readiness in ngf + var podNames []string + Eventually( + func() bool { + podNames, err = framework.GetReadyNGFPodNames( + k8sClient, + ngfNamespace, + releaseName, + timeoutConfig.GetStatusTimeout, + ) + return len(podNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout * 2). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(20). + Should(BeTrue()) + newNGFPodName := podNames[0] + + // expected behavior is when node is drained, new pods will be created. when the node is + // abruptly restarted, new pods are not created. + if drain { + Expect(newNGFPodName).ToNot(Equal(activeNGFPodName)) + activeNGFPodName = newNGFPodName + } else { + Expect(newNGFPodName).To(Equal(activeNGFPodName)) + } + + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout * 2). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(20). 
+ Should(BeTrue()) + newNginxPodName := nginxPodNames[0] + + if drain { + Expect(newNginxPodName).ToNot(Equal(activeNginxPodName)) + activeNginxPodName = newNginxPodName + } else { + Expect(newNginxPodName).To(Equal(activeNginxPodName)) + } + + setUpPortForward(activeNginxPodName, ns.Name) + + // sets activeNginxPodName to new pod + checkNGFFunctionality(teaURL, coffeeURL, files, ns) + + if errorLogs := getNGFErrorLogs(activeNGFPodName); errorLogs != "" { + fmt.Printf("NGF has error logs: \n%s", errorLogs) + } + + if errorLogs := getUnexpectedNginxErrorLogs(activeNginxPodName, ns.Name); errorLogs != "" { + fmt.Printf("NGINX has unexpected error logs: \n%s", errorLogs) + } } - _, err = exec.Command("docker", "restart", containerName).CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + runRestartNodeWithDrainingTest := func(teaURL, coffeeURL string, files []string, ns *core.Namespace) { + runRestartNodeTest(teaURL, coffeeURL, files, ns, true) + } - // need to wait for docker container to restart and be running before polling for ready NGF Pods or else we will error - Eventually( - func() bool { - output, err := exec.Command( - "docker", - "inspect", - "-f", - "{{.State.Running}}", - containerName, - ).CombinedOutput() - return strings.TrimSpace(string(output)) == "true" && err == nil - }). - WithTimeout(timeoutConfig.CreateTimeout). - WithPolling(500 * time.Millisecond). - Should(BeTrue()) - - // ngf can often oscillate between ready and error, so we wait for a stable readiness in ngf - var podNames []string - Eventually( - func() bool { - podNames, err = framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout) - return len(podNames) == 1 && err == nil - }). - WithTimeout(timeoutConfig.CreateTimeout * 2). - WithPolling(500 * time.Millisecond). - MustPassRepeatedly(20). 
- Should(BeTrue()) - - ngfPodName := podNames[0] - Expect(ngfPodName).ToNot(BeEmpty()) - - if portFwdPort != 0 { - ports := []string{fmt.Sprintf("%d:80", ngfHTTPForwardedPort), fmt.Sprintf("%d:443", ngfHTTPSForwardedPort)} - portForwardStopCh = make(chan struct{}) - err = framework.PortForward(ctlr.GetConfigOrDie(), ngfNamespace, ngfPodName, ports, portForwardStopCh) - Expect(err).ToNot(HaveOccurred()) + runRestartNodeAbruptlyTest := func(teaURL, coffeeURL string, files []string, ns *core.Namespace) { + runRestartNodeTest(teaURL, coffeeURL, files, ns, false) } - checkNGFFunctionality(teaURL, coffeeURL, ngfPodName, "", files, ns) - if errorLogs := getUnexpectedNginxErrorLogs(ngfPodName); errorLogs != "" { - Skip(fmt.Sprintf("NGINX has unexpected error logs: \n%s", errorLogs)) + getLeaderElectionLeaseHolderName := func() (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + defer cancel() + + var lease coordination.Lease + key := types.NamespacedName{Name: "ngf-test-nginx-gateway-fabric-leader-election", Namespace: ngfNamespace} + + if err := k8sClient.Get(ctx, key, &lease); err != nil { + return "", errors.New("could not retrieve leader election lease") + } + + if *lease.Spec.HolderIdentity == "" { + return "", errors.New("leader election lease holder identity is empty") + } + + return *lease.Spec.HolderIdentity, nil } -} -func runRecoveryTest(teaURL, coffeeURL, ngfPodName, containerName string, files []string, ns *core.Namespace) { - var ( - err error - leaseName string - ) + checkLeaderLeaseChange := func(originalLeaseName string) error { + leaseName, err := getLeaderElectionLeaseHolderName() + if err != nil { + return err + } - if containerName != nginxContainerName { - // Since we have already deployed resources and ran resourceManager.WaitForAppsToBeReadyWithPodCount earlier, - // we know that the applications are ready at this point. This could only be the case if NGF has written - // statuses, which could only be the case if NGF has the leader lease. Since there is only one instance - // of NGF in this test, we can be certain that this is the correct leaseholder name. 
- leaseName, err = getLeaderElectionLeaseHolderName() - Expect(err).ToNot(HaveOccurred()) + if originalLeaseName == leaseName { + return fmt.Errorf( + "expected originalLeaseName: %s, to not match current leaseName: %s", + originalLeaseName, + leaseName, + ) + } + + return nil } - restartContainer(ngfPodName, containerName) + BeforeAll(func() { + podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(podNames).To(HaveLen(1)) + + activeNGFPodName = podNames[0] + + ns = core.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "graceful-recovery", + }, + } + + Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + activeNginxPodName = nginxPodNames[0] + + setUpPortForward(activeNginxPodName, ns.Name) + + if portFwdPort != 0 { + coffeeURL = fmt.Sprintf("%s:%d/coffee", baseHTTPURL, portFwdPort) + } + if portFwdHTTPSPort != 0 { + teaURL = fmt.Sprintf("%s:%d/tea", baseHTTPSURL, portFwdHTTPSPort) + } - if containerName != nginxContainerName { Eventually( func() error { - return checkLeaderLeaseChange(leaseName) + return checkForWorkingTraffic(teaURL, coffeeURL) }). - WithTimeout(timeoutConfig.GetLeaderLeaseTimeout). + WithTimeout(timeoutConfig.TestForTrafficTimeout). WithPolling(500 * time.Millisecond). Should(Succeed()) - } + }) - checkNGFFunctionality(teaURL, coffeeURL, ngfPodName, containerName, files, ns) - if errorLogs := getUnexpectedNginxErrorLogs(ngfPodName); errorLogs != "" { - Skip(fmt.Sprintf("NGINX has unexpected error logs: \n%s", errorLogs)) - } -} + AfterAll(func() { + cleanUpPortForward() + Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) + Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) + }) -func restartContainer(ngfPodName, containerName string) { - var jobScript string - if containerName == "nginx" { - jobScript = "PID=$(pgrep -f \"nginx: master process\") && kill -9 $PID" - } else { - jobScript = "PID=$(pgrep -f \"/usr/bin/gateway\") && kill -9 $PID" - } + It("recovers when nginx container is restarted", func() { + restartNginxContainer(activeNginxPodName, ns.Name, nginxContainerName) - restartCount, err := getContainerRestartCount(ngfPodName, containerName) - Expect(err).ToNot(HaveOccurred()) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + activeNginxPodName = nginxPodNames[0] - job, err := runNodeDebuggerJob(ngfPodName, jobScript) - Expect(err).ToNot(HaveOccurred()) + setUpPortForward(activeNginxPodName, ns.Name) - Eventually( - func() error { - return checkContainerRestart(ngfPodName, containerName, restartCount) - }). - WithTimeout(timeoutConfig.ContainerRestartTimeout). - WithPolling(500 * time.Millisecond). - Should(Succeed()) - - // default propagation policy is metav1.DeletePropagationOrphan which does not delete the underlying - // pod created through the job after the job is deleted. Setting it to metav1.DeletePropagationBackground - // deletes the underlying pod after the job is deleted. 
- Expect(resourceManager.Delete( - []client.Object{job}, - client.PropagationPolicy(metav1.DeletePropagationBackground), - )).To(Succeed()) -} + // sets activeNginxPodName to new pod + checkNGFFunctionality(teaURL, coffeeURL, files, &ns) -func checkContainerRestart(ngfPodName, containerName string, currentRestartCount int) error { - restartCount, err := getContainerRestartCount(ngfPodName, containerName) - if err != nil { - return err - } + if errorLogs := getNGFErrorLogs(activeNGFPodName); errorLogs != "" { + fmt.Printf("NGF has error logs: \n%s", errorLogs) + } - if restartCount != currentRestartCount+1 { - return fmt.Errorf("expected current restart count: %d to match incremented restart count: %d", - restartCount, currentRestartCount+1) - } + if errorLogs := getUnexpectedNginxErrorLogs(activeNginxPodName, ns.Name); errorLogs != "" { + fmt.Printf("NGINX has unexpected error logs: \n%s", errorLogs) + } + }) - return nil -} + It("recovers when NGF Pod is restarted", func() { + leaseName, err := getLeaderElectionLeaseHolderName() + Expect(err).ToNot(HaveOccurred()) -func checkForWorkingTraffic(teaURL, coffeeURL string) error { - if err := expectRequestToSucceed(teaURL, address, "URI: /tea"); err != nil { - return err - } - if err := expectRequestToSucceed(coffeeURL, address, "URI: /coffee"); err != nil { - return err - } - return nil -} + ngfPod, err := resourceManager.GetPod(ngfNamespace, activeNGFPodName) + Expect(err).ToNot(HaveOccurred()) -func checkForFailingTraffic(teaURL, coffeeURL string) error { - if err := expectRequestToFail(teaURL, address); err != nil { - return err - } - if err := expectRequestToFail(coffeeURL, address); err != nil { - return err - } - return nil -} + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.DeleteTimeout) + defer cancel() + + Expect(k8sClient.Delete(ctx, ngfPod)).To(Succeed()) + + var newNGFPodNames []string + Eventually( + func() bool { + newNGFPodNames, err = framework.GetReadyNGFPodNames( + k8sClient, + ngfNamespace, + releaseName, + timeoutConfig.GetStatusTimeout, + ) + return len(newNGFPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout * 2). + WithPolling(500 * time.Millisecond). + MustPassRepeatedly(20). + Should(BeTrue()) + + newNGFPodName := newNGFPodNames[0] + Expect(newNGFPodName).ToNot(BeEmpty()) + + Expect(newNGFPodName).ToNot(Equal(activeNGFPodName)) + activeNGFPodName = newNGFPodName + + Eventually( + func() error { + return checkLeaderLeaseChange(leaseName) + }). + WithTimeout(timeoutConfig.GetLeaderLeaseTimeout). + WithPolling(500 * time.Millisecond). 
+ Should(Succeed()) + + // sets activeNginxPodName to new pod + checkNGFFunctionality(teaURL, coffeeURL, files, &ns) + + if errorLogs := getNGFErrorLogs(activeNGFPodName); errorLogs != "" { + fmt.Printf("NGF has error logs: \n%s", errorLogs) + } + + if errorLogs := getUnexpectedNginxErrorLogs(activeNginxPodName, ns.Name); errorLogs != "" { + fmt.Printf("NGINX has unexpected error logs: \n%s", errorLogs) + } + }) + + It("recovers when drained node is restarted", func() { + runRestartNodeWithDrainingTest(teaURL, coffeeURL, files, &ns) + }) + + It("recovers when node is restarted abruptly", func() { + if *plusEnabled { + Skip(fmt.Sprintf("Skipping test when using NGINX Plus due to known issue:" + + " https://github.com/nginx/nginx-gateway-fabric/issues/3248")) + } + runRestartNodeAbruptlyTest(teaURL, coffeeURL, files, &ns) + }) +}) func expectRequestToSucceed(appURL, address string, responseBodyMessage string) error { status, body, err := framework.Get(appURL, address, timeoutConfig.RequestTimeout, nil, nil) @@ -324,48 +550,10 @@ func expectRequestToFail(appURL, address string) error { return nil } -func checkNGFFunctionality(teaURL, coffeeURL, ngfPodName, containerName string, files []string, ns *core.Namespace) { - Eventually( - func() error { - return checkForWorkingTraffic(teaURL, coffeeURL) - }). - WithTimeout(timeoutConfig.TestForTrafficTimeout). - WithPolling(500 * time.Millisecond). - Should(Succeed()) - - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - - Eventually( - func() error { - return checkForFailingTraffic(teaURL, coffeeURL) - }). - WithTimeout(timeoutConfig.TestForTrafficTimeout). - WithPolling(500 * time.Millisecond). - Should(Succeed()) - - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReadyWithPodCount(ns.Name, 2)).To(Succeed()) - - Eventually( - func() error { - return checkForWorkingTraffic(teaURL, coffeeURL) - }). - WithTimeout(timeoutConfig.TestForTrafficTimeout). - WithPolling(500 * time.Millisecond). - Should(Succeed()) - - // When the NGINX process is killed, some errors are expected in the NGF logs while we wait for the - // NGINX container to be restarted. Therefore, we don't want to check the NGF logs for errors when the container - // we restarted was NGINX. 
- if containerName != nginxContainerName { - checkNGFContainerLogsForErrors(ngfPodName) - } -} - -func getNginxErrorLogs(ngfPodName string) string { +func getNginxErrorLogs(nginxPodName, namespace string) string { nginxLogs, err := resourceManager.GetPodLogs( - ngfNamespace, - ngfPodName, + namespace, + nginxPodName, &core.PodLogOptions{Container: nginxContainerName}, ) Expect(err).ToNot(HaveOccurred()) @@ -391,7 +579,7 @@ func getNginxErrorLogs(ngfPodName string) string { return errorLogs } -func getUnexpectedNginxErrorLogs(ngfPodName string) string { +func getUnexpectedNginxErrorLogs(nginxPodName, namespace string) string { expectedErrStrings := []string{ "connect() failed (111: Connection refused)", "could not be resolved (host not found) during usage report", @@ -403,7 +591,7 @@ func getUnexpectedNginxErrorLogs(ngfPodName string) string { unexpectedErrors := "" - errorLogs := getNginxErrorLogs(ngfPodName) + errorLogs := getNginxErrorLogs(nginxPodName, namespace) for _, line := range strings.Split(errorLogs, "\n") { if !slices.ContainsFunc(expectedErrStrings, func(s string) bool { @@ -416,8 +604,8 @@ func getUnexpectedNginxErrorLogs(ngfPodName string) string { return unexpectedErrors } -// checkNGFContainerLogsForErrors checks NGF container's logs for any possible errors. -func checkNGFContainerLogsForErrors(ngfPodName string) { +// getNGFErrorLogs gets NGF container error logs. +func getNGFErrorLogs(ngfPodName string) string { ngfLogs, err := resourceManager.GetPodLogs( ngfNamespace, ngfPodName, @@ -425,111 +613,28 @@ func checkNGFContainerLogsForErrors(ngfPodName string) { ) Expect(err).ToNot(HaveOccurred()) - for _, line := range strings.Split(ngfLogs, "\n") { - Expect(line).ToNot(ContainSubstring("\"level\":\"error\""), line) - } -} - -func checkLeaderLeaseChange(originalLeaseName string) error { - leaseName, err := getLeaderElectionLeaseHolderName() - if err != nil { - return err - } - - if originalLeaseName == leaseName { - return fmt.Errorf("expected originalLeaseName: %s, to not match current leaseName: %s", originalLeaseName, leaseName) - } - - return nil -} - -func getLeaderElectionLeaseHolderName() (string, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - - var lease coordination.Lease - key := types.NamespacedName{Name: "ngf-test-nginx-gateway-fabric-leader-election", Namespace: ngfNamespace} - - if err := k8sClient.Get(ctx, key, &lease); err != nil { - return "", errors.New("could not retrieve leader election lease") - } - - if *lease.Spec.HolderIdentity == "" { - return "", errors.New("leader election lease holder identity is empty") - } - - return *lease.Spec.HolderIdentity, nil -} - -func getContainerRestartCount(ngfPodName, containerName string) (int, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - - var ngfPod core.Pod - if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ngfNamespace, Name: ngfPodName}, &ngfPod); err != nil { - return 0, fmt.Errorf("error retrieving NGF Pod: %w", err) - } + errorLogs := "" - var restartCount int - for _, containerStatus := range ngfPod.Status.ContainerStatuses { - if containerStatus.Name == containerName { - restartCount = int(containerStatus.RestartCount) + for _, line := range strings.Split(ngfLogs, "\n") { + if strings.Contains(line, "\"level\":\"error\"") { + errorLogs += line + "\n" + break } } - return restartCount, nil -} - -func getNodeNames() ([]string, error) { - ctx, cancel := 
context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - var nodes core.NodeList - - if err := k8sClient.List(ctx, &nodes); err != nil { - return nil, fmt.Errorf("error listing nodes: %w", err) - } - - names := make([]string, 0, len(nodes.Items)) - - for _, node := range nodes.Items { - names = append(names, node.Name) - } - - return names, nil + return errorLogs } -func runNodeDebuggerJob(ngfPodName, jobScript string) (*v1.Job, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) - defer cancel() - - var ngfPod core.Pod - if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ngfNamespace, Name: ngfPodName}, &ngfPod); err != nil { - return nil, fmt.Errorf("error retrieving NGF Pod: %w", err) - } - - b, err := resourceManager.GetFileContents("graceful-recovery/node-debugger-job.yaml") - if err != nil { - return nil, fmt.Errorf("error processing node debugger job file: %w", err) - } - - job := &v1.Job{} - if err = yaml.Unmarshal(b.Bytes(), job); err != nil { - return nil, fmt.Errorf("error with yaml unmarshal: %w", err) - } - - job.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"] = ngfPod.Spec.NodeName - if len(job.Spec.Template.Spec.Containers) != 1 { - return nil, fmt.Errorf( - "expected node debugger job to contain one container, actual number: %d", - len(job.Spec.Template.Spec.Containers), - ) - } - job.Spec.Template.Spec.Containers[0].Args = []string{jobScript} - job.Namespace = ngfNamespace +// checkNGFContainerLogsForErrors checks NGF container's logs for any possible errors. +func checkNGFContainerLogsForErrors(ngfPodName string) { + ngfLogs, err := resourceManager.GetPodLogs( + ngfNamespace, + ngfPodName, + &core.PodLogOptions{Container: ngfContainerName}, + ) + Expect(err).ToNot(HaveOccurred()) - if err = resourceManager.Apply([]client.Object{job}); err != nil { - return nil, fmt.Errorf("error in applying job: %w", err) + for _, line := range strings.Split(ngfLogs, "\n") { + Expect(line).ToNot(ContainSubstring("\"level\":\"error\""), line) } - - return job, nil } diff --git a/tests/suite/manifests/tracing/nginxproxy.yaml b/tests/suite/manifests/tracing/nginxproxy.yaml deleted file mode 100644 index f4876eb186..0000000000 --- a/tests/suite/manifests/tracing/nginxproxy.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: gateway.nginx.org/v1alpha2 -kind: NginxProxy -metadata: - name: nginx-proxy -spec: - telemetry: - exporter: - endpoint: otel-collector-opentelemetry-collector.collector.svc:4317 - serviceName: my-test-svc - spanAttributes: - - key: testkey1 - value: testval1 diff --git a/tests/suite/reconfig_test.go b/tests/suite/reconfig_test.go index 7503aef764..fb4d6c02ce 100644 --- a/tests/suite/reconfig_test.go +++ b/tests/suite/reconfig_test.go @@ -406,7 +406,12 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r } checkNGFContainerLogsForErrors(ngfPodName) - nginxErrorLogs := getNginxErrorLogs(ngfPodName) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, reconfigNamespace.Name, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxErrorLogs := getNginxErrorLogs(nginxPodNames[0], reconfigNamespace.Name) reloadCount, err := framework.GetReloadCount(promInstance, ngfPodName) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/suite/sample_test.go b/tests/suite/sample_test.go index bd883ae710..3426ed973f 100644 --- a/tests/suite/sample_test.go +++ b/tests/suite/sample_test.go @@ -17,29 +17,39 
@@ import ( ) var _ = Describe("Basic test example", Label("functional"), func() { - files := []string{ - "hello-world/apps.yaml", - "hello-world/gateway.yaml", - "hello-world/routes.yaml", - } + var ( + files = []string{ + "hello-world/apps.yaml", + "hello-world/gateway.yaml", + "hello-world/routes.yaml", + } - var ns core.Namespace + namespace = "helloworld" + ) BeforeEach(func() { - ns = core.Namespace{ + ns := &core.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: "helloworld", + Name: namespace, }, } - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) }) AfterEach(func() { - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) + cleanUpPortForward() + + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) It("sends traffic", func() { diff --git a/tests/suite/snippets_filter_test.go b/tests/suite/snippets_filter_test.go index 1edf2b2fe3..39e099a8f6 100644 --- a/tests/suite/snippets_filter_test.go +++ b/tests/suite/snippets_filter_test.go @@ -10,7 +10,6 @@ import ( core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" @@ -28,6 +27,8 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter } namespace = "snippets-filter" + + nginxPodName string ) BeforeAll(func() { @@ -40,9 +41,19 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName = nginxPodNames[0] + + setUpPortForward(nginxPodName, namespace) }) AfterAll(func() { + cleanUpPortForward() + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) @@ -68,8 +79,11 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter for _, name := range snippetsFilterNames { nsname := types.NamespacedName{Name: name, Namespace: namespace} - err := waitForSnippetsFilterToBeAccepted(nsname) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("%s was not accepted", name)) + Eventually(checkForSnippetsFilterToBeAccepted). + WithArguments(nsname). + WithTimeout(timeoutConfig.GetStatusTimeout). + WithPolling(500*time.Millisecond). 
+ Should(Succeed(), fmt.Sprintf("%s was not accepted", name)) } }) @@ -104,13 +118,8 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter grpcRouteSuffix := fmt.Sprintf("%s_grpc-all-contexts.conf", namespace) BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) Expect(err).ToNot(HaveOccurred()) }) @@ -221,7 +230,11 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) nsname := types.NamespacedName{Name: "tea", Namespace: namespace} - Expect(waitForHTTPRouteToHaveGatewayNotProgrammedCond(nsname)).To(Succeed()) + Eventually(checkHTTPRouteToHaveGatewayNotProgrammedCond). + WithArguments(nsname). + WithTimeout(timeoutConfig.GetStatusTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) }) @@ -232,116 +245,99 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) nsname := types.NamespacedName{Name: "soda", Namespace: namespace} - Expect(waitForHTTPRouteToHaveGatewayNotProgrammedCond(nsname)).To(Succeed()) + Eventually(checkHTTPRouteToHaveGatewayNotProgrammedCond). + WithArguments(nsname). + WithTimeout(timeoutConfig.GetStatusTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) }) }) }) -func waitForHTTPRouteToHaveGatewayNotProgrammedCond(httpRouteNsName types.NamespacedName) error { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetStatusTimeout*2) +func checkHTTPRouteToHaveGatewayNotProgrammedCond(httpRouteNsName types.NamespacedName) error { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) defer cancel() GinkgoWriter.Printf( - "Waiting for HTTPRoute %q to have the condition Accepted/True/GatewayNotProgrammed\n", + "Checking for HTTPRoute %q to have the condition Accepted/True/GatewayNotProgrammed\n", httpRouteNsName, ) - return wait.PollUntilContextCancel( - ctx, - 500*time.Millisecond, - true, /* poll immediately */ - func(ctx context.Context) (bool, error) { - var hr v1.HTTPRoute - var err error + var hr v1.HTTPRoute + var err error - if err = k8sClient.Get(ctx, httpRouteNsName, &hr); err != nil { - return false, err - } + if err = k8sClient.Get(ctx, httpRouteNsName, &hr); err != nil { + return err + } - if len(hr.Status.Parents) == 0 { - return false, nil - } + if len(hr.Status.Parents) != 1 { + return fmt.Errorf("httproute has %d parent statuses, expected 1", len(hr.Status.Parents)) + } - if len(hr.Status.Parents) != 1 { - return false, fmt.Errorf("httproute has %d parent statuses, expected 1", len(hr.Status.Parents)) - } + parent := hr.Status.Parents[0] + if parent.Conditions == nil { + return fmt.Errorf("expected parent conditions to not be nil") + } - parent := hr.Status.Parents[0] - if parent.Conditions == nil { - return false, fmt.Errorf("expected parent conditions to not be nil") - } + cond := parent.Conditions[1] + if cond.Type != string(v1.GatewayConditionAccepted) { + return 
fmt.Errorf("expected condition type to be Accepted, got %s", cond.Type) + } - cond := parent.Conditions[1] - if cond.Type != string(v1.GatewayConditionAccepted) { - return false, fmt.Errorf("expected condition type to be Accepted, got %s", cond.Type) - } + if cond.Status != metav1.ConditionFalse { + return fmt.Errorf("expected condition status to be False, got %s", cond.Status) + } - if cond.Status != metav1.ConditionFalse { - return false, fmt.Errorf("expected condition status to be False, got %s", cond.Status) - } + if cond.Reason != string(conditions.RouteReasonGatewayNotProgrammed) { + return fmt.Errorf("expected condition reason to be GatewayNotProgrammed, got %s", cond.Reason) + } - if cond.Reason != string(conditions.RouteReasonGatewayNotProgrammed) { - return false, fmt.Errorf("expected condition reason to be GatewayNotProgrammed, got %s", cond.Reason) - } - return err == nil, err - }, - ) + return nil } -func waitForSnippetsFilterToBeAccepted(snippetsFilterNsNames types.NamespacedName) error { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetStatusTimeout) +func checkForSnippetsFilterToBeAccepted(snippetsFilterNsNames types.NamespacedName) error { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) defer cancel() GinkgoWriter.Printf( - "Waiting for SnippetsFilter %q to have the condition Accepted/True/Accepted\n", + "Checking for SnippetsFilter %q to have the condition Accepted/True/Accepted\n", snippetsFilterNsNames, ) - return wait.PollUntilContextCancel( - ctx, - 500*time.Millisecond, - true, /* poll immediately */ - func(ctx context.Context) (bool, error) { - var sf ngfAPI.SnippetsFilter - var err error + var sf ngfAPI.SnippetsFilter + var err error - if err = k8sClient.Get(ctx, snippetsFilterNsNames, &sf); err != nil { - return false, err - } - - if len(sf.Status.Controllers) == 0 { - return false, nil - } + if err = k8sClient.Get(ctx, snippetsFilterNsNames, &sf); err != nil { + return err + } - if len(sf.Status.Controllers) != 1 { - return false, fmt.Errorf("snippetsFilter has %d controller statuses, expected 1", len(sf.Status.Controllers)) - } + if len(sf.Status.Controllers) != 1 { + return fmt.Errorf("snippetsFilter has %d controller statuses, expected 1", len(sf.Status.Controllers)) + } - status := sf.Status.Controllers[0] - if status.ControllerName != ngfControllerName { - return false, fmt.Errorf("expected controller name to be %s, got %s", ngfControllerName, status.ControllerName) - } + status := sf.Status.Controllers[0] + if status.ControllerName != ngfControllerName { + return fmt.Errorf("expected controller name to be %s, got %s", ngfControllerName, status.ControllerName) + } - condition := status.Conditions[0] - if condition.Type != string(ngfAPI.SnippetsFilterConditionTypeAccepted) { - return false, fmt.Errorf("expected condition type to be Accepted, got %s", condition.Type) - } + condition := status.Conditions[0] + if condition.Type != string(ngfAPI.SnippetsFilterConditionTypeAccepted) { + return fmt.Errorf("expected condition type to be Accepted, got %s", condition.Type) + } - if status.Conditions[0].Status != metav1.ConditionTrue { - return false, fmt.Errorf("expected condition status to be %s, got %s", metav1.ConditionTrue, condition.Status) - } + if status.Conditions[0].Status != metav1.ConditionTrue { + return fmt.Errorf("expected condition status to be %s, got %s", metav1.ConditionTrue, condition.Status) + } - if status.Conditions[0].Reason != string(ngfAPI.SnippetsFilterConditionReasonAccepted) { 
- return false, fmt.Errorf( - "expected condition reason to be %s, got %s", - ngfAPI.SnippetsFilterConditionReasonAccepted, - condition.Reason, - ) - } + if status.Conditions[0].Reason != string(ngfAPI.SnippetsFilterConditionReasonAccepted) { + return fmt.Errorf( + "expected condition reason to be %s, got %s", + ngfAPI.SnippetsFilterConditionReasonAccepted, + condition.Reason, + ) + } - return err == nil, err - }, - ) + return nil } diff --git a/tests/suite/system_suite_test.go b/tests/suite/system_suite_test.go index 684ad1d998..39e9e15524 100644 --- a/tests/suite/system_suite_test.go +++ b/tests/suite/system_suite_test.go @@ -71,7 +71,7 @@ var ( var ( //go:embed manifests/* manifests embed.FS - k8sClient client.Client + k8sClient client.Client // TODO: are the k8sClient and the resourceManager.k8sClient the same? resourceManager framework.ResourceManager portForwardStopCh chan struct{} portFwdPort int @@ -185,20 +185,34 @@ func setup(cfg setupConfig, extraInstallArgs ...string) { ) Expect(err).ToNot(HaveOccurred()) Expect(podNames).ToNot(BeEmpty()) +} + +func setUpPortForward(nginxPodName, nginxNamespace string) { + var err error if *serviceType != "LoadBalancer" { ports := []string{fmt.Sprintf("%d:80", ngfHTTPForwardedPort), fmt.Sprintf("%d:443", ngfHTTPSForwardedPort)} portForwardStopCh = make(chan struct{}) - err = framework.PortForward(k8sConfig, installCfg.Namespace, podNames[0], ports, portForwardStopCh) + err = framework.PortForward(resourceManager.K8sConfig, nginxNamespace, nginxPodName, ports, portForwardStopCh) address = "127.0.0.1" portFwdPort = ngfHTTPForwardedPort portFwdHTTPSPort = ngfHTTPSForwardedPort } else { - address, err = resourceManager.GetLBIPAddress(installCfg.Namespace) + address, err = resourceManager.GetLBIPAddress(nginxNamespace) } Expect(err).ToNot(HaveOccurred()) } +// cleanUpPortForward closes the port forward channel and needs to be called before deleting any gateways or else +// the logs will be flooded with port forward errors. +func cleanUpPortForward() { + if portFwdPort != 0 { + close(portForwardStopCh) + portFwdPort = 0 + portFwdHTTPSPort = 0 + } +} + func createNGFInstallConfig(cfg setupConfig, extraInstallArgs ...string) framework.InstallationConfig { installCfg := framework.InstallationConfig{ ReleaseName: cfg.releaseName, @@ -252,12 +266,6 @@ func createNGFInstallConfig(cfg setupConfig, extraInstallArgs ...string) framewo } func teardown(relName string) { - if portFwdPort != 0 { - close(portForwardStopCh) - portFwdPort = 0 - portFwdHTTPSPort = 0 - } - cfg := framework.InstallationConfig{ ReleaseName: relName, Namespace: ngfNamespace, diff --git a/tests/suite/telemetry_test.go b/tests/suite/telemetry_test.go index ba15f130f1..c823cc590c 100644 --- a/tests/suite/telemetry_test.go +++ b/tests/suite/telemetry_test.go @@ -12,6 +12,10 @@ import ( ) var _ = Describe("Telemetry test with OTel collector", Label("telemetry"), func() { + // To run the tracing test, you must build NGF with the following values: + // TELEMETRY_ENDPOINT=otel-collector-opentelemetry-collector.collector.svc.cluster.local:4317 + // TELEMETRY_ENDPOINT_INSECURE = true + BeforeEach(func() { // Because NGF reports telemetry on start, we need to install the collector first. 
@@ -22,10 +26,9 @@ var _ = Describe("Telemetry test with OTel collector", Label("telemetry"), func( // Install NGF // Note: the BeforeSuite call doesn't install NGF for 'telemetry' label - setup( - getDefaultSetupCfg(), - "--set", "nginxGateway.productTelemetry.enable=true", - ) + cfg := getDefaultSetupCfg() + cfg.telemetry = true + setup(cfg) }) AfterEach(func() { @@ -86,7 +89,7 @@ var _ = Describe("Telemetry test with OTel collector", Label("telemetry"), func( "GatewayAttachedClientSettingsPolicyCount: Int(0)", "RouteAttachedClientSettingsPolicyCount: Int(0)", "ObservabilityPolicyCount: Int(0)", - "NginxProxyCount: Int(0)", + "NginxProxyCount: Int(1)", "SnippetsFilterCount: Int(0)", "UpstreamSettingsPolicyCount: Int(0)", "NGFReplicaCount: Int(1)", diff --git a/tests/suite/tracing_test.go b/tests/suite/tracing_test.go index e1d6aceff5..83c46d4cb4 100644 --- a/tests/suite/tracing_test.go +++ b/tests/suite/tracing_test.go @@ -19,6 +19,7 @@ import ( ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" "github.com/nginx/nginx-gateway-fabric/tests/framework" ) @@ -26,26 +27,58 @@ import ( // This test can be flaky when waiting to see traces show up in the collector logs. // Sometimes they get there right away, sometimes it takes 30 seconds. Retries were // added to attempt to mitigate the issue, but it didn't fix it 100%. -var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), func() { +var _ = Describe("Tracing", FlakeAttempts(2), Ordered, Label("functional", "tracing"), func() { + // To run the tracing test, you must build NGF with the following values: + // TELEMETRY_ENDPOINT=otel-collector-opentelemetry-collector.collector.svc.cluster.local:4317 + // TELEMETRY_ENDPOINT_INSECURE = true + var ( files = []string{ "hello-world/apps.yaml", "hello-world/gateway.yaml", "hello-world/routes.yaml", } - nginxProxyFile = "tracing/nginxproxy.yaml" policySingleFile = "tracing/policy-single.yaml" policyMultipleFile = "tracing/policy-multiple.yaml" - ns core.Namespace + namespace = "helloworld" collectorPodName, helloURL, worldURL, helloworldURL string ) + updateNginxProxyTelemetrySpec := func(telemetry ngfAPIv1alpha2.Telemetry) { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.UpdateTimeout) + defer cancel() + + key := types.NamespacedName{Name: "ngf-test-proxy-config", Namespace: "nginx-gateway"} + var nginxProxy ngfAPIv1alpha2.NginxProxy + Expect(k8sClient.Get(ctx, key, &nginxProxy)).To(Succeed()) + + nginxProxy.Spec.Telemetry = &telemetry + + Expect(k8sClient.Update(ctx, &nginxProxy)).To(Succeed()) + } + + BeforeAll(func() { + telemetry := ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("otel-collector-opentelemetry-collector.collector.svc:4317"), + }, + ServiceName: helpers.GetPointer("my-test-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{{ + Key: "testkey1", + Value: "testval1", + }}, + } + + updateNginxProxyTelemetrySpec(telemetry) + }) + + // BeforeEach is needed because FlakeAttempts do not re-run BeforeAll/AfterAll nodes BeforeEach(func() { - ns = core.Namespace{ + ns := &core.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: "helloworld", + Name: namespace, }, } @@ -55,9 +88,15 @@ var _ = Describe("Tracing", FlakeAttempts(2), 
Label("functional", "tracing"), fu collectorPodName, err = framework.GetCollectorPodName(resourceManager) Expect(err).ToNot(HaveOccurred()) - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], namespace) url := "http://foo.example.com" helloURL = url + "/hello" @@ -74,41 +113,17 @@ var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), fu output, err := framework.UninstallCollector(resourceManager) Expect(err).ToNot(HaveOccurred(), string(output)) - Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteFromFiles( - []string{nginxProxyFile, policySingleFile, policyMultipleFile}, ns.Name)).To(Succeed()) - Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) - - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout) - defer cancel() - - key := types.NamespacedName{Name: gatewayClassName} - var gwClass gatewayv1.GatewayClass - Expect(k8sClient.Get(ctx, key, &gwClass)).To(Succeed()) + cleanUpPortForward() - gwClass.Spec.ParametersRef = nil - - Expect(k8sClient.Update(ctx, &gwClass)).To(Succeed()) + Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) + Expect(resourceManager.DeleteFromFiles( + []string{policySingleFile, policyMultipleFile}, namespace)).To(Succeed()) + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) - updateGatewayClass := func() error { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout) - defer cancel() - - key := types.NamespacedName{Name: gatewayClassName} - var gwClass gatewayv1.GatewayClass - if err := k8sClient.Get(ctx, key, &gwClass); err != nil { - return err - } - - gwClass.Spec.ParametersRef = &gatewayv1.ParametersReference{ - Group: ngfAPIv1alpha1.GroupName, - Kind: gatewayv1.Kind("NginxProxy"), - Name: "nginx-proxy", - } - - return k8sClient.Update(ctx, &gwClass) - } + AfterAll(func() { + updateNginxProxyTelemetrySpec(ngfAPIv1alpha2.Telemetry{}) + }) sendRequests := func(url string, count int) { for range count { @@ -168,11 +183,9 @@ var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), fu // install tracing configuration traceFiles := []string{ - nginxProxyFile, policySingleFile, } - Expect(resourceManager.ApplyFromFiles(traceFiles, ns.Name)).To(Succeed()) - Expect(updateGatewayClass()).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(traceFiles, namespace)).To(Succeed()) checkStatusAndTraces() @@ -192,11 +205,9 @@ var _ = Describe("Tracing", FlakeAttempts(2), Label("functional", "tracing"), fu It("sends tracing spans for one policy attached to multiple routes", func() { // install tracing configuration traceFiles := []string{ - nginxProxyFile, policyMultipleFile, } - Expect(resourceManager.ApplyFromFiles(traceFiles, ns.Name)).To(Succeed()) - Expect(updateGatewayClass()).To(Succeed()) + Expect(resourceManager.ApplyFromFiles(traceFiles, namespace)).To(Succeed()) 
checkStatusAndTraces() diff --git a/tests/suite/upstream_settings_test.go b/tests/suite/upstream_settings_test.go index 23fceb768d..dad10bc0e5 100644 --- a/tests/suite/upstream_settings_test.go +++ b/tests/suite/upstream_settings_test.go @@ -31,6 +31,8 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic namespace = "uspolicy" gatewayName = "gateway" + + nginxPodName string ) zoneSize := "512k" @@ -48,9 +50,19 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName = nginxPodNames[0] + + setUpPortForward(nginxPodName, namespace) }) AfterAll(func() { + cleanUpPortForward() + Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) }) @@ -117,13 +129,8 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic var conf *framework.Payload BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) Expect(err).ToNot(HaveOccurred()) }) @@ -302,13 +309,8 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic var conf *framework.Payload BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - - ngfPodName := podNames[0] - - conf, err = resourceManager.GetNginxConfig(ngfPodName, ngfNamespace) + var err error + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) Expect(err).ToNot(HaveOccurred()) }) From 8e83ddd34844125482aaa5c1d4d31870f7fb837c Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 24 Mar 2025 10:51:38 -0600 Subject: [PATCH 17/32] CP/DP split: Secure connection (#3244) Problem: We want to ensure that the connection between the control plane and data plane is authenticated and secure. Solution: 1. Configure agent to send the kubernetes service token in the request. The control plane validates this token using the TokenReview API to ensure the agent is authenticated. 2. Configure TLS certificates for both the control and data planes. By default, a Job will run when installing NGF that creates self-signed certificates in the nginx-gateway namespace. The server Secret is mounted to the control plane, and the control plane copies the client Secret when deploying nginx resources. This Secret is mounted to the agent. The control plane will reset the agent connection if it detects that its own certs have changed. For production environments, we'll recommend a user configures TLS using cert-manager instead, for better security and certificate rotation. 
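As an illustrative sketch only (not code from this patch), the TokenReview step can be
pictured with client-go roughly as follows. The helper name validateAgentToken and the
clientset parameter are placeholders; the real wiring lives in the gRPC interceptor and
cert-generator changes listed below, and it relies on the new ClusterRole rule allowing
create on tokenreviews.

    import (
        "context"
        "errors"
        "fmt"

        authv1 "k8s.io/api/authentication/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // validateAgentToken (hypothetical name) submits the ServiceAccount token
    // presented by the agent to the TokenReview API and rejects the connection
    // if the API server does not authenticate it.
    func validateAgentToken(ctx context.Context, clientset kubernetes.Interface, token string) error {
        review := &authv1.TokenReview{
            Spec: authv1.TokenReviewSpec{Token: token},
        }

        result, err := clientset.AuthenticationV1().TokenReviews().Create(ctx, review, metav1.CreateOptions{})
        if err != nil {
            return fmt.Errorf("creating TokenReview: %w", err)
        }

        if !result.Status.Authenticated {
            return errors.New("agent token was not authenticated by the API server")
        }

        return nil
    }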
--- charts/nginx-gateway-fabric/README.md | 6 + .../templates/certs-job.yaml | 112 +++++++ .../templates/clusterrole.yaml | 6 + .../templates/deployment.yaml | 8 + .../nginx-gateway-fabric/templates/scc.yaml | 2 + .../nginx-gateway-fabric/values.schema.json | 42 +++ charts/nginx-gateway-fabric/values.yaml | 21 ++ cmd/gateway/certs.go | 230 ++++++++++++++ cmd/gateway/certs_test.go | 139 +++++++++ cmd/gateway/commands.go | 160 ++++++++-- cmd/gateway/commands_test.go | 132 +++++++- cmd/gateway/main.go | 1 + deploy/aws-nlb/deploy.yaml | 107 +++++++ deploy/azure/deploy.yaml | 107 +++++++ deploy/default/deploy.yaml | 107 +++++++ deploy/experimental-nginx-plus/deploy.yaml | 107 +++++++ deploy/experimental/deploy.yaml | 107 +++++++ deploy/nginx-plus/deploy.yaml | 107 +++++++ deploy/nodeport/deploy.yaml | 107 +++++++ deploy/openshift/deploy.yaml | 109 +++++++ .../snippets-filters-nginx-plus/deploy.yaml | 107 +++++++ deploy/snippets-filters/deploy.yaml | 107 +++++++ go.mod | 2 +- internal/framework/controller/index/pod.go | 19 ++ .../framework/controller/index/pod_test.go | 53 ++++ internal/framework/controller/register.go | 4 +- internal/mode/static/config/config.go | 2 + internal/mode/static/manager.go | 27 ++ internal/mode/static/nginx/agent/agent.go | 2 + .../mode/static/nginx/agent/agent_test.go | 4 +- internal/mode/static/nginx/agent/command.go | 7 +- .../mode/static/nginx/agent/command_test.go | 79 +++++ .../static/nginx/agent/grpc/connections.go | 3 - .../nginx/agent/grpc/context/context.go | 1 + .../nginx/agent/grpc/filewatcher/doc.go | 4 + .../agent/grpc/filewatcher/filewatcher.go | 106 +++++++ .../grpc/filewatcher/filewatcher_test.go | 69 +++++ internal/mode/static/nginx/agent/grpc/grpc.go | 74 ++++- .../agent/grpc/interceptor/interceptor.go | 143 ++++++++- .../grpc/interceptor/interceptor_test.go | 292 ++++++++++++++++++ internal/mode/static/provisioner/eventloop.go | 4 +- internal/mode/static/provisioner/handler.go | 9 +- .../mode/static/provisioner/handler_test.go | 19 +- internal/mode/static/provisioner/objects.go | 74 ++++- .../mode/static/provisioner/objects_test.go | 139 ++++++--- .../mode/static/provisioner/provisioner.go | 16 +- .../static/provisioner/provisioner_test.go | 49 ++- internal/mode/static/provisioner/store.go | 18 +- .../mode/static/provisioner/store_test.go | 45 ++- internal/mode/static/provisioner/templates.go | 7 + 50 files changed, 3082 insertions(+), 120 deletions(-) create mode 100644 charts/nginx-gateway-fabric/templates/certs-job.yaml create mode 100644 cmd/gateway/certs.go create mode 100644 cmd/gateway/certs_test.go create mode 100644 internal/framework/controller/index/pod.go create mode 100644 internal/framework/controller/index/pod_test.go create mode 100644 internal/mode/static/nginx/agent/grpc/filewatcher/doc.go create mode 100644 internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher.go create mode 100644 internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher_test.go create mode 100644 internal/mode/static/nginx/agent/grpc/interceptor/interceptor_test.go diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 3d0d8fc235..927d4779d6 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -258,6 +258,12 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | Key | Description | Type | Default | |-----|-------------|------|---------| +| `certGenerator` | The certGenerator section contains the configuration for the cert-generator 
Job. | object | `{"agentTLSSecretName":"agent-tls","annotations":{},"overwrite":false,"serverTLSSecretName":"server-tls"}` | +| `certGenerator.agentTLSSecretName` | The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely communicate with the NGINX Gateway Fabric control plane. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"agent-tls"` | +| `certGenerator.annotations` | The annotations of the cert-generator Job. | object | `{}` | +| `certGenerator.overwrite` | Overwrite existing TLS Secrets on startup. | bool | `false` | +| `certGenerator.serverTLSSecretName` | The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane to securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"server-tls"` | +| `clusterDomain` | The DNS cluster domain of your Kubernetes cluster. | string | `"cluster.local"` | | `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"config":{},"container":{},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","plus":false,"pod":{},"replicas":1,"service":{"externalTrafficPolicy":"Local","type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` | | `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. | object | `{}` | | `nginx.container` | The container configuration for the NGINX container. | object | `{}` | diff --git a/charts/nginx-gateway-fabric/templates/certs-job.yaml b/charts/nginx-gateway-fabric/templates/certs-job.yaml new file mode 100644 index 0000000000..a2b529ae1b --- /dev/null +++ b/charts/nginx-gateway-fabric/templates/certs-job.yaml @@ -0,0 +1,112 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install +{{- if or .Values.nginxGateway.serviceAccount.imagePullSecret .Values.nginxGateway.serviceAccount.imagePullSecrets }} +imagePullSecrets: + {{- if .Values.nginxGateway.serviceAccount.imagePullSecret }} + - name: {{ .Values.nginxGateway.serviceAccount.imagePullSecret }} + {{- end }} + {{- if .Values.nginxGateway.serviceAccount.imagePullSecrets }} + {{- range .Values.nginxGateway.serviceAccount.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": pre-install +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "nginx-gateway.fullname" . }}-cert-generator +subjects: +- kind: ServiceAccount + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "nginx-gateway.fullname" . }}-cert-generator + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} + annotations: + {{- with .Values.certGenerator.annotations -}} + {{ toYaml . | nindent 4 }} + {{- end }} + "helm.sh/hook": pre-install, pre-upgrade +spec: + template: + metadata: + annotations: + {{- with .Values.certGenerator.annotations -}} + {{ toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - args: + - generate-certs + - --service={{ include "nginx-gateway.fullname" . }} + - --cluster-domain={{ .Values.clusterDomain }} + - --server-tls-secret={{ .Values.certGenerator.serverTLSSecretName }} + - --agent-tls-secret={{ .Values.certGenerator.agentTLSSecretName }} + {{- if .Values.certGenerator.overwrite }} + - --overwrite + {{- end }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: {{ .Values.nginxGateway.image.repository }}:{{ default .Chart.AppVersion .Values.nginxGateway.image.tag }} + imagePullPolicy: {{ .Values.nginxGateway.image.pullPolicy }} + name: cert-generator + securityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 101 + runAsGroup: 1001 + restartPolicy: Never + serviceAccountName: {{ include "nginx-gateway.fullname" . }}-cert-generator + securityContext: + fsGroup: 1001 + runAsNonRoot: true + ttlSecondsAfterFinished: 0 diff --git a/charts/nginx-gateway-fabric/templates/clusterrole.yaml b/charts/nginx-gateway-fabric/templates/clusterrole.yaml index 2ae9c5a2c0..479c22adbc 100644 --- a/charts/nginx-gateway-fabric/templates/clusterrole.yaml +++ b/charts/nginx-gateway-fabric/templates/clusterrole.yaml @@ -59,6 +59,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index e29710d5fd..e742e99ec5 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -42,6 +42,7 @@ spec: - --gatewayclass={{ .Values.nginxGateway.gatewayClassName }} - --config={{ include "nginx-gateway.config-name" . }} - --service={{ include "nginx-gateway.fullname" . }} + - --agent-tls-secret={{ .Values.certGenerator.agentTLSSecretName }} {{- if .Values.nginx.imagePullSecret }} - --nginx-docker-secret={{ .Values.nginx.imagePullSecret }} {{- end }} @@ -155,6 +156,9 @@ spec: readOnlyRootFilesystem: true runAsUser: 101 runAsGroup: 1001 + volumeMounts: + - name: nginx-agent-tls + mountPath: /var/run/secrets/ngf {{- with .Values.nginxGateway.extraVolumeMounts -}} {{ toYaml . | nindent 8 }} {{- end }} @@ -179,6 +183,10 @@ spec: nodeSelector: {{- toYaml .Values.nginxGateway.nodeSelector | nindent 8 }} {{- end }} + volumes: + - name: nginx-agent-tls + secret: + secretName: {{ .Values.certGenerator.serverTLSSecretName }} {{- with .Values.nginxGateway.extraVolumes -}} {{ toYaml . 
| nindent 6 }} {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/scc.yaml b/charts/nginx-gateway-fabric/templates/scc.yaml index 6ab7dc92c1..1564e84e32 100644 --- a/charts/nginx-gateway-fabric/templates/scc.yaml +++ b/charts/nginx-gateway-fabric/templates/scc.yaml @@ -34,4 +34,6 @@ users: - {{ printf "system:serviceaccount:%s:%s" .Release.Namespace (include "nginx-gateway.serviceAccountName" .) }} requiredDropCapabilities: - ALL +volumes: +- secret {{- end }} diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index 9824d6ca3c..ae9d507656 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -1,6 +1,48 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { + "certGenerator": { + "description": "The certGenerator section contains the configuration for the cert-generator Job.", + "properties": { + "agentTLSSecretName": { + "default": "agent-tls", + "description": "The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely\ncommunicate with the NGINX Gateway Fabric control plane. Must exist in the same namespace that the\nNGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).", + "required": [], + "title": "agentTLSSecretName", + "type": "string" + }, + "annotations": { + "description": "The annotations of the cert-generator Job.", + "required": [], + "title": "annotations", + "type": "object" + }, + "overwrite": { + "default": false, + "description": "Overwrite existing TLS Secrets on startup.", + "required": [], + "title": "overwrite", + "type": "boolean" + }, + "serverTLSSecretName": { + "default": "server-tls", + "description": "The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane\nto securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric\ncontrol plane is running in (default namespace: nginx-gateway).", + "required": [], + "title": "serverTLSSecretName", + "type": "string" + } + }, + "required": [], + "title": "certGenerator", + "type": "object" + }, + "clusterDomain": { + "default": "cluster.local", + "description": "The DNS cluster domain of your Kubernetes cluster.", + "required": [], + "title": "clusterDomain", + "type": "string" + }, "global": { "description": "Global values are values that can be accessed from any chart or subchart by exactly the same name.", "required": [], diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index fadd9f4489..a1197593d3 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -1,5 +1,8 @@ # yaml-language-server: $schema=values.schema.json +# -- The DNS cluster domain of your Kubernetes cluster. +clusterDomain: cluster.local + # -- The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment. nginxGateway: # FIXME(lucacome): https://github.com/nginx/nginx-gateway-fabric/issues/2490 @@ -429,3 +432,21 @@ nginx: # -- Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. debug: false + +# -- The certGenerator section contains the configuration for the cert-generator Job. +certGenerator: + # -- The annotations of the cert-generator Job. 
+ annotations: {} + + # -- The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane + # to securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric + # control plane is running in (default namespace: nginx-gateway). + serverTLSSecretName: server-tls + + # -- The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely + # communicate with the NGINX Gateway Fabric control plane. Must exist in the same namespace that the + # NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). + agentTLSSecretName: agent-tls + + # -- Overwrite existing TLS Secrets on startup. + overwrite: false diff --git a/cmd/gateway/certs.go b/cmd/gateway/certs.go new file mode 100644 index 0000000000..6f7a22d97e --- /dev/null +++ b/cmd/gateway/certs.go @@ -0,0 +1,230 @@ +package main + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" //nolint:gosec // using sha1 in this case is fine + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + ctlrZap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +const ( + expiry = 365 * 3 * 24 * time.Hour // 3 years + defaultDomain = "cluster.local" +) + +var subject = pkix.Name{ + CommonName: "nginx-gateway", + Country: []string{"US"}, + Locality: []string{"SEA"}, + Organization: []string{"F5"}, + OrganizationalUnit: []string{"NGINX"}, +} + +type certificateConfig struct { + caCertificate []byte + serverCertificate []byte + serverKey []byte + clientCertificate []byte + clientKey []byte +} + +// generateCertificates creates a CA, server, and client certificates and keys. 
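// Illustrative aside, not part of this patch: a minimal sketch of how the control
// plane side could assemble an mTLS server config from the Secret mounted above.
// The /var/run/secrets/ngf directory and the ca.crt/tls.crt/tls.key data keys are
// taken from the deployment template and createSecrets below; the function name
// serverTLSConfig and everything else here are hypothetical.
package sketch

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
	"path/filepath"
)

func serverTLSConfig(dir string) (*tls.Config, error) {
	// Load the server certificate and key written by the cert-generator Job.
	cert, err := tls.LoadX509KeyPair(filepath.Join(dir, "tls.crt"), filepath.Join(dir, "tls.key"))
	if err != nil {
		return nil, fmt.Errorf("loading server keypair: %w", err)
	}

	// Trust only the generated CA when verifying agent (client) certificates.
	caPEM, err := os.ReadFile(filepath.Join(dir, "ca.crt"))
	if err != nil {
		return nil, fmt.Errorf("reading CA certificate: %w", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no CA certificates found in ca.crt")
	}

	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		ClientCAs:    pool,
		ClientAuth:   tls.RequireAndVerifyClientCert,
		MinVersion:   tls.VersionTLS13,
	}, nil
}

// Example usage (hypothetical): cfg, err := serverTLSConfig("/var/run/secrets/ngf")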
+func generateCertificates(service, namespace, clientDNSDomain string) (*certificateConfig, error) { + caCertPEM, caKeyPEM, err := generateCA() + if err != nil { + return nil, fmt.Errorf("error generating CA: %w", err) + } + + caKeyPair, err := tls.X509KeyPair(caCertPEM, caKeyPEM) + if err != nil { + return nil, err + } + + serverCert, serverKey, err := generateCert(caKeyPair, serverDNSNames(service, namespace)) + if err != nil { + return nil, fmt.Errorf("error generating server cert: %w", err) + } + + clientCert, clientKey, err := generateCert(caKeyPair, clientDNSNames(clientDNSDomain)) + if err != nil { + return nil, fmt.Errorf("error generating client cert: %w", err) + } + + return &certificateConfig{ + caCertificate: caCertPEM, + serverCertificate: serverCert, + serverKey: serverKey, + clientCertificate: clientCert, + clientKey: clientKey, + }, nil +} + +func generateCA() ([]byte, []byte, error) { + caKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + ca := &x509.Certificate{ + Subject: subject, + NotBefore: time.Now(), + NotAfter: time.Now().Add(expiry), + SubjectKeyId: subjectKeyID(caKey.N), + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + IsCA: true, + BasicConstraintsValid: true, + } + + caCertBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caKey.PublicKey, caKey) + if err != nil { + return nil, nil, err + } + + caCertPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: caCertBytes, + }) + + caKeyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(caKey), + }) + + return caCertPEM, caKeyPEM, nil +} + +func generateCert(caKeyPair tls.Certificate, dnsNames []string) ([]byte, []byte, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + cert := &x509.Certificate{ + Subject: subject, + NotBefore: time.Now(), + NotAfter: time.Now().Add(expiry), + SubjectKeyId: subjectKeyID(key.N), + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + DNSNames: dnsNames, + } + + caCert, err := x509.ParseCertificate(caKeyPair.Certificate[0]) + if err != nil { + return nil, nil, err + } + + certBytes, err := x509.CreateCertificate(rand.Reader, cert, caCert, &key.PublicKey, caKeyPair.PrivateKey) + if err != nil { + return nil, nil, err + } + + certPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }) + + keyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(key), + }) + + return certPEM, keyPEM, nil +} + +// subjectKeyID generates the SubjectKeyID using the modulus of the private key. 
+func subjectKeyID(n *big.Int) []byte { + h := sha1.New() //nolint:gosec // using sha1 in this case is fine + h.Write(n.Bytes()) + return h.Sum(nil) +} + +func serverDNSNames(service, namespace string) []string { + return []string{ + fmt.Sprintf("%s.%s.svc", service, namespace), + } +} + +func clientDNSNames(dnsDomain string) []string { + return []string{ + fmt.Sprintf("*.%s", dnsDomain), + } +} + +func createSecrets( + ctx context.Context, + k8sClient client.Client, + certConfig *certificateConfig, + serverSecretName, + clientSecretName, + namespace string, + overwrite bool, +) error { + serverSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: serverSecretName, + Namespace: namespace, + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + "ca.crt": certConfig.caCertificate, + "tls.crt": certConfig.serverCertificate, + "tls.key": certConfig.serverKey, + }, + } + + clientSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientSecretName, + Namespace: namespace, + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + "ca.crt": certConfig.caCertificate, + "tls.crt": certConfig.clientCertificate, + "tls.key": certConfig.clientKey, + }, + } + + logger := ctlrZap.New().WithName("cert-generator") + for _, secret := range []corev1.Secret{serverSecret, clientSecret} { + key := client.ObjectKeyFromObject(&secret) + currentSecret := &corev1.Secret{} + + if err := k8sClient.Get(ctx, key, currentSecret); err != nil { + if apierrors.IsNotFound(err) { + if err := k8sClient.Create(ctx, &secret); err != nil { + return fmt.Errorf("error creating secret %v: %w", key, err) + } + } else { + return fmt.Errorf("error getting secret %v: %w", key, err) + } + } else { + if !overwrite { + logger.Info("Skipping updating Secret. Must be updated manually or by another source.", "name", key) + continue + } + + if !reflect.DeepEqual(secret.Data, currentSecret.Data) { + if err := k8sClient.Update(ctx, &secret); err != nil { + return fmt.Errorf("error updating secret %v: %w", key, err) + } + } + } + } + + return nil +} diff --git a/cmd/gateway/certs_test.go b/cmd/gateway/certs_test.go new file mode 100644 index 0000000000..4a9bfbe164 --- /dev/null +++ b/cmd/gateway/certs_test.go @@ -0,0 +1,139 @@ +package main + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestGenerateCertificates(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + certConfig, err := generateCertificates("nginx", "default", "cluster.local") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(certConfig).ToNot(BeNil()) + g.Expect(certConfig.caCertificate).ToNot(BeNil()) + g.Expect(certConfig.serverCertificate).ToNot(BeNil()) + g.Expect(certConfig.serverKey).ToNot(BeNil()) + g.Expect(certConfig.clientCertificate).ToNot(BeNil()) + g.Expect(certConfig.clientKey).ToNot(BeNil()) + + block, _ := pem.Decode(certConfig.caCertificate) + g.Expect(block).ToNot(BeNil()) + caCert, err := x509.ParseCertificate(block.Bytes) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(caCert.IsCA).To(BeTrue()) + + pool := x509.NewCertPool() + g.Expect(pool.AppendCertsFromPEM(certConfig.caCertificate)).To(BeTrue()) + + block, _ = pem.Decode(certConfig.serverCertificate) + g.Expect(block).ToNot(BeNil()) + serverCert, err := x509.ParseCertificate(block.Bytes) + g.Expect(err).ToNot(HaveOccurred()) + + _, err = serverCert.Verify(x509.VerifyOptions{ + DNSName: "nginx.default.svc", + Roots: pool, + }) + g.Expect(err).ToNot(HaveOccurred()) + + block, _ = pem.Decode(certConfig.clientCertificate) + g.Expect(block).ToNot(BeNil()) + clientCert, err := x509.ParseCertificate(block.Bytes) + g.Expect(err).ToNot(HaveOccurred()) + + _, err = clientCert.Verify(x509.VerifyOptions{ + DNSName: "*.cluster.local", + Roots: pool, + }) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestCreateSecrets(t *testing.T) { + t.Parallel() + + fakeClient := fake.NewFakeClient() + + tests := []struct { + name string + overwrite bool + }{ + { + name: "doesn't overwrite on updates", + overwrite: false, + }, + { + name: "overwrites on updates", + overwrite: true, + }, + } + + verifySecrets := func(g *WithT, name string, overwrite bool) { + certConfig, err := generateCertificates("nginx", "default", "cluster.local") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(certConfig).ToNot(BeNil()) + + serverSecretName := fmt.Sprintf("%s-server-secret", name) + clientSecretName := fmt.Sprintf("%s-client-secret", name) + err = createSecrets(t.Context(), fakeClient, certConfig, serverSecretName, clientSecretName, "default", overwrite) + g.Expect(err).ToNot(HaveOccurred()) + + serverSecret := &corev1.Secret{} + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: serverSecretName, Namespace: "default"}, serverSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(serverSecret.Data["ca.crt"]).To(Equal(certConfig.caCertificate)) + g.Expect(serverSecret.Data["tls.crt"]).To(Equal(certConfig.serverCertificate)) + g.Expect(serverSecret.Data["tls.key"]).To(Equal(certConfig.serverKey)) + + clientSecret := &corev1.Secret{} + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: clientSecretName, Namespace: "default"}, clientSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(clientSecret.Data["ca.crt"]).To(Equal(certConfig.caCertificate)) + g.Expect(clientSecret.Data["tls.crt"]).To(Equal(certConfig.clientCertificate)) + g.Expect(clientSecret.Data["tls.key"]).To(Equal(certConfig.clientKey)) + + // If overwrite is false, then no updates should occur. If true, then updates should occur. 
+ newCertConfig, err := generateCertificates("nginx", "default", "new-DNS-name") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(newCertConfig).ToNot(BeNil()) + g.Expect(newCertConfig).ToNot(Equal(certConfig)) + + err = createSecrets(t.Context(), fakeClient, newCertConfig, serverSecretName, clientSecretName, "default", overwrite) + g.Expect(err).ToNot(HaveOccurred()) + + expCertConfig := certConfig + if overwrite { + expCertConfig = newCertConfig + } + + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: serverSecretName, Namespace: "default"}, serverSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(serverSecret.Data["tls.crt"]).To(Equal(expCertConfig.serverCertificate)) + + err = fakeClient.Get(t.Context(), client.ObjectKey{Name: clientSecretName, Namespace: "default"}, clientSecret) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(clientSecret.Data["tls.crt"]).To(Equal(expCertConfig.clientCertificate)) + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + name := "no-overwrite" + if test.overwrite { + name = "overwrite" + } + + verifySecrets(g, name, test.overwrite) + }) + } +} diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index efa990e29d..4d875cb6ac 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -17,6 +17,7 @@ import ( "k8s.io/klog/v2" ctlr "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + k8sConfig "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/log" ctlrZap "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -37,6 +38,9 @@ const ( gatewayCtlrNameUsageFmt = `The name of the Gateway controller. ` + `The controller name must be of the form: DOMAIN/PATH. The controller's domain is '%s'` plusFlag = "nginx-plus" + + serverTLSSecret = "server-tls" + agentTLSSecret = "agent-tls" ) func createRootCommand() *cobra.Command { @@ -58,6 +62,7 @@ func createControllerCommand() *cobra.Command { gatewayFlag = "gateway" configFlag = "config" serviceFlag = "service" + agentTLSSecretFlag = "agent-tls-secret" updateGCStatusFlag = "update-gatewayclass-status" metricsDisableFlag = "metrics-disable" metricsSecureFlag = "metrics-secure-serving" @@ -96,6 +101,10 @@ func createControllerCommand() *cobra.Command { serviceName = stringValidatingValue{ validator: validateResourceName, } + agentTLSSecretName = stringValidatingValue{ + validator: validateResourceName, + value: agentTLSSecret, + } disableMetrics bool metricsSecure bool metricsListenPort = intValidatingValue{ @@ -254,6 +263,7 @@ func createControllerCommand() *cobra.Command { }, SnippetsFilters: snippetsFilters, NginxDockerSecretNames: nginxDockerSecrets.values, + AgentTLSSecretName: agentTLSSecretName.value, } if err := static.StartManager(conf); err != nil { @@ -303,6 +313,14 @@ func createControllerCommand() *cobra.Command { ` Lives in the same Namespace as the controller.`, ) + cmd.Flags().Var( + &agentTLSSecretName, + agentTLSSecretFlag, + `The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely `+ + `communicate with the NGINX Gateway Fabric control plane. 
Must exist in the same namespace that the `+ + `NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).`, + ) + cmd.Flags().BoolVar( &updateGCStatus, updateGCStatusFlag, @@ -442,32 +460,105 @@ func createControllerCommand() *cobra.Command { return cmd } -// FIXME(pleshakov): Remove this command once NGF min supported Kubernetes version supports sleep action in -// preStop hook. -// See https://github.com/kubernetes/enhancements/tree/4ec371d92dcd4f56a2ab18c8ba20bb85d8d20efe/keps/sig-node/3960-pod-lifecycle-sleep-action -// -//nolint:lll -func createSleepCommand() *cobra.Command { +func createGenerateCertsCommand() *cobra.Command { // flag names - const durationFlag = "duration" + const ( + serverTLSSecretFlag = "server-tls-secret" //nolint:gosec // not credentials + agentTLSSecretFlag = "agent-tls-secret" + serviceFlag = "service" + clusterDomainFlag = "cluster-domain" + overwriteFlag = "overwrite" + ) + // flag values - var duration time.Duration + var ( + serverTLSSecretName = stringValidatingValue{ + validator: validateResourceName, + value: serverTLSSecret, + } + agentTLSSecretName = stringValidatingValue{ + validator: validateResourceName, + value: agentTLSSecret, + } + serviceName = stringValidatingValue{ + validator: validateResourceName, + } + clusterDomain = stringValidatingValue{ + validator: validateQualifiedName, + value: defaultDomain, + } + overwrite bool + ) cmd := &cobra.Command{ - Use: "sleep", - Short: "Sleep for specified duration and exit", - Run: func(_ *cobra.Command, _ []string) { - // It is expected that this command is run from lifecycle hook. - // Because logs from hooks are not visible in the container logs, we don't log here at all. - time.Sleep(duration) + Use: "generate-certs", + Short: "Generate self-signed certificates for securing control plane to data plane communication", + RunE: func(cmd *cobra.Command, _ []string) error { + namespace, err := getValueFromEnv("POD_NAMESPACE") + if err != nil { + return fmt.Errorf("POD_NAMESPACE must be specified in the ENV") + } + + certConfig, err := generateCertificates(serviceName.value, namespace, clusterDomain.value) + if err != nil { + return fmt.Errorf("error generating certificates: %w", err) + } + + k8sClient, err := client.New(k8sConfig.GetConfigOrDie(), client.Options{}) + if err != nil { + return fmt.Errorf("error creating k8s client: %w", err) + } + + if err := createSecrets( + cmd.Context(), + k8sClient, + certConfig, + serverTLSSecretName.value, + agentTLSSecretName.value, + namespace, + overwrite, + ); err != nil { + return fmt.Errorf("error creating secrets: %w", err) + } + + return nil }, } - cmd.Flags().DurationVar( - &duration, - durationFlag, - 30*time.Second, - "Set the duration of sleep. Must be parsable by https://pkg.go.dev/time#ParseDuration", + cmd.Flags().Var( + &serverTLSSecretName, + serverTLSSecretFlag, + `The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane `+ + `to securely communicate with the NGINX Agent. Must exist in the same namespace that the `+ + `NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).`, + ) + + cmd.Flags().Var( + &agentTLSSecretName, + agentTLSSecretFlag, + `The name of the base Secret containing TLS CA, certificate, and key for the NGINX Agent to securely `+ + `communicate with the NGINX Gateway Fabric control plane. 
Must exist in the same namespace that the `+ + `NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).`, + ) + + cmd.Flags().Var( + &serviceName, + serviceFlag, + `The name of the Service that fronts the NGINX Gateway Fabric Pod.`+ + ` Lives in the same Namespace as the controller.`, + ) + + cmd.Flags().Var( + &clusterDomain, + clusterDomainFlag, + `The DNS domain of your Kubernetes cluster.`, + ) + + cmd.Flags().BoolVar( + &overwrite, + overwriteFlag, + false, + "Overwrite existing certificates.", ) return cmd @@ -564,6 +655,37 @@ func createInitializeCommand() *cobra.Command { return cmd } +// FIXME(pleshakov): Remove this command once NGF min supported Kubernetes version supports sleep action in +// preStop hook. +// See https://github.com/kubernetes/enhancements/tree/4ec371d92dcd4f56a2ab18c8ba20bb85d8d20efe/keps/sig-node/3960-pod-lifecycle-sleep-action +// +//nolint:lll +func createSleepCommand() *cobra.Command { + // flag names + const durationFlag = "duration" + // flag values + var duration time.Duration + + cmd := &cobra.Command{ + Use: "sleep", + Short: "Sleep for specified duration and exit", + Run: func(_ *cobra.Command, _ []string) { + // It is expected that this command is run from lifecycle hook. + // Because logs from hooks are not visible in the container logs, we don't log here at all. + time.Sleep(duration) + }, + } + + cmd.Flags().DurationVar( + &duration, + durationFlag, + 30*time.Second, + "Set the duration of sleep. Must be parsable by https://pkg.go.dev/time#ParseDuration", + ) + + return cmd +} + func parseFlags(flags *pflag.FlagSet) ([]string, []string) { var flagKeys, flagValues []string diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go index 61459455f6..8662c4ef0d 100644 --- a/cmd/gateway/commands_test.go +++ b/cmd/gateway/commands_test.go @@ -129,7 +129,7 @@ func TestCommonFlagsValidation(t *testing.T) { } } -func TestStaticModeCmdFlagValidation(t *testing.T) { +func TestControllerCmdFlagValidation(t *testing.T) { t.Parallel() tests := []flagTestCase{ { @@ -140,6 +140,7 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { "--gateway=nginx-gateway/nginx", "--config=nginx-gateway-config", "--service=nginx-gateway", + "--agent-tls-secret=agent-tls", "--update-gatewayclass-status=true", "--metrics-port=9114", "--metrics-disable", @@ -217,6 +218,22 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { wantErr: true, expectedErrPrefix: `invalid argument "!@#$" for "--service" flag: invalid format`, }, + { + name: "agent-tls-secret is set to empty string", + args: []string{ + "--agent-tls-secret=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--agent-tls-secret" flag: must be set`, + }, + { + name: "agent-tls-secret is set to invalid string", + args: []string{ + "--agent-tls-secret=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--agent-tls-secret" flag: invalid format`, + }, { name: "update-gatewayclass-status is set to empty string", args: []string{ @@ -441,13 +458,18 @@ func TestStaticModeCmdFlagValidation(t *testing.T) { } } -func TestSleepCmdFlagValidation(t *testing.T) { +func TestGenerateCertsCmdFlagValidation(t *testing.T) { t.Parallel() + tests := []flagTestCase{ { name: "valid flags", args: []string{ - "--duration=1s", + "--server-tls-secret=server-secret", + "--agent-tls-secret=agent-secret", + "--service=my-service", + "--cluster-domain=cluster.local", + "--overwrite", }, wantErr: false, }, @@ -457,27 +479,75 @@ func 
TestSleepCmdFlagValidation(t *testing.T) { wantErr: false, }, { - name: "duration is set to empty string", + name: "server-tls-secret is set to empty string", args: []string{ - "--duration=", + "--server-tls-secret=", }, wantErr: true, - expectedErrPrefix: `invalid argument "" for "--duration" flag: time: invalid duration ""`, + expectedErrPrefix: `invalid argument "" for "--server-tls-secret" flag: must be set`, }, { - name: "duration is invalid", + name: "server-tls-secret is invalid", args: []string{ - "--duration=invalid", + "--server-tls-secret=!@#$", }, wantErr: true, - expectedErrPrefix: `invalid argument "invalid" for "--duration" flag: time: invalid duration "invalid"`, + expectedErrPrefix: `invalid argument "!@#$" for "--server-tls-secret" flag: invalid format`, + }, + { + name: "agent-tls-secret is set to empty string", + args: []string{ + "--agent-tls-secret=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--agent-tls-secret" flag: must be set`, + }, + { + name: "agent-tls-secret is invalid", + args: []string{ + "--agent-tls-secret=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--agent-tls-secret" flag: invalid format`, + }, + { + name: "service is set to empty string", + args: []string{ + "--service=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--service" flag: must be set`, + }, + { + name: "service is invalid", + args: []string{ + "--service=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--service" flag: invalid format`, + }, + { + name: "cluster-domain is set to empty string", + args: []string{ + "--cluster-domain=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--cluster-domain" flag: must be set`, + }, + { + name: "cluster-domain is invalid", + args: []string{ + "--cluster-domain=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--cluster-domain" flag: invalid format`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() - cmd := createSleepCommand() + cmd := createGenerateCertsCommand() testFlag(t, cmd, test) }) } @@ -529,6 +599,48 @@ func TestInitializeCmdFlagValidation(t *testing.T) { } } +func TestSleepCmdFlagValidation(t *testing.T) { + t.Parallel() + tests := []flagTestCase{ + { + name: "valid flags", + args: []string{ + "--duration=1s", + }, + wantErr: false, + }, + { + name: "omitted flags", + args: nil, + wantErr: false, + }, + { + name: "duration is set to empty string", + args: []string{ + "--duration=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--duration" flag: time: invalid duration ""`, + }, + { + name: "duration is invalid", + args: []string{ + "--duration=invalid", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "invalid" for "--duration" flag: time: invalid duration "invalid"`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + cmd := createSleepCommand() + testFlag(t, cmd, test) + }) + } +} + func TestParseFlags(t *testing.T) { t.Parallel() g := NewWithT(t) diff --git a/cmd/gateway/main.go b/cmd/gateway/main.go index 203385b732..515fcc3f16 100644 --- a/cmd/gateway/main.go +++ b/cmd/gateway/main.go @@ -22,6 +22,7 @@ func main() { rootCmd.AddCommand( createControllerCommand(), + createGenerateCertsCommand(), createInitializeCommand(), createSleepCommand(), ) diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml index c6309d395e..3f82b66bce 100644 --- 
a/deploy/aws-nlb/deploy.yaml +++ b/deploy/aws-nlb/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -138,6 +173,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -205,6 +258,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election @@ -253,11 +307,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml index 88ce668193..6fd37f8a86 100644 --- a/deploy/azure/deploy.yaml +++ b/deploy/azure/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + 
labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -138,6 +173,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -205,6 +258,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election @@ -253,6 +307,9 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls nodeSelector: kubernetes.io/os: linux securityContext: @@ -260,6 +317,56 @@ spec: runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml index b73922cdae..28f9bbec55 100644 --- a/deploy/default/deploy.yaml +++ b/deploy/default/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: 
nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -138,6 +173,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -205,6 +258,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election @@ -253,11 +307,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml index 23d4223234..68bdf72e43 100644 --- a/deploy/experimental-nginx-plus/deploy.yaml +++ b/deploy/experimental-nginx-plus/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: 
+ labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -142,6 +177,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -209,6 +262,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license @@ -261,11 +315,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/experimental/deploy.yaml b/deploy/experimental/deploy.yaml index 7ae7821f26..be7273edd4 100644 --- a/deploy/experimental/deploy.yaml +++ b/deploy/experimental/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + 
namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -142,6 +177,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -209,6 +262,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election @@ -258,11 +312,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml index b6b6b1ca58..7bdb4fe3c9 100644 --- a/deploy/nginx-plus/deploy.yaml +++ b/deploy/nginx-plus/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: 
ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -138,6 +173,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -205,6 +258,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license @@ -256,11 +310,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml index 206401aac1..909270a96b 100644 --- a/deploy/nodeport/deploy.yaml +++ b/deploy/nodeport/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - 
gateway.networking.k8s.io resources: @@ -138,6 +173,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -205,6 +258,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election @@ -253,11 +307,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index 0a62309fcc..d61f5e49ac 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -148,6 +183,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + 
app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -215,6 +268,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election @@ -263,11 +317,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass @@ -354,3 +461,5 @@ supplementalGroups: type: MustRunAs users: - system:serviceaccount:nginx-gateway:nginx-gateway +volumes: +- secret diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml index a25c7e1aa3..e6b27f01fb 100644 --- a/deploy/snippets-filters-nginx-plus/deploy.yaml +++ b/deploy/snippets-filters-nginx-plus/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -140,6 +175,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: 
RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -207,6 +260,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --nginx-docker-secret=nginx-plus-registry-secret - --nginx-plus - --usage-report-secret=nplus-license @@ -259,11 +313,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/deploy/snippets-filters/deploy.yaml b/deploy/snippets-filters/deploy.yaml index 288058bac2..714376f7f7 100644 --- a/deploy/snippets-filters/deploy.yaml +++ b/deploy/snippets-filters/deploy.yaml @@ -13,6 +13,35 @@ metadata: name: nginx-gateway namespace: nginx-gateway --- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update + - get +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -74,6 +103,12 @@ rules: verbs: - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create - apiGroups: - gateway.networking.k8s.io resources: @@ -140,6 +175,24 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + 
name: nginx-gateway-cert-generator + namespace: nginx-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-gateway-cert-generator +subjects: +- kind: ServiceAccount + name: nginx-gateway-cert-generator + namespace: nginx-gateway +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: @@ -207,6 +260,7 @@ spec: - --gatewayclass=nginx - --config=nginx-gateway-config - --service=nginx-gateway + - --agent-tls-secret=agent-tls - --metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election @@ -256,11 +310,64 @@ spec: runAsUser: 101 seccompProfile: type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/secrets/ngf + name: nginx-agent-tls securityContext: fsGroup: 1001 runAsNonRoot: true serviceAccountName: nginx-gateway terminationGracePeriodSeconds: 30 + volumes: + - name: nginx-agent-tls + secret: + secretName: server-tls +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-cert-generator + namespace: nginx-gateway +spec: + template: + metadata: + annotations: null + spec: + containers: + - args: + - generate-certs + - --service=nginx-gateway + - --cluster-domain=cluster.local + - --server-tls-secret=server-tls + - --agent-tls-secret=agent-tls + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/nginx/nginx-gateway-fabric:edge + imagePullPolicy: Always + name: cert-generator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1001 + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 1001 + runAsNonRoot: true + serviceAccountName: nginx-gateway-cert-generator + ttlSecondsAfterFinished: 0 --- apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass diff --git a/go.mod b/go.mod index 8d6f09cc9c..29b388c153 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/nginx/nginx-gateway-fabric go 1.24.2 require ( + github.com/fsnotify/fsnotify v1.8.0 github.com/go-kit/log v0.2.1 github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.7.0 @@ -38,7 +39,6 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect diff --git a/internal/framework/controller/index/pod.go b/internal/framework/controller/index/pod.go new file mode 100644 index 0000000000..2cd5cf6818 --- /dev/null +++ b/internal/framework/controller/index/pod.go @@ -0,0 +1,19 @@ +package index + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// PodIPIndexFunc is a client.IndexerFunc that parses a Pod object and returns the PodIP. +// Used by the gRPC token validator for validating a connection from NGINX agent. 
+func PodIPIndexFunc(obj client.Object) []string {
+	pod, ok := obj.(*corev1.Pod)
+	if !ok {
+		panic(fmt.Sprintf("expected a Pod; got %T", obj))
+	}
+
+	return []string{pod.Status.PodIP}
+}
diff --git a/internal/framework/controller/index/pod_test.go b/internal/framework/controller/index/pod_test.go
new file mode 100644
index 0000000000..e89c0492da
--- /dev/null
+++ b/internal/framework/controller/index/pod_test.go
@@ -0,0 +1,53 @@
+package index
+
+import (
+	"testing"
+
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestPodIPIndexFunc(t *testing.T) {
+	t.Parallel()
+	testcases := []struct {
+		msg       string
+		obj       client.Object
+		expOutput []string
+	}{
+		{
+			msg: "normal case",
+			obj: &corev1.Pod{
+				Status: corev1.PodStatus{
+					PodIP: "1.2.3.4",
+				},
+			},
+			expOutput: []string{"1.2.3.4"},
+		},
+		{
+			msg:       "empty status",
+			obj:       &corev1.Pod{},
+			expOutput: []string{""},
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.msg, func(t *testing.T) {
+			t.Parallel()
+			g := NewWithT(t)
+
+			output := PodIPIndexFunc(tc.obj)
+			g.Expect(output).To(Equal(tc.expOutput))
+		})
+	}
+}
+
+func TestPodIPIndexFuncPanics(t *testing.T) {
+	t.Parallel()
+	defer func() {
+		g := NewWithT(t)
+		g.Expect(recover()).ToNot(BeNil())
+	}()
+
+	PodIPIndexFunc(&corev1.Namespace{})
+}
diff --git a/internal/framework/controller/register.go b/internal/framework/controller/register.go
index c76db1f577..557438da98 100644
--- a/internal/framework/controller/register.go
+++ b/internal/framework/controller/register.go
@@ -96,7 +96,7 @@ func Register(
 	}
 
 	for field, indexerFunc := range cfg.fieldIndices {
-		if err := addIndex(
+		if err := AddIndex(
 			ctx,
 			mgr.GetFieldIndexer(),
 			objectType,
@@ -136,7 +136,7 @@ func Register(
 	return nil
 }
 
-func addIndex(
+func AddIndex(
 	ctx context.Context,
 	indexer client.FieldIndexer,
 	objectType ngftypes.ObjectType,
diff --git a/internal/mode/static/config/config.go b/internal/mode/static/config/config.go
index d8556e19f2..d37c6b55f7 100644
--- a/internal/mode/static/config/config.go
+++ b/internal/mode/static/config/config.go
@@ -32,6 +32,8 @@ type Config struct {
 	ConfigName string
 	// GatewayClassName is the name of the GatewayClass resource that the Gateway will use.
 	GatewayClassName string
+	// AgentTLSSecretName is the name of the TLS Secret used by NGINX Agent to communicate with the control plane.
+	AgentTLSSecretName string
 	// NginxDockerSecretNames are the names of any Docker registry Secrets for the NGINX container.
 	NginxDockerSecretNames []string
 	// LeaderElection contains the configuration for leader election.
diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 3459a7134f..bc68ba5510 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" "google.golang.org/grpc" appsv1 "k8s.io/api/apps/v1" + authv1 "k8s.io/api/authentication/v1" apiv1 "k8s.io/api/core/v1" discoveryV1 "k8s.io/api/discovery/v1" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -91,6 +92,7 @@ func init() { utilruntime.Must(ngfAPIv1alpha2.AddToScheme(scheme)) utilruntime.Must(apiext.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(authv1.AddToScheme(scheme)) } func StartManager(cfg config.Config) error { @@ -172,13 +174,21 @@ func StartManager(cfg config.Config) error { }) statusQueue := status.NewQueue() + resetConnChan := make(chan struct{}) nginxUpdater := agent.NewNginxUpdater( cfg.Logger.WithName("nginxUpdater"), mgr.GetAPIReader(), statusQueue, + resetConnChan, cfg.Plus, ) + tokenAudience := fmt.Sprintf( + "%s.%s.svc", + cfg.GatewayPodConfig.ServiceName, + cfg.GatewayPodConfig.Namespace, + ) + grpcServer := agentgrpc.NewServer( cfg.Logger.WithName("agentGRPCServer"), grpcServerPort, @@ -186,6 +196,9 @@ func StartManager(cfg config.Config) error { nginxUpdater.CommandService.Register, nginxUpdater.FileService.Register, }, + mgr.GetClient(), + tokenAudience, + resetConnChan, ) if err = mgr.Add(&runnables.LeaderOrNonLeader{Runnable: grpcServer}); err != nil { @@ -202,6 +215,7 @@ func StartManager(cfg config.Config) error { EventRecorder: recorder, GatewayPodConfig: &cfg.GatewayPodConfig, GCName: cfg.GatewayClassName, + AgentTLSSecretName: cfg.AgentTLSSecretName, Plus: cfg.Plus, NginxDockerSecretNames: cfg.NginxDockerSecretNames, PlusUsageConfig: &cfg.UsageReportConfig, @@ -362,6 +376,19 @@ func createManager(cfg config.Config, healthChecker *graphBuiltHealthChecker) (m } } + // Add an indexer to get pods by their IP address. This is used when validating that an agent + // connection is coming from the right place. 
+ var podIPIndexFunc client.IndexerFunc = index.PodIPIndexFunc + if err := controller.AddIndex( + context.Background(), + mgr.GetFieldIndexer(), + &apiv1.Pod{}, + "status.podIP", + podIPIndexFunc, + ); err != nil { + return nil, fmt.Errorf("error adding pod IP indexer: %w", err) + } + return mgr, nil } diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index 1d839c3bc9..7bc0818214 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -46,6 +46,7 @@ func NewNginxUpdater( logger logr.Logger, reader client.Reader, statusQueue *status.Queue, + resetConnChan <-chan struct{}, plus bool, ) *NginxUpdaterImpl { connTracker := agentgrpc.NewConnectionsTracker() @@ -57,6 +58,7 @@ func NewNginxUpdater( nginxDeployments, connTracker, statusQueue, + resetConnChan, ) fileService := newFileService(logger.WithName("fileService"), nginxDeployments, connTracker) diff --git a/internal/mode/static/nginx/agent/agent_test.go b/internal/mode/static/nginx/agent/agent_test.go index b159d5be5c..3266003981 100644 --- a/internal/mode/static/nginx/agent/agent_test.go +++ b/internal/mode/static/nginx/agent/agent_test.go @@ -51,7 +51,7 @@ func TestUpdateConfig(t *testing.T) { fakeBroadcaster.SendReturns(test.configApplied) plus := false - updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, plus) + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, plus) deployment := &Deployment{ broadcaster: fakeBroadcaster, podStatuses: make(map[string]error), @@ -142,7 +142,7 @@ func TestUpdateUpstreamServers(t *testing.T) { fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} fakeBroadcaster.SendReturns(test.configApplied) - updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, test.plus) + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, test.plus) updater.retryTimeout = 0 deployment := &Deployment{ diff --git a/internal/mode/static/nginx/agent/command.go b/internal/mode/static/nginx/agent/command.go index d5be137cd4..8f694e581d 100644 --- a/internal/mode/static/nginx/agent/command.go +++ b/internal/mode/static/nginx/agent/command.go @@ -36,6 +36,7 @@ type commandService struct { pb.CommandServiceServer nginxDeployments *DeploymentStore statusQueue *status.Queue + resetConnChan <-chan struct{} connTracker agentgrpc.ConnectionsTracker k8sReader client.Reader logger logr.Logger @@ -48,6 +49,7 @@ func newCommandService( depStore *DeploymentStore, connTracker agentgrpc.ConnectionsTracker, statusQueue *status.Queue, + resetConnChan <-chan struct{}, ) *commandService { return &commandService{ connectionTimeout: connectionWaitTimeout, @@ -56,6 +58,7 @@ func newCommandService( connTracker: connTracker, nginxDeployments: depStore, statusQueue: statusQueue, + resetConnChan: resetConnChan, } } @@ -158,7 +161,7 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error // `updateNginxConfig`. The entire transaction (as described in above in the function comment) // must be locked to prevent the deployment files from changing during the transaction. // This means that the lock is held until we receive either an error or response from agent - // (via msgr.Errors() or msgr.Mesages()) and respond back, finally returning to the event handler + // (via msgr.Errors() or msgr.Messages()) and respond back, finally returning to the event handler // which releases the lock. 
select { case <-ctx.Done(): @@ -167,6 +170,8 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error default: } return grpcStatus.Error(codes.Canceled, context.Cause(ctx).Error()) + case <-cs.resetConnChan: + return grpcStatus.Error(codes.Unavailable, "TLS files updated") case msg := <-channels.ListenCh: var req *pb.ManagementPlaneRequest switch msg.Type { diff --git a/internal/mode/static/nginx/agent/command_test.go b/internal/mode/static/nginx/agent/command_test.go index 714ffaafe5..167aab860a 100644 --- a/internal/mode/static/nginx/agent/command_test.go +++ b/internal/mode/static/nginx/agent/command_test.go @@ -208,6 +208,7 @@ func TestCreateConnection(t *testing.T) { NewDeploymentStore(&connTracker), &connTracker, status.NewQueue(), + nil, ) resp, err := cs.CreateConnection(test.ctx, test.request) @@ -304,6 +305,7 @@ func TestSubscribe(t *testing.T) { store, &connTracker, status.NewQueue(), + nil, ) broadcaster := &broadcastfakes.FakeBroadcaster{} @@ -412,6 +414,78 @@ func TestSubscribe(t *testing.T) { g.Expect(deployment.podStatuses).ToNot(HaveKey("nginx-pod")) } +func TestSubscribe_Reset(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + connTracker := agentgrpcfakes.FakeConnectionsTracker{} + conn := agentgrpc.Connection{ + Parent: types.NamespacedName{Namespace: "test", Name: "nginx-deployment"}, + PodName: "nginx-pod", + InstanceID: "nginx-id", + } + connTracker.GetConnectionReturns(conn) + + store := NewDeploymentStore(&connTracker) + resetChan := make(chan struct{}) + cs := newCommandService( + logr.Discard(), + fake.NewFakeClient(), + store, + &connTracker, + status.NewQueue(), + resetChan, + ) + + broadcaster := &broadcastfakes.FakeBroadcaster{} + responseCh := make(chan struct{}) + listenCh := make(chan broadcast.NginxAgentMessage, 2) + subChannels := broadcast.SubscriberChannels{ + ListenCh: listenCh, + ResponseCh: responseCh, + } + broadcaster.SubscribeReturns(subChannels) + + // set the initial files to be applied by the Subscription + deployment := store.StoreWithBroadcaster(conn.Parent, broadcaster) + files := []File{ + { + Meta: &pb.FileMeta{ + Name: "nginx.conf", + Hash: "12345", + }, + Contents: []byte("file contents"), + }, + } + deployment.SetFiles(files) + + ctx, cancel := createGrpcContextWithCancel() + defer cancel() + + mockServer := newMockSubscribeServer(ctx) + + // start the Subscriber + errCh := make(chan error) + go func() { + errCh <- cs.Subscribe(mockServer) + }() + + // ensure initial config is read to unblock read channel + mockServer.recvChan <- &pb.DataPlaneResponse{ + CommandResponse: &pb.CommandResponse{ + Status: pb.CommandResponse_COMMAND_STATUS_OK, + }, + } + + resetChan <- struct{}{} + + g.Eventually(func() error { + err := <-errCh + g.Expect(err).To(HaveOccurred()) + return err + }).Should(MatchError(ContainSubstring("TLS files updated"))) +} + func TestSubscribe_Errors(t *testing.T) { t.Parallel() @@ -465,6 +539,7 @@ func TestSubscribe_Errors(t *testing.T) { NewDeploymentStore(&connTracker), &connTracker, status.NewQueue(), + nil, ) if test.setup != nil { @@ -580,6 +655,7 @@ func TestSetInitialConfig_Errors(t *testing.T) { NewDeploymentStore(&connTracker), &connTracker, status.NewQueue(), + nil, ) conn := &agentgrpc.Connection{ @@ -765,6 +841,7 @@ func TestGetPodOwner(t *testing.T) { NewDeploymentStore(nil), nil, status.NewQueue(), + nil, ) owner, err := cs.getPodOwner(test.podName) @@ -864,6 +941,7 @@ func TestUpdateDataPlaneStatus(t *testing.T) { NewDeploymentStore(&connTracker), &connTracker, status.NewQueue(), 
+ nil, ) resp, err := cs.UpdateDataPlaneStatus(test.ctx, test.request) @@ -902,6 +980,7 @@ func TestUpdateDataPlaneHealth(t *testing.T) { NewDeploymentStore(&connTracker), &connTracker, status.NewQueue(), + nil, ) resp, err := cs.UpdateDataPlaneHealth(context.Background(), &pb.UpdateDataPlaneHealthRequest{}) diff --git a/internal/mode/static/nginx/agent/grpc/connections.go b/internal/mode/static/nginx/agent/grpc/connections.go index e0534a78f9..0bae634ccc 100644 --- a/internal/mode/static/nginx/agent/grpc/connections.go +++ b/internal/mode/static/nginx/agent/grpc/connections.go @@ -48,9 +48,6 @@ func NewConnectionsTracker() ConnectionsTracker { } // Track adds a connection to the tracking map. -// TODO(sberman): we need to handle the case when the token expires (once we support the token). -// This likely involves setting a callback to cancel a context when the token expires, which triggers -// the connection to be removed from the tracking list. func (c *AgentConnectionsTracker) Track(key string, conn Connection) { c.lock.Lock() defer c.lock.Unlock() diff --git a/internal/mode/static/nginx/agent/grpc/context/context.go b/internal/mode/static/nginx/agent/grpc/context/context.go index f8daf457eb..a3bb0d3642 100644 --- a/internal/mode/static/nginx/agent/grpc/context/context.go +++ b/internal/mode/static/nginx/agent/grpc/context/context.go @@ -6,6 +6,7 @@ import ( // GrpcInfo for storing identity information for the gRPC client. type GrpcInfo struct { + Token string `json:"token"` // auth token that was provided by the gRPC client IPAddress string `json:"ip_address"` // ip address of the agent } diff --git a/internal/mode/static/nginx/agent/grpc/filewatcher/doc.go b/internal/mode/static/nginx/agent/grpc/filewatcher/doc.go new file mode 100644 index 0000000000..cd54f18c44 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/filewatcher/doc.go @@ -0,0 +1,4 @@ +/* +Package filewatcher contains the functions to watch for TLS file updates for the gRPC server. +*/ +package filewatcher diff --git a/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher.go b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher.go new file mode 100644 index 0000000000..2d79f4047c --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher.go @@ -0,0 +1,106 @@ +package filewatcher + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/go-logr/logr" +) + +const monitoringInterval = 5 * time.Second + +var emptyEvent = fsnotify.Event{ + Name: "", + Op: 0, +} + +// FileWatcher watches for changes to files and notifies the channel when a change occurs. +type FileWatcher struct { + filesChanged *atomic.Bool + watcher *fsnotify.Watcher + notifyCh chan<- struct{} + logger logr.Logger + filesToWatch []string + interval time.Duration +} + +// NewFileWatcher creates a new FileWatcher instance. +func NewFileWatcher(logger logr.Logger, files []string, notifyCh chan<- struct{}) (*FileWatcher, error) { + filesChanged := &atomic.Bool{} + + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, fmt.Errorf("failed to initialize TLS file watcher: %w", err) + } + + return &FileWatcher{ + filesChanged: filesChanged, + watcher: watcher, + logger: logger, + filesToWatch: files, + notifyCh: notifyCh, + interval: monitoringInterval, + }, nil +} + +// Watch starts the watch for file changes. 
+func (w *FileWatcher) Watch(ctx context.Context) { + w.logger.V(1).Info("Starting file watcher") + + ticker := time.NewTicker(w.interval) + for _, file := range w.filesToWatch { + w.addWatcher(file) + } + + for { + select { + case <-ctx.Done(): + if err := w.watcher.Close(); err != nil { + w.logger.Error(err, "unable to close file watcher") + } + return + case event := <-w.watcher.Events: + w.handleEvent(event) + case <-ticker.C: + w.checkForUpdates() + case err := <-w.watcher.Errors: + w.logger.Error(err, "error watching file") + } + } +} + +func (w *FileWatcher) addWatcher(path string) { + if err := w.watcher.Add(path); err != nil { + w.logger.Error(err, "failed to watch file", "file", path) + } +} + +func (w *FileWatcher) handleEvent(event fsnotify.Event) { + if isEventSkippable(event) { + return + } + + if event.Has(fsnotify.Remove) || event.Has(fsnotify.Rename) { + w.addWatcher(event.Name) + } + + w.filesChanged.Store(true) +} + +func (w *FileWatcher) checkForUpdates() { + if w.filesChanged.Load() { + w.logger.Info("TLS files changed, sending notification to reset nginx agent connections") + w.notifyCh <- struct{}{} + w.filesChanged.Store(false) + } +} + +func isEventSkippable(event fsnotify.Event) bool { + return event == emptyEvent || + event.Name == "" || + event.Has(fsnotify.Chmod) || + event.Has(fsnotify.Create) +} diff --git a/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher_test.go b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher_test.go new file mode 100644 index 0000000000..1840e78849 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/filewatcher/filewatcher_test.go @@ -0,0 +1,69 @@ +package filewatcher + +import ( + "context" + "os" + "path" + "testing" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" +) + +func TestFileWatcher_Watch(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + notifyCh := make(chan struct{}, 1) + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + file := path.Join(os.TempDir(), "test-file") + _, err := os.Create(file) + g.Expect(err).ToNot(HaveOccurred()) + defer os.Remove(file) + + w, err := NewFileWatcher(logr.Discard(), []string{file}, notifyCh) + g.Expect(err).ToNot(HaveOccurred()) + w.interval = 300 * time.Millisecond + + go w.Watch(ctx) + + w.watcher.Events <- fsnotify.Event{Name: file, Op: fsnotify.Write} + g.Eventually(func() bool { + return w.filesChanged.Load() + }).Should(BeTrue()) + + g.Eventually(notifyCh).Should(Receive()) +} + +func TestFileWatcher_handleEvent(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + w, err := NewFileWatcher(logr.Discard(), []string{"test-file"}, nil) + g.Expect(err).ToNot(HaveOccurred()) + + w.handleEvent(fsnotify.Event{Op: fsnotify.Write}) + g.Expect(w.filesChanged.Load()).To(BeFalse()) + + w.handleEvent(fsnotify.Event{Name: "test-chmod", Op: fsnotify.Chmod}) + g.Expect(w.filesChanged.Load()).To(BeFalse()) + + w.handleEvent(fsnotify.Event{Name: "test-create", Op: fsnotify.Create}) + g.Expect(w.filesChanged.Load()).To(BeFalse()) + + w.handleEvent(fsnotify.Event{Name: "test-write", Op: fsnotify.Write}) + g.Expect(w.filesChanged.Load()).To(BeTrue()) + w.filesChanged.Store(false) + + w.handleEvent(fsnotify.Event{Name: "test-remove", Op: fsnotify.Remove}) + g.Expect(w.filesChanged.Load()).To(BeTrue()) + w.filesChanged.Store(false) + + w.handleEvent(fsnotify.Event{Name: "test-rename", Op: fsnotify.Rename}) + g.Expect(w.filesChanged.Load()).To(BeTrue()) + w.filesChanged.Store(false) +} diff --git a/internal/mode/static/nginx/agent/grpc/grpc.go b/internal/mode/static/nginx/agent/grpc/grpc.go index a4f2a31268..f995756584 100644 --- a/internal/mode/static/nginx/agent/grpc/grpc.go +++ b/internal/mode/static/nginx/agent/grpc/grpc.go @@ -2,31 +2,41 @@ package grpc import ( "context" + "crypto/tls" + "crypto/x509" + "errors" "fmt" "net" + "os" "time" "github.com/go-logr/logr" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/filewatcher" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/interceptor" ) const ( keepAliveTime = 15 * time.Second keepAliveTimeout = 10 * time.Second + caCertPath = "/var/run/secrets/ngf/ca.crt" + tlsCertPath = "/var/run/secrets/ngf/tls.crt" + tlsKeyPath = "/var/run/secrets/ngf/tls.key" ) var ErrStatusInvalidConnection = status.Error(codes.Unauthenticated, "invalid connection") // Interceptor provides hooks to intercept the execution of an RPC on the server. type Interceptor interface { - Stream() grpc.StreamServerInterceptor - Unary() grpc.UnaryServerInterceptor + Stream(logr.Logger) grpc.StreamServerInterceptor + Unary(logr.Logger) grpc.UnaryServerInterceptor } // Server is a gRPC server for communicating with the nginx agent. @@ -35,6 +45,10 @@ type Server struct { interceptor Interceptor logger logr.Logger + + // resetConnChan is used by the filewatcher to trigger the Command service to + // reset any connections when TLS files are updated. 
+ resetConnChan chan<- struct{} // RegisterServices is a list of functions to register gRPC services to the gRPC server. registerServices []func(*grpc.Server) // Port is the port that the server is listening on. @@ -42,12 +56,20 @@ type Server struct { port int } -func NewServer(logger logr.Logger, port int, registerSvcs []func(*grpc.Server)) *Server { +func NewServer( + logger logr.Logger, + port int, + registerSvcs []func(*grpc.Server), + k8sClient client.Client, + tokenAudience string, + resetConnChan chan<- struct{}, +) *Server { return &Server{ logger: logger, port: port, registerServices: registerSvcs, - interceptor: interceptor.NewContextSetter(), + interceptor: interceptor.NewContextSetter(k8sClient, tokenAudience), + resetConnChan: resetConnChan, } } @@ -58,6 +80,11 @@ func (g *Server) Start(ctx context.Context) error { return err } + tlsCredentials, err := getTLSConfig() + if err != nil { + return err + } + server := grpc.NewServer( grpc.KeepaliveParams( keepalive.ServerParameters{ @@ -71,14 +98,23 @@ func (g *Server) Start(ctx context.Context) error { PermitWithoutStream: true, }, ), - grpc.ChainStreamInterceptor(g.interceptor.Stream()), - grpc.ChainUnaryInterceptor(g.interceptor.Unary()), + grpc.ChainStreamInterceptor(g.interceptor.Stream(g.logger)), + grpc.ChainUnaryInterceptor(g.interceptor.Unary(g.logger)), + grpc.Creds(tlsCredentials), ) for _, registerSvc := range g.registerServices { registerSvc(server) } + tlsFiles := []string{caCertPath, tlsCertPath, tlsKeyPath} + fileWatcher, err := filewatcher.NewFileWatcher(g.logger.WithName("fileWatcher"), tlsFiles, g.resetConnChan) + if err != nil { + return err + } + + go fileWatcher.Watch(ctx) + go func() { <-ctx.Done() g.logger.Info("Shutting down GRPC Server") @@ -89,4 +125,30 @@ func (g *Server) Start(ctx context.Context) error { return server.Serve(listener) } +func getTLSConfig() (credentials.TransportCredentials, error) { + caPem, err := os.ReadFile(caCertPath) + if err != nil { + return nil, err + } + + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(caPem) { + return nil, errors.New("error parsing CA PEM") + } + + getCertificateCallback := func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + serverCert, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) + return &serverCert, err + } + + tlsConfig := &tls.Config{ + GetCertificate: getCertificateCallback, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: certPool, + MinVersion: tls.VersionTLS13, + } + + return credentials.NewTLS(tlsConfig), nil +} + var _ manager.Runnable = &Server{} diff --git a/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go index 3139da3cec..87517c5875 100644 --- a/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go +++ b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor.go @@ -4,15 +4,29 @@ import ( "context" "fmt" "net" + "strings" + "time" + "github.com/go-logr/logr" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" grpcContext "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/context" ) +const ( + headerUUID = "uuid" + headerAuth = "authorization" +) 
+ // streamHandler is a struct that implements StreamHandler, allowing the interceptor to replace the context. type streamHandler struct { grpc.ServerStream @@ -23,21 +37,28 @@ func (sh *streamHandler) Context() context.Context { return sh.ctx } -type ContextSetter struct{} +type ContextSetter struct { + k8sClient client.Client + audience string +} -func NewContextSetter() ContextSetter { - return ContextSetter{} +func NewContextSetter(k8sClient client.Client, audience string) ContextSetter { + return ContextSetter{ + k8sClient: k8sClient, + audience: audience, + } } -func (c ContextSetter) Stream() grpc.StreamServerInterceptor { +func (c ContextSetter) Stream(logger logr.Logger) grpc.StreamServerInterceptor { return func( - srv interface{}, + srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler, ) error { - ctx, err := setContext(ss.Context()) + ctx, err := c.validateConnection(ss.Context()) if err != nil { + logger.Error(err, "error validating connection") return err } return handler(srv, &streamHandler{ @@ -47,24 +68,48 @@ func (c ContextSetter) Stream() grpc.StreamServerInterceptor { } } -func (c ContextSetter) Unary() grpc.UnaryServerInterceptor { +func (c ContextSetter) Unary(logger logr.Logger) grpc.UnaryServerInterceptor { return func( ctx context.Context, - req interface{}, + req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler, - ) (resp interface{}, err error) { - if ctx, err = setContext(ctx); err != nil { + ) (resp any, err error) { + if ctx, err = c.validateConnection(ctx); err != nil { + logger.Error(err, "error validating connection") return nil, err } return handler(ctx, req) } } -// TODO(sberman): for now, we'll just use the IP address of the agent to link a Connection -// to a Subscription by setting it in the context. Once we support auth, we can likely change this -// interceptor to instead set the uuid. -func setContext(ctx context.Context) (context.Context, error) { +// validateConnection checks that the connection is valid and returns a new +// context containing information used by the gRPC command/file services. 
+func (c ContextSetter) validateConnection(ctx context.Context) (context.Context, error) { + gi, err := getGrpcInfo(ctx) + if err != nil { + return nil, err + } + + return c.validateToken(ctx, gi) +} + +func getGrpcInfo(ctx context.Context) (*grpcContext.GrpcInfo, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.InvalidArgument, "no metadata") + } + + id := md.Get(headerUUID) + if len(id) == 0 { + return nil, status.Error(codes.Unauthenticated, "no identity") + } + + auths := md.Get(headerAuth) + if len(auths) == 0 { + return nil, status.Error(codes.Unauthenticated, "no authorization") + } + p, ok := peer.FromContext(ctx) if !ok { return nil, status.Error(codes.InvalidArgument, "no peer data") @@ -75,8 +120,76 @@ func setContext(ctx context.Context) (context.Context, error) { panic(fmt.Sprintf("address %q was not of type net.TCPAddr", p.Addr.String())) } - gi := &grpcContext.GrpcInfo{ + return &grpcContext.GrpcInfo{ + Token: auths[0], IPAddress: addr.IP.String(), + }, nil +} + +func (c ContextSetter) validateToken(ctx context.Context, gi *grpcContext.GrpcInfo) (context.Context, error) { + tokenReview := &authv1.TokenReview{ + Spec: authv1.TokenReviewSpec{ + Audiences: []string{c.audience}, + Token: gi.Token, + }, + } + + createCtx, createCancel := context.WithTimeout(ctx, 30*time.Second) + defer createCancel() + + if err := c.k8sClient.Create(createCtx, tokenReview); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error creating TokenReview: %v", err)) + } + + if !tokenReview.Status.Authenticated { + return nil, status.Error(codes.Unauthenticated, fmt.Sprintf("invalid authorization: %s", tokenReview.Status.Error)) + } + + usernameItems := strings.Split(tokenReview.Status.User.Username, ":") + if len(usernameItems) != 4 || usernameItems[0] != "system" || usernameItems[1] != "serviceaccount" { + msg := fmt.Sprintf( + "token username must be of the format 'system:serviceaccount:NAMESPACE:NAME': %s", + tokenReview.Status.User.Username, + ) + return nil, status.Error(codes.Unauthenticated, msg) + } + + getCtx, getCancel := context.WithTimeout(ctx, 30*time.Second) + defer getCancel() + + var podList corev1.PodList + opts := &client.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{"status.podIP": gi.IPAddress}), + } + + if err := c.k8sClient.List(getCtx, &podList, opts); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error listing pods: %s", err.Error())) + } + + if len(podList.Items) != 1 { + msg := fmt.Sprintf("expected one Pod to have IP address %s, found %d", gi.IPAddress, len(podList.Items)) + return nil, status.Error(codes.Internal, msg) + } + + podNS := podList.Items[0].GetNamespace() + if podNS != usernameItems[2] { + msg := fmt.Sprintf( + "token user namespace %q does not match namespace of requesting pod %q", usernameItems[2], podNS, + ) + return nil, status.Error(codes.Unauthenticated, msg) + } + + scName, ok := podList.Items[0].GetLabels()[controller.AppNameLabel] + if !ok { + msg := fmt.Sprintf("could not get app name from %q label; unable to authenticate token", controller.AppNameLabel) + return nil, status.Error(codes.Unauthenticated, msg) + } + + if scName != usernameItems[3] { + msg := fmt.Sprintf( + "token user name %q does not match service account name of requesting pod %q", usernameItems[3], scName, + ) + return nil, status.Error(codes.Unauthenticated, msg) } return grpcContext.NewGrpcContext(ctx, *gi), nil diff --git 
a/internal/mode/static/nginx/agent/grpc/interceptor/interceptor_test.go b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor_test.go new file mode 100644 index 0000000000..04eda6ad50 --- /dev/null +++ b/internal/mode/static/nginx/agent/grpc/interceptor/interceptor_test.go @@ -0,0 +1,292 @@ +package interceptor + +import ( + "context" + "errors" + "net" + "testing" + + "github.com/go-logr/logr" + . "github.com/onsi/gomega" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" +) + +type mockServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (m *mockServerStream) Context() context.Context { + return m.ctx +} + +type mockClient struct { + client.Client + createErr, listErr error + username, appName, podNamespace string + authenticated bool +} + +func (m *mockClient) Create(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + tr, ok := obj.(*authv1.TokenReview) + if !ok { + return errors.New("couldn't convert object to TokenReview") + } + tr.Status.Authenticated = m.authenticated + tr.Status.User = authv1.UserInfo{Username: m.username} + + return m.createErr +} + +func (m *mockClient) List(_ context.Context, obj client.ObjectList, _ ...client.ListOption) error { + podList, ok := obj.(*corev1.PodList) + if !ok { + return errors.New("couldn't convert object to PodList") + } + + var labels map[string]string + if m.appName != "" { + labels = map[string]string{ + controller.AppNameLabel: m.appName, + } + } + + podList.Items = []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: m.podNamespace, + Labels: labels, + }, + }, + } + + return m.listErr +} + +func TestInterceptor(t *testing.T) { + t.Parallel() + + validMetadata := metadata.New(map[string]string{ + headerUUID: "test-uuid", + headerAuth: "test-token", + }) + validPeerData := &peer.Peer{ + Addr: &net.TCPAddr{IP: net.ParseIP("127.0.0.1")}, + } + + tests := []struct { + md metadata.MD + peer *peer.Peer + createErr error + listErr error + username string + appName string + podNamespace string + name string + expErrMsg string + authenticated bool + expErrCode codes.Code + }{ + { + name: "valid request", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.OK, + }, + { + name: "missing metadata", + peer: validPeerData, + authenticated: true, + expErrCode: codes.InvalidArgument, + expErrMsg: "no metadata", + }, + { + name: "missing uuid", + md: metadata.New(map[string]string{ + headerAuth: "test-token", + }), + peer: validPeerData, + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "no identity", + }, + { + name: "missing authorization", + md: metadata.New(map[string]string{ + headerUUID: "test-uuid", + }), + peer: validPeerData, + authenticated: true, + createErr: nil, + expErrCode: codes.Unauthenticated, + expErrMsg: "no authorization", + }, + { + name: "missing peer data", + md: validMetadata, + authenticated: true, + expErrCode: codes.InvalidArgument, + expErrMsg: "no peer data", + }, + { + name: "tokenreview not created", + md: validMetadata, + peer: 
validPeerData, + authenticated: true, + createErr: errors.New("not created"), + expErrCode: codes.Internal, + expErrMsg: "error creating TokenReview", + }, + { + name: "tokenreview created and not authenticated", + md: validMetadata, + peer: validPeerData, + authenticated: false, + expErrCode: codes.Unauthenticated, + expErrMsg: "invalid authorization", + }, + { + name: "error listing pods", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + listErr: errors.New("can't list"), + expErrCode: codes.Internal, + expErrMsg: "error listing pods", + }, + { + name: "invalid username length", + md: validMetadata, + peer: validPeerData, + username: "serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "must be of the format", + }, + { + name: "missing system from username", + md: validMetadata, + peer: validPeerData, + username: "invalid:serviceaccount:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "must be of the format", + }, + { + name: "missing serviceaccount from username", + md: validMetadata, + peer: validPeerData, + username: "system:invalid:default:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "must be of the format", + }, + { + name: "mismatched namespace in username", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:invalid:gateway-nginx", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "does not match namespace", + }, + { + name: "mismatched name in username", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:invalid", + appName: "gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "does not match service account name", + }, + { + name: "missing app name label", + md: validMetadata, + peer: validPeerData, + username: "system:serviceaccount:default:gateway-nginx", + podNamespace: "default", + authenticated: true, + expErrCode: codes.Unauthenticated, + expErrMsg: "could not get app name", + }, + } + + streamHandler := func(_ any, _ grpc.ServerStream) error { + return nil + } + + unaryHandler := func(_ context.Context, _ any) (any, error) { + return nil, nil //nolint:nilnil // unit test + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + mockK8sClient := &mockClient{ + authenticated: test.authenticated, + createErr: test.createErr, + listErr: test.listErr, + username: test.username, + appName: test.appName, + podNamespace: test.podNamespace, + } + cs := NewContextSetter(mockK8sClient, "ngf-audience") + + ctx := context.Background() + if test.md != nil { + peerCtx := context.Background() + if test.peer != nil { + peerCtx = peer.NewContext(context.Background(), test.peer) + } + ctx = metadata.NewIncomingContext(peerCtx, test.md) + } + + stream := &mockServerStream{ctx: ctx} + + err := cs.Stream(logr.Discard())(nil, stream, nil, streamHandler) + if test.expErrCode != codes.OK { + g.Expect(status.Code(err)).To(Equal(test.expErrCode)) + 
g.Expect(err.Error()).To(ContainSubstring(test.expErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + _, err = cs.Unary(logr.Discard())(ctx, nil, nil, unaryHandler) + if test.expErrCode != codes.OK { + g.Expect(status.Code(err)).To(Equal(test.expErrCode)) + g.Expect(err.Error()).To(ContainSubstring(test.expErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + }) + } +} diff --git a/internal/mode/static/provisioner/eventloop.go b/internal/mode/static/provisioner/eventloop.go index 6b50e79e38..e03a79ade8 100644 --- a/internal/mode/static/provisioner/eventloop.go +++ b/internal/mode/static/provisioner/eventloop.go @@ -29,11 +29,13 @@ func newEventLoop( selector metav1.LabelSelector, ngfNamespace string, dockerSecrets []string, + agentTLSSecret string, usageConfig *config.UsageReportConfig, ) (*events.EventLoop, error) { nginxResourceLabelPredicate := predicate.NginxLabelPredicate(selector) - secretsToWatch := make([]string, 0, len(dockerSecrets)+3) + secretsToWatch := make([]string, 0, len(dockerSecrets)+4) + secretsToWatch = append(secretsToWatch, agentTLSSecret) secretsToWatch = append(secretsToWatch, dockerSecrets...) if usageConfig != nil { diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go index 7757cec043..5058d25771 100644 --- a/internal/mode/static/provisioner/handler.go +++ b/internal/mode/static/provisioner/handler.go @@ -171,7 +171,7 @@ func (h *eventHandler) provisionResources( gatewayNSName types.NamespacedName, ) error { resources := h.store.getNginxResourcesForGateway(gatewayNSName) - if resources.Gateway != nil { + if resources != nil && resources.Gateway != nil { resourceName := controller.CreateNginxResourceName(gatewayNSName.Name, h.gcName) if err := h.provisioner.provisionNginx( ctx, @@ -229,6 +229,13 @@ func (h *eventHandler) deprovisionSecretsForAllGateways(ctx context.Context, sec } switch { + case strings.HasSuffix(resources.AgentTLSSecret.Name, secret): + if err := h.provisioner.deleteSecret( + ctx, + controller.ObjectMetaToNamespacedName(resources.AgentTLSSecret), + ); err != nil { + allErrs = append(allErrs, err) + } case strings.HasSuffix(resources.PlusJWTSecret.Name, secret): if err := h.provisioner.deleteSecret( ctx, diff --git a/internal/mode/static/provisioner/handler_test.go b/internal/mode/static/provisioner/handler_test.go index fe1d63e9be..9fd0dcc8d1 100644 --- a/internal/mode/static/provisioner/handler_test.go +++ b/internal/mode/static/provisioner/handler_test.go @@ -23,7 +23,7 @@ func TestHandleEventBatch_Upsert(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore([]string{dockerTestSecretName}, jwtTestSecretName, "", "") + store := newStore([]string{dockerTestSecretName}, "", jwtTestSecretName, "", "") provisioner, fakeClient, _ := defaultNginxProvisioner() provisioner.cfg.StatusQueue = status.NewQueue() @@ -196,7 +196,13 @@ func TestHandleEventBatch_Delete(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore([]string{dockerTestSecretName}, jwtTestSecretName, caTestSecretName, clientTestSecretName) + store := newStore( + []string{dockerTestSecretName}, + agentTLSTestSecretName, + jwtTestSecretName, + caTestSecretName, + clientTestSecretName, + ) provisioner, fakeClient, _ := defaultNginxProvisioner() provisioner.cfg.StatusQueue = status.NewQueue() @@ -233,6 +239,14 @@ func TestHandleEventBatch_Delete(t *testing.T) { }, } + originalAgentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: 
ngfNamespace, + }, + } + g.Expect(fakeClient.Create(ctx, originalAgentTLSSecret)).To(Succeed()) + jwtSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "gw-nginx-" + jwtTestSecretName, @@ -311,6 +325,7 @@ func TestHandleEventBatch_Delete(t *testing.T) { g.Expect(fakeClient.Get(ctx, key, &corev1.Secret{})).ToNot(Succeed()) } + verifySecret(agentTLSTestSecretName, originalAgentTLSSecret) verifySecret(jwtTestSecretName, userJwtSecret) verifySecret(caTestSecretName, userCASecret) verifySecret(clientTestSecretName, userClientSSLSecret) diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index 5e9710efd5..b62dc3b1f4 100644 --- a/internal/mode/static/provisioner/objects.go +++ b/internal/mode/static/provisioner/objects.go @@ -50,6 +50,7 @@ func (p *NginxProvisioner) buildNginxResourceObjects( ngxIncludesConfigMapName := controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix) ngxAgentConfigMapName := controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix) + agentTLSSecretName := controller.CreateNginxResourceName(resourceName, p.cfg.AgentTLSSecretName) var jwtSecretName, caSecretName, clientSSLSecretName string if p.cfg.Plus { @@ -98,6 +99,7 @@ func (p *NginxProvisioner) buildNginxResourceObjects( secrets, err := p.buildNginxSecrets( objectMeta, + agentTLSSecretName, dockerSecretNames, jwtSecretName, caSecretName, @@ -130,6 +132,7 @@ func (p *NginxProvisioner) buildNginxResourceObjects( ngxAgentConfigMapName, ports, selectorLabels, + agentTLSSecretName, dockerSecretNames, jwtSecretName, caSecretName, @@ -154,6 +157,7 @@ func (p *NginxProvisioner) buildNginxResourceObjects( func (p *NginxProvisioner) buildNginxSecrets( objectMeta metav1.ObjectMeta, + agentTLSSecretName string, dockerSecretNames map[string]string, jwtSecretName string, caSecretName string, @@ -162,6 +166,24 @@ func (p *NginxProvisioner) buildNginxSecrets( var secrets []client.Object var errs []error + if agentTLSSecretName != "" { + newSecret, err := p.getAndUpdateSecret( + p.cfg.AgentTLSSecretName, + metav1.ObjectMeta{ + Name: agentTLSSecretName, + Namespace: objectMeta.Namespace, + Labels: objectMeta.Labels, + Annotations: objectMeta.Annotations, + }, + corev1.SecretTypeTLS, + ) + if err != nil { + errs = append(errs, err) + } else { + secrets = append(secrets, newSecret) + } + } + for newName, origName := range dockerSecretNames { newSecret, err := p.getAndUpdateSecret( origName, @@ -171,6 +193,7 @@ func (p *NginxProvisioner) buildNginxSecrets( Labels: objectMeta.Labels, Annotations: objectMeta.Annotations, }, + corev1.SecretTypeDockerConfigJson, ) if err != nil { errs = append(errs, err) @@ -194,6 +217,7 @@ func (p *NginxProvisioner) buildNginxSecrets( Labels: objectMeta.Labels, Annotations: objectMeta.Annotations, }, + corev1.SecretTypeOpaque, ) if err != nil { errs = append(errs, err) @@ -211,6 +235,7 @@ func (p *NginxProvisioner) buildNginxSecrets( Labels: objectMeta.Labels, Annotations: objectMeta.Annotations, }, + corev1.SecretTypeOpaque, ) if err != nil { errs = append(errs, err) @@ -228,6 +253,7 @@ func (p *NginxProvisioner) buildNginxSecrets( Labels: objectMeta.Labels, Annotations: objectMeta.Annotations, }, + corev1.SecretTypeTLS, ) if err != nil { errs = append(errs, err) @@ -242,6 +268,7 @@ func (p *NginxProvisioner) buildNginxSecrets( func (p *NginxProvisioner) getAndUpdateSecret( name string, newObjectMeta metav1.ObjectMeta, + secretType corev1.SecretType, ) (*corev1.Secret, error) { ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -255,6 +282,7 @@ func (p *NginxProvisioner) getAndUpdateSecret( newSecret := &corev1.Secret{ ObjectMeta: newObjectMeta, Data: secret.Data, + Type: secretType, } return newSecret, nil @@ -402,6 +430,7 @@ func (p *NginxProvisioner) buildNginxDeployment( ngxAgentConfigMapName string, ports map[int32]struct{}, selectorLabels map[string]string, + agentTLSSecretName string, dockerSecretNames map[string]string, jwtSecretName string, caSecretName string, @@ -413,6 +442,7 @@ func (p *NginxProvisioner) buildNginxDeployment( ngxIncludesConfigMapName, ngxAgentConfigMapName, ports, + agentTLSSecretName, dockerSecretNames, jwtSecretName, caSecretName, @@ -420,7 +450,6 @@ func (p *NginxProvisioner) buildNginxDeployment( ) var object client.Object - // TODO(sberman): daemonset support deployment := &appsv1.Deployment{ ObjectMeta: objectMeta, Spec: appsv1.DeploymentSpec{ @@ -452,6 +481,7 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( ngxIncludesConfigMapName string, ngxAgentConfigMapName string, ports map[int32]struct{}, + agentTLSSecretName string, dockerSecretNames map[string]string, jwtSecretName string, caSecretName string, @@ -491,6 +521,7 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( }) image, pullPolicy := p.buildImage(nProxyCfg) + tokenAudience := fmt.Sprintf("%s.%s.svc", p.cfg.GatewayPodConfig.ServiceName, p.cfg.GatewayPodConfig.Namespace) spec := corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -509,7 +540,7 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( Add: []corev1.Capability{"NET_BIND_SERVICE"}, Drop: []corev1.Capability{"ALL"}, }, - ReadOnlyRootFilesystem: helpers.GetPointer[bool](true), + ReadOnlyRootFilesystem: helpers.GetPointer(true), RunAsGroup: helpers.GetPointer[int64](1001), RunAsUser: helpers.GetPointer[int64](101), SeccompProfile: &corev1.SeccompProfile{ @@ -518,6 +549,8 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( }, VolumeMounts: []corev1.VolumeMount{ {MountPath: "/etc/nginx-agent", Name: "nginx-agent"}, + {MountPath: "/var/run/secrets/ngf", Name: "nginx-agent-tls"}, + {MountPath: "/var/run/secrets/ngf/serviceaccount", Name: "token"}, {MountPath: "/var/log/nginx-agent", Name: "nginx-agent-log"}, {MountPath: "/etc/nginx/conf.d", Name: "nginx-conf"}, {MountPath: "/etc/nginx/stream-conf.d", Name: "nginx-stream-conf"}, @@ -562,7 +595,7 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( Capabilities: &corev1.Capabilities{ Drop: []corev1.Capability{"ALL"}, }, - ReadOnlyRootFilesystem: helpers.GetPointer[bool](true), + ReadOnlyRootFilesystem: helpers.GetPointer(true), RunAsGroup: helpers.GetPointer[int64](1001), RunAsUser: helpers.GetPointer[int64](101), SeccompProfile: &corev1.SeccompProfile{ @@ -574,6 +607,21 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( ImagePullSecrets: []corev1.LocalObjectReference{}, ServiceAccountName: objectMeta.Name, Volumes: []corev1.Volume{ + { + Name: "token", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ + Path: "token", + Audience: tokenAudience, + }, + }, + }, + }, + }, + }, {Name: "nginx-agent", VolumeSource: emptyDirVolumeSource}, { Name: "nginx-agent-config", @@ -585,6 +633,14 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( }, }, }, + { + Name: "nginx-agent-tls", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: 
agentTLSSecretName, + }, + }, + }, {Name: "nginx-agent-log", VolumeSource: emptyDirVolumeSource}, {Name: "nginx-conf", VolumeSource: emptyDirVolumeSource}, {Name: "nginx-stream-conf", VolumeSource: emptyDirVolumeSource}, @@ -790,6 +846,18 @@ func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName objects := []client.Object{deployment, service, serviceAccount, bootstrapCM, agentCM} + agentTLSSecretName := controller.CreateNginxResourceName( + deploymentNSName.Name, + p.cfg.AgentTLSSecretName, + ) + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSSecretName, + Namespace: deploymentNSName.Namespace, + }, + } + objects = append(objects, agentTLSSecret) + for _, name := range p.cfg.NginxDockerSecretNames { newName := controller.CreateNginxResourceName(deploymentNSName.Name, name) secret := &corev1.Secret{ diff --git a/internal/mode/static/provisioner/objects_test.go b/internal/mode/static/provisioner/objects_test.go index f59f4ce253..907a7e9fad 100644 --- a/internal/mode/static/provisioner/objects_test.go +++ b/internal/mode/static/provisioner/objects_test.go @@ -26,19 +26,30 @@ func TestBuildNginxResourceObjects(t *testing.T) { t.Parallel() g := NewWithT(t) + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + fakeClient := fake.NewFakeClient(agentTLSSecret) + provisioner := &NginxProvisioner{ cfg: Config{ GatewayPodConfig: &config.GatewayPodConfig{ - Namespace: "default", + Namespace: ngfNamespace, Version: "1.0.0", Image: "ngf-image", }, + AgentTLSSecretName: agentTLSTestSecretName, }, baseLabelSelector: metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "nginx", }, }, + k8sClient: fakeClient, } gateway := &gatewayv1.Gateway{ @@ -83,7 +94,7 @@ func TestBuildNginxResourceObjects(t *testing.T) { objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(objects).To(HaveLen(5)) + g.Expect(objects).To(HaveLen(6)) validateLabelsAndAnnotations := func(obj client.Object) { g.Expect(obj.GetLabels()).To(Equal(expLabels)) @@ -95,7 +106,16 @@ func TestBuildNginxResourceObjects(t *testing.T) { validateLabelsAndAnnotations(obj) } - cmObj := objects[0] + secretObj := objects[0] + secret, ok := secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, agentTLSTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + g.Expect(secret.GetAnnotations()).To(Equal(expAnnotations)) + g.Expect(secret.Data).To(HaveKey("tls.crt")) + g.Expect(secret.Data["tls.crt"]).To(Equal([]byte("tls"))) + + cmObj := objects[1] cm, ok := cmObj.(*corev1.ConfigMap) g.Expect(ok).To(BeTrue()) g.Expect(cm.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, nginxIncludesConfigMapNameSuffix))) @@ -103,7 +123,7 @@ func TestBuildNginxResourceObjects(t *testing.T) { g.Expect(cm.Data).To(HaveKey("main.conf")) g.Expect(cm.Data["main.conf"]).To(ContainSubstring("info")) - cmObj = objects[1] + cmObj = objects[2] cm, ok = cmObj.(*corev1.ConfigMap) g.Expect(ok).To(BeTrue()) g.Expect(cm.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, nginxAgentConfigMapNameSuffix))) @@ -111,12 +131,12 @@ func TestBuildNginxResourceObjects(t *testing.T) { g.Expect(cm.Data).To(HaveKey("nginx-agent.conf")) 
g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("command:")) - svcAcctObj := objects[2] + svcAcctObj := objects[3] svcAcct, ok := svcAcctObj.(*corev1.ServiceAccount) g.Expect(ok).To(BeTrue()) validateMeta(svcAcct) - svcObj := objects[3] + svcObj := objects[4] svc, ok := svcObj.(*corev1.Service) g.Expect(ok).To(BeTrue()) validateMeta(svc) @@ -142,7 +162,7 @@ func TestBuildNginxResourceObjects(t *testing.T) { }, })) - depObj := objects[4] + depObj := objects[5] dep, ok := depObj.(*appsv1.Deployment) g.Expect(ok).To(BeTrue()) validateMeta(dep) @@ -186,18 +206,29 @@ func TestBuildNginxResourceObjects_NginxProxyConfig(t *testing.T) { t.Parallel() g := NewWithT(t) + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + fakeClient := fake.NewFakeClient(agentTLSSecret) + provisioner := &NginxProvisioner{ cfg: Config{ GatewayPodConfig: &config.GatewayPodConfig{ - Namespace: "default", + Namespace: ngfNamespace, Version: "1.0.0", }, + AgentTLSSecretName: agentTLSTestSecretName, }, baseLabelSelector: metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "nginx", }, }, + k8sClient: fakeClient, } gateway := &gatewayv1.Gateway{ @@ -247,21 +278,21 @@ func TestBuildNginxResourceObjects_NginxProxyConfig(t *testing.T) { objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(objects).To(HaveLen(5)) + g.Expect(objects).To(HaveLen(6)) - cmObj := objects[0] + cmObj := objects[1] cm, ok := cmObj.(*corev1.ConfigMap) g.Expect(ok).To(BeTrue()) g.Expect(cm.Data).To(HaveKey("main.conf")) g.Expect(cm.Data["main.conf"]).To(ContainSubstring("debug")) - cmObj = objects[1] + cmObj = objects[2] cm, ok = cmObj.(*corev1.ConfigMap) g.Expect(ok).To(BeTrue()) g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("level: debug")) g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("port: 8080")) - svcObj := objects[3] + svcObj := objects[4] svc, ok := svcObj.(*corev1.Service) g.Expect(ok).To(BeTrue()) g.Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) @@ -269,7 +300,7 @@ func TestBuildNginxResourceObjects_NginxProxyConfig(t *testing.T) { g.Expect(svc.Spec.LoadBalancerIP).To(Equal("1.2.3.4")) g.Expect(svc.Spec.LoadBalancerSourceRanges).To(Equal([]string{"5.6.7.8"})) - depObj := objects[4] + depObj := objects[5] dep, ok := depObj.(*appsv1.Deployment) g.Expect(ok).To(BeTrue()) @@ -293,6 +324,13 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { t.Parallel() g := NewWithT(t) + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } jwtSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: jwtTestSecretName, @@ -315,7 +353,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { Data: map[string][]byte{"tls.crt": []byte("tls")}, } - fakeClient := fake.NewFakeClient(jwtSecret, caSecret, clientSSLSecret) + fakeClient := fake.NewFakeClient(agentTLSSecret, jwtSecret, caSecret, clientSSLSecret) provisioner := &NginxProvisioner{ cfg: Config{ @@ -330,6 +368,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { Endpoint: "test.com", SkipVerify: true, }, + AgentTLSSecretName: agentTLSTestSecretName, }, k8sClient: fakeClient, baseLabelSelector: metav1.LabelSelector{ @@ -360,7 +399,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) 
{ objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(objects).To(HaveLen(8)) + g.Expect(objects).To(HaveLen(9)) expLabels := map[string]string{ "label": "value", @@ -372,7 +411,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { "annotation": "value", } - secretObj := objects[0] + secretObj := objects[1] secret, ok := secretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, jwtTestSecretName))) @@ -381,7 +420,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { g.Expect(secret.Data).To(HaveKey("license.jwt")) g.Expect(secret.Data["license.jwt"]).To(Equal([]byte("jwt"))) - secretObj = objects[1] + secretObj = objects[2] secret, ok = secretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, caTestSecretName))) @@ -390,7 +429,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { g.Expect(secret.Data).To(HaveKey("ca.crt")) g.Expect(secret.Data["ca.crt"]).To(Equal([]byte("ca"))) - secretObj = objects[2] + secretObj = objects[3] secret, ok = secretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, clientTestSecretName))) @@ -399,7 +438,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { g.Expect(secret.Data).To(HaveKey("tls.crt")) g.Expect(secret.Data["tls.crt"]).To(Equal([]byte("tls"))) - cmObj := objects[3] + cmObj := objects[4] cm, ok := cmObj.(*corev1.ConfigMap) g.Expect(ok).To(BeTrue()) g.Expect(cm.Data).To(HaveKey("mgmt.conf")) @@ -409,13 +448,13 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate")) g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate_key")) - cmObj = objects[4] + cmObj = objects[5] cm, ok = cmObj.(*corev1.ConfigMap) g.Expect(ok).To(BeTrue()) g.Expect(cm.Data).To(HaveKey("nginx-agent.conf")) g.Expect(cm.Data["nginx-agent.conf"]).To(ContainSubstring("api-action")) - depObj := objects[7] + depObj := objects[8] dep, ok := depObj.(*appsv1.Deployment) g.Expect(ok).To(BeTrue()) @@ -439,6 +478,14 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { t.Parallel() g := NewWithT(t) + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + dockerSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: dockerTestSecretName, @@ -464,7 +511,7 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { }, Data: map[string][]byte{"data": []byte("docker-registry2")}, } - fakeClient := fake.NewFakeClient(dockerSecret, dockerSecretRegistry1, dockerSecretRegistry2) + fakeClient := fake.NewFakeClient(agentTLSSecret, dockerSecret, dockerSecretRegistry1, dockerSecretRegistry2) provisioner := &NginxProvisioner{ cfg: Config{ @@ -472,6 +519,7 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { Namespace: ngfNamespace, }, NginxDockerSecretNames: []string{dockerTestSecretName, dockerSecretRegistry1Name, dockerSecretRegistry2Name}, + AgentTLSSecretName: agentTLSTestSecretName, }, k8sClient: fakeClient, baseLabelSelector: metav1.LabelSelector{ @@ -492,7 +540,7 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { objects, err := 
provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(objects).To(HaveLen(8)) + g.Expect(objects).To(HaveLen(9)) expLabels := map[string]string{ "app": "nginx", @@ -500,27 +548,33 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { "app.kubernetes.io/name": "gw-nginx", } - // the (docker-only) secret order in the object list is sorted by secret name - secretObj := objects[0] secret, ok := secretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) + g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, agentTLSTestSecretName))) + g.Expect(secret.GetLabels()).To(Equal(expLabels)) + + // the (docker-only) secret order in the object list is sorted by secret name + + secretObj = objects[1] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerTestSecretName))) g.Expect(secret.GetLabels()).To(Equal(expLabels)) - registry1SecretObj := objects[1] + registry1SecretObj := objects[2] secret, ok = registry1SecretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerSecretRegistry1Name))) g.Expect(secret.GetLabels()).To(Equal(expLabels)) - registry2SecretObj := objects[2] + registry2SecretObj := objects[3] secret, ok = registry2SecretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) g.Expect(secret.GetName()).To(Equal(controller.CreateNginxResourceName(resourceName, dockerSecretRegistry2Name))) g.Expect(secret.GetLabels()).To(Equal(expLabels)) - depObj := objects[7] + depObj := objects[8] dep, ok := depObj.(*appsv1.Deployment) g.Expect(ok).To(BeTrue()) @@ -553,10 +607,14 @@ func TestGetAndUpdateSecret_NotFound(t *testing.T) { k8sClient: fakeClient, } - _, err := provisioner.getAndUpdateSecret("non-existent-secret", metav1.ObjectMeta{ - Name: "new-secret", - Namespace: "default", - }) + _, err := provisioner.getAndUpdateSecret( + "non-existent-secret", + metav1.ObjectMeta{ + Name: "new-secret", + Namespace: "default", + }, + corev1.SecretTypeOpaque, + ) g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(ContainSubstring("error getting secret")) @@ -575,7 +633,7 @@ func TestBuildNginxResourceObjectsForDeletion(t *testing.T) { objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) - g.Expect(objects).To(HaveLen(5)) + g.Expect(objects).To(HaveLen(6)) validateMeta := func(obj client.Object, name string) { g.Expect(obj.GetName()).To(Equal(name)) @@ -621,6 +679,7 @@ func TestBuildNginxResourceObjectsForDeletion_Plus(t *testing.T) { ClientSSLSecretName: clientTestSecretName, }, NginxDockerSecretNames: []string{dockerTestSecretName}, + AgentTLSSecretName: agentTLSTestSecretName, }, } @@ -631,7 +690,7 @@ func TestBuildNginxResourceObjectsForDeletion_Plus(t *testing.T) { objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) - g.Expect(objects).To(HaveLen(9)) + g.Expect(objects).To(HaveLen(10)) validateMeta := func(obj client.Object, name string) { g.Expect(obj.GetName()).To(Equal(name)) @@ -668,7 +727,7 @@ func TestBuildNginxResourceObjectsForDeletion_Plus(t *testing.T) { g.Expect(ok).To(BeTrue()) validateMeta(secret, controller.CreateNginxResourceName( deploymentNSName.Name, - provisioner.cfg.NginxDockerSecretNames[0], + provisioner.cfg.AgentTLSSecretName, )) secretObj = objects[6] @@ -676,12 +735,20 @@ func 
TestBuildNginxResourceObjectsForDeletion_Plus(t *testing.T) { g.Expect(ok).To(BeTrue()) validateMeta(secret, controller.CreateNginxResourceName( deploymentNSName.Name, - provisioner.cfg.PlusUsageConfig.CASecretName, + provisioner.cfg.NginxDockerSecretNames[0], )) secretObj = objects[7] secret, ok = secretObj.(*corev1.Secret) g.Expect(ok).To(BeTrue()) + validateMeta(secret, controller.CreateNginxResourceName( + deploymentNSName.Name, + provisioner.cfg.PlusUsageConfig.CASecretName, + )) + + secretObj = objects[8] + secret, ok = secretObj.(*corev1.Secret) + g.Expect(ok).To(BeTrue()) validateMeta(secret, controller.CreateNginxResourceName( deploymentNSName.Name, provisioner.cfg.PlusUsageConfig.ClientSSLSecretName, diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go index 71439f4b4e..e4a0ce7bee 100644 --- a/internal/mode/static/provisioner/provisioner.go +++ b/internal/mode/static/provisioner/provisioner.go @@ -42,7 +42,8 @@ type Provisioner interface { // Config is the configuration for the Provisioner. type Config struct { - GCName string + GCName string + AgentTLSSecretName string DeploymentStore agent.DeploymentStorer StatusQueue *status.Queue @@ -81,7 +82,13 @@ func NewNginxProvisioner( caSecretName = cfg.PlusUsageConfig.CASecretName clientSSLSecretName = cfg.PlusUsageConfig.ClientSSLSecretName } - store := newStore(cfg.NginxDockerSecretNames, jwtSecretName, caSecretName, clientSSLSecretName) + store := newStore( + cfg.NginxDockerSecretNames, + cfg.AgentTLSSecretName, + jwtSecretName, + caSecretName, + clientSSLSecretName, + ) selector := metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -114,6 +121,7 @@ func NewNginxProvisioner( selector, cfg.GatewayPodConfig.Namespace, cfg.NginxDockerSecretNames, + cfg.AgentTLSSecretName, cfg.PlusUsageConfig, ) if err != nil { @@ -354,6 +362,10 @@ func (p *NginxProvisioner) deprovisionNginx(ctx context.Context, gatewayNSName t // isUserSecret determines if the provided secret name is a special user secret, // for example an NGINX docker registry secret or NGINX Plus secret. 
func (p *NginxProvisioner) isUserSecret(name string) bool { + if name == p.cfg.AgentTLSSecretName { + return true + } + if slices.Contains(p.cfg.NginxDockerSecretNames, name) { return true } diff --git a/internal/mode/static/provisioner/provisioner_test.go b/internal/mode/static/provisioner/provisioner_test.go index 8ef7873386..987b835352 100644 --- a/internal/mode/static/provisioner/provisioner_test.go +++ b/internal/mode/static/provisioner/provisioner_test.go @@ -27,12 +27,13 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" ) -var ( - jwtTestSecretName = "jwt-secret" - caTestSecretName = "ca-secret" - clientTestSecretName = "client-secret" - dockerTestSecretName = "docker-secret" - ngfNamespace = "nginx-gateway" +const ( + agentTLSTestSecretName = "agent-tls-secret" + jwtTestSecretName = "jwt-secret" + caTestSecretName = "ca-secret" + clientTestSecretName = "client-secret" + dockerTestSecretName = "docker-secret" + ngfNamespace = "nginx-gateway" ) func createScheme() *runtime.Scheme { @@ -64,6 +65,12 @@ func expectResourcesToExist(g *WithT, k8sClient client.Client, nsName types.Name } g.Expect(k8sClient.Get(context.TODO(), agentCM, &corev1.ConfigMap{})).To(Succeed()) + agentTLSSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, agentTLSTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentTLSSecret, &corev1.Secret{})).To(Succeed()) + if !plus { return } @@ -112,6 +119,12 @@ func expectResourcesToNotExist(g *WithT, k8sClient client.Client, nsName types.N } g.Expect(k8sClient.Get(context.TODO(), agentCM, &corev1.ConfigMap{})).ToNot(Succeed()) + agentTLSSecret := types.NamespacedName{ + Name: controller.CreateNginxResourceName(nsName.Name, agentTLSTestSecretName), + Namespace: nsName.Namespace, + } + g.Expect(k8sClient.Get(context.TODO(), agentTLSSecret, &corev1.Secret{})).ToNot(Succeed()) + jwtSecret := types.NamespacedName{ Name: controller.CreateNginxResourceName(nsName.Name, jwtTestSecretName), Namespace: nsName.Namespace, @@ -144,7 +157,13 @@ func defaultNginxProvisioner( deploymentStore := &agentfakes.FakeDeploymentStorer{} return &NginxProvisioner{ - store: newStore([]string{dockerTestSecretName}, jwtTestSecretName, caTestSecretName, clientTestSecretName), + store: newStore( + []string{dockerTestSecretName}, + agentTLSTestSecretName, + jwtTestSecretName, + caTestSecretName, + clientTestSecretName, + ), k8sClient: fakeClient, cfg: Config{ DeploymentStore: deploymentStore, @@ -162,6 +181,7 @@ func defaultNginxProvisioner( ClientSSLSecretName: clientTestSecretName, }, NginxDockerSecretNames: []string{dockerTestSecretName}, + AgentTLSSecretName: agentTLSTestSecretName, }, leader: true, }, fakeClient, deploymentStore @@ -232,6 +252,12 @@ func TestRegisterGateway(t *testing.T) { objects := []client.Object{ gateway.Source, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: jwtTestSecretName, @@ -328,7 +354,14 @@ func TestProvisionerRestartsDeployment(t *testing.T) { } // provision everything first - provisioner, fakeClient, _ := defaultNginxProvisioner(gateway.Source) + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + provisioner, fakeClient, _ := defaultNginxProvisioner(gateway.Source, agentTLSSecret) 
provisioner.cfg.Plus = false provisioner.cfg.NginxDockerSecretNames = nil diff --git a/internal/mode/static/provisioner/store.go b/internal/mode/static/provisioner/store.go index ac63beb907..e28a42e103 100644 --- a/internal/mode/static/provisioner/store.go +++ b/internal/mode/static/provisioner/store.go @@ -23,6 +23,7 @@ type NginxResources struct { ServiceAccount metav1.ObjectMeta BootstrapConfigMap metav1.ObjectMeta AgentConfigMap metav1.ObjectMeta + AgentTLSSecret metav1.ObjectMeta PlusJWTSecret metav1.ObjectMeta PlusClientSSLSecret metav1.ObjectMeta PlusCASecret metav1.ObjectMeta @@ -37,7 +38,9 @@ type store struct { // nginxResources is a map of Gateway NamespacedNames and their associated nginx resources. nginxResources map[types.NamespacedName]*NginxResources - dockerSecretNames map[string]struct{} + dockerSecretNames map[string]struct{} + agentTLSSecretName string + // NGINX Plus secrets jwtSecretName string caSecretName string @@ -48,6 +51,7 @@ type store struct { func newStore( dockerSecretNames []string, + agentTLSSecretName, jwtSecretName, caSecretName, clientSSLSecretName string, @@ -61,6 +65,7 @@ func newStore( gateways: make(map[types.NamespacedName]*gatewayv1.Gateway), nginxResources: make(map[types.NamespacedName]*NginxResources), dockerSecretNames: dockerSecretNamesMap, + agentTLSSecretName: agentTLSSecretName, jwtSecretName: jwtSecretName, caSecretName: caSecretName, clientSSLSecretName: clientSSLSecretName, @@ -167,6 +172,7 @@ func (s *store) registerConfigMapInGatewayConfig(obj *corev1.ConfigMap, gatewayN } } +//nolint:gocyclo // will refactor at some point func (s *store) registerSecretInGatewayConfig(obj *corev1.Secret, gatewayNSName types.NamespacedName) { hasSuffix := func(str, suffix string) bool { return suffix != "" && strings.HasSuffix(str, suffix) @@ -174,6 +180,10 @@ func (s *store) registerSecretInGatewayConfig(obj *corev1.Secret, gatewayNSName if cfg, ok := s.nginxResources[gatewayNSName]; !ok { switch { + case hasSuffix(obj.GetName(), s.agentTLSSecretName): + s.nginxResources[gatewayNSName] = &NginxResources{ + AgentTLSSecret: obj.ObjectMeta, + } case hasSuffix(obj.GetName(), s.jwtSecretName): s.nginxResources[gatewayNSName] = &NginxResources{ PlusJWTSecret: obj.ObjectMeta, @@ -198,6 +208,8 @@ func (s *store) registerSecretInGatewayConfig(obj *corev1.Secret, gatewayNSName } } else { switch { + case hasSuffix(obj.GetName(), s.agentTLSSecretName): + cfg.AgentTLSSecret = obj.ObjectMeta case hasSuffix(obj.GetName(), s.jwtSecretName): cfg.PlusJWTSecret = obj.ObjectMeta case hasSuffix(obj.GetName(), s.caSecretName): @@ -284,6 +296,10 @@ func (s *store) gatewayExistsForResource(object client.Object, nsName types.Name } func secretResourceMatches(resources *NginxResources, nsName types.NamespacedName) bool { + if resourceMatches(resources.AgentTLSSecret, nsName) { + return true + } + for _, secret := range resources.DockerSecrets { if resourceMatches(secret, nsName) { return true diff --git a/internal/mode/static/provisioner/store_test.go b/internal/mode/static/provisioner/store_test.go index 079736fde0..814ef1bd79 100644 --- a/internal/mode/static/provisioner/store_test.go +++ b/internal/mode/static/provisioner/store_test.go @@ -22,10 +22,11 @@ func TestNewStore(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore([]string{"docker-secret"}, "jwt-secret", "ca-secret", "client-ssl-secret") + store := newStore([]string{"docker-secret"}, "agent-tls-secret", "jwt-secret", "ca-secret", "client-ssl-secret") g.Expect(store).NotTo(BeNil()) 
g.Expect(store.dockerSecretNames).To(HaveKey("docker-secret")) + g.Expect(store.agentTLSSecretName).To(Equal("agent-tls-secret")) g.Expect(store.jwtSecretName).To(Equal("jwt-secret")) g.Expect(store.caSecretName).To(Equal("ca-secret")) g.Expect(store.clientSSLSecretName).To(Equal("client-ssl-secret")) @@ -35,7 +36,7 @@ func TestUpdateGateway(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore(nil, "", "", "") + store := newStore(nil, "", "", "", "") gateway := &gatewayv1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Name: "test-gateway", @@ -54,7 +55,7 @@ func TestDeleteGateway(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore(nil, "", "", "") + store := newStore(nil, "", "", "", "") nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} store.gateways[nsName] = &gatewayv1.Gateway{} @@ -68,7 +69,7 @@ func TestGetGateways(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore(nil, "", "", "") + store := newStore(nil, "", "", "", "") gateway1 := &gatewayv1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Name: "test-gateway-1", @@ -99,7 +100,7 @@ func TestRegisterResourceInGatewayConfig(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore([]string{"docker-secret"}, "jwt-secret", "ca-secret", "client-ssl-secret") + store := newStore([]string{"docker-secret"}, "agent-tls-secret", "jwt-secret", "ca-secret", "client-ssl-secret") nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} registerAndGetResources := func(obj interface{}) *NginxResources { @@ -198,6 +199,22 @@ func TestRegisterResourceInGatewayConfig(t *testing.T) { // clear out resources before next test store.deleteResourcesForGateway(nsName) + // Secret + agentTLSSecretMeta := metav1.ObjectMeta{ + Name: controller.CreateNginxResourceName(defaultMeta.Name, store.agentTLSSecretName), + Namespace: defaultMeta.Namespace, + } + agentTLSSecret := &corev1.Secret{ObjectMeta: agentTLSSecretMeta} + resources = registerAndGetResources(agentTLSSecret) + g.Expect(resources.AgentTLSSecret).To(Equal(agentTLSSecretMeta)) + + // Secret again, already exists + resources = registerAndGetResources(agentTLSSecret) + g.Expect(resources.AgentTLSSecret).To(Equal(agentTLSSecretMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + // Secret jwtSecretMeta := metav1.ObjectMeta{ Name: controller.CreateNginxResourceName(defaultMeta.Name, store.jwtSecretName), @@ -361,7 +378,7 @@ func TestDeleteResourcesForGateway(t *testing.T) { t.Parallel() g := NewWithT(t) - store := newStore(nil, "", "", "") + store := newStore(nil, "", "", "", "") nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} store.nginxResources[nsName] = &NginxResources{} @@ -373,7 +390,7 @@ func TestDeleteResourcesForGateway(t *testing.T) { func TestGatewayExistsForResource(t *testing.T) { t.Parallel() - store := newStore(nil, "", "", "") + store := newStore(nil, "", "", "", "") gateway := &graph.Gateway{} store.nginxResources[types.NamespacedName{Name: "test-gateway", Namespace: "default"}] = &NginxResources{ Gateway: gateway, @@ -397,6 +414,10 @@ func TestGatewayExistsForResource(t *testing.T) { Name: "test-agent-configmap", Namespace: "default", }, + AgentTLSSecret: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + }, PlusJWTSecret: metav1.ObjectMeta{ Name: "test-jwt-secret", Namespace: "default", @@ -472,6 +493,16 @@ func TestGatewayExistsForResource(t *testing.T) { }, expected: gateway, }, + { + name: "Agent TLS Secret exists", 
+ object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + }, + }, + expected: gateway, + }, { name: "JWT Secret exists", object: &corev1.Secret{ diff --git a/internal/mode/static/provisioner/templates.go b/internal/mode/static/provisioner/templates.go index 05e1b96623..e58ee3abec 100644 --- a/internal/mode/static/provisioner/templates.go +++ b/internal/mode/static/provisioner/templates.go @@ -33,6 +33,13 @@ const agentTemplateText = `command: server: host: {{ .ServiceName }}.{{ .Namespace }}.svc port: 443 + auth: + tokenpath: /var/run/secrets/ngf/serviceaccount/token + tls: + cert: /var/run/secrets/ngf/tls.crt + key: /var/run/secrets/ngf/tls.key + ca: /var/run/secrets/ngf/ca.crt + server_name: {{ .ServiceName }}.{{ .Namespace }}.svc allowed_directories: - /etc/nginx - /usr/share/nginx From bf3faa9e8c12a856b8d859feedc115a72fdaf65a Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 31 Mar 2025 10:49:03 -0600 Subject: [PATCH 18/32] CP/DP Split: handle kill signal (#3260) Problem: The data plane container was not properly handling the kill signal when the Pod was Terminated. Solution: Update the entrypoint to catch the proper signals. --- build/entrypoint.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/build/entrypoint.sh b/build/entrypoint.sh index 4ccd6be3a2..9e9552b338 100755 --- a/build/entrypoint.sh +++ b/build/entrypoint.sh @@ -6,11 +6,24 @@ handle_term() { echo "received TERM signal" echo "stopping nginx-agent ..." kill -TERM "${agent_pid}" 2>/dev/null + wait -n ${agent_pid} echo "stopping nginx ..." kill -TERM "${nginx_pid}" 2>/dev/null + wait -n ${nginx_pid} +} + +handle_quit() { + echo "received QUIT signal" + echo "stopping nginx-agent ..." + kill -QUIT "${agent_pid}" 2>/dev/null + wait -n ${agent_pid} + echo "stopping nginx ..." + kill -QUIT "${nginx_pid}" 2>/dev/null + wait -n ${nginx_pid} } trap 'handle_term' TERM +trap 'handle_quit' QUIT rm -rf /var/run/nginx/*.sock From 0592d906e09d7ab7da86e27c4fecc54ef0518e29 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 7 Apr 2025 07:36:03 -0600 Subject: [PATCH 19/32] CP/DP Split: Openshift support (#3278) Problem: Now that we have additional pods in the new architecture, we need the proper SecurityContextConstraints for running in Openshift. Solution: Create an SCC for the cert-generator and an SCC for nginx data plane pods on startup. A Role and RoleBinding are created when deploying nginx to link to the SCC. 
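A condensed sketch of the Role/RoleBinding pair this change provisions alongside each nginx Deployment so that its ServiceAccount is allowed to "use" the nginx SCC. It mirrors the buildOpenshiftObjects helper added further down in this patch; the resource names used here (gw-nginx, nginx-gateway-scc-nginx) are illustrative only.

// Sketch: the RBAC objects that bind an nginx ServiceAccount to the SCC named
// by --nginx-scc. Values are illustrative; see buildOpenshiftObjects below for
// the version the provisioner actually creates.
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func buildSCCRBAC(name, namespace, sccName string) (*rbacv1.Role, *rbacv1.RoleBinding) {
	meta := metav1.ObjectMeta{Name: name, Namespace: namespace}

	// Role granting "use" on the single named SecurityContextConstraints object.
	role := &rbacv1.Role{
		ObjectMeta: meta,
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups:     []string{"security.openshift.io"},
				Resources:     []string{"securitycontextconstraints"},
				ResourceNames: []string{sccName},
				Verbs:         []string{"use"},
			},
		},
	}

	// RoleBinding tying the nginx ServiceAccount (same name and namespace as
	// the Deployment) to that Role.
	binding := &rbacv1.RoleBinding{
		ObjectMeta: meta,
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Role",
			Name:     name,
		},
		Subjects: []rbacv1.Subject{
			{Kind: rbacv1.ServiceAccountKind, Name: name, Namespace: namespace},
		},
	}

	return role, binding
}

func main() {
	role, binding := buildSCCRBAC("gw-nginx", "default", "nginx-gateway-scc-nginx")
	fmt.Println(role.Name, binding.RoleRef.Name)
}

Because the grant is scoped by resourceNames to a single SCC with the "use" verb, the nginx Pods can run under that SCC without gaining access to any other SCC in the cluster.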
--- charts/nginx-gateway-fabric/README.md | 6 - charts/nginx-gateway-fabric/README.md.gotmpl | 6 - .../templates/certs-job.yaml | 44 +++++++ .../templates/clusterrole.yaml | 11 ++ .../templates/deployment.yaml | 3 + .../nginx-gateway-fabric/templates/scc.yaml | 44 ++++++- cmd/gateway/commands.go | 12 ++ cmd/gateway/commands_test.go | 17 +++ deploy/openshift/deploy.yaml | 100 +++++++++++++++ internal/mode/static/config/config.go | 2 + internal/mode/static/manager.go | 3 + internal/mode/static/provisioner/eventloop.go | 65 ++++++++-- internal/mode/static/provisioner/handler.go | 6 +- internal/mode/static/provisioner/objects.go | 68 +++++++++- .../mode/static/provisioner/objects_test.go | 91 ++++++++++++++ .../static/provisioner/openshift/openshift.go | 38 ++++++ .../openshiftfakes/fake_apichecker.go | 117 ++++++++++++++++++ .../mode/static/provisioner/provisioner.go | 13 ++ .../static/provisioner/provisioner_test.go | 2 + internal/mode/static/provisioner/setter.go | 24 ++++ internal/mode/static/provisioner/store.go | 28 +++++ .../mode/static/provisioner/store_test.go | 53 ++++++++ 22 files changed, 720 insertions(+), 33 deletions(-) create mode 100644 internal/mode/static/provisioner/openshift/openshift.go create mode 100644 internal/mode/static/provisioner/openshift/openshiftfakes/fake_apichecker.go diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 927d4779d6..3a7a41cdf7 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -115,12 +115,6 @@ To use a NodePort Service instead: helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.type=NodePort ``` -To disable the creation of a Service: - -```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.create=false -``` - ## Upgrading the Chart > [!NOTE] diff --git a/charts/nginx-gateway-fabric/README.md.gotmpl b/charts/nginx-gateway-fabric/README.md.gotmpl index 6306d2a647..f757a7cc8f 100644 --- a/charts/nginx-gateway-fabric/README.md.gotmpl +++ b/charts/nginx-gateway-fabric/README.md.gotmpl @@ -113,12 +113,6 @@ To use a NodePort Service instead: helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.type=NodePort ``` -To disable the creation of a Service: - -```shell -helm install ngf oci://ghcr.io/nginx/charts/nginx-gateway-fabric --create-namespace -n nginx-gateway --set nginx.service.create=false -``` - ## Upgrading the Chart > [!NOTE] diff --git a/charts/nginx-gateway-fabric/templates/certs-job.yaml b/charts/nginx-gateway-fabric/templates/certs-job.yaml index a2b529ae1b..96da6289e2 100644 --- a/charts/nginx-gateway-fabric/templates/certs-job.yaml +++ b/charts/nginx-gateway-fabric/templates/certs-job.yaml @@ -56,6 +56,50 @@ subjects: name: {{ include "nginx-gateway.fullname" . }}-cert-generator namespace: {{ .Release.Namespace }} --- +{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: {{ include "nginx-gateway.scc-name" . }}-cert-generator + labels: + {{- include "nginx-gateway.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook-weight": "-1" + "helm.sh/hook": pre-install +allowPrivilegeEscalation: false +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: true +runAsUser: + type: MustRunAsRange + uidRangeMin: 101 + uidRangeMax: 101 +fsGroup: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +supplementalGroups: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default +users: +- {{ printf "system:serviceaccount:%s:%s-cert-generator" .Release.Namespace (include "nginx-gateway.fullname" .) }} +requiredDropCapabilities: +- ALL +volumes: +- projected +--- +{{- end }} apiVersion: batch/v1 kind: Job metadata: diff --git a/charts/nginx-gateway-fabric/templates/clusterrole.yaml b/charts/nginx-gateway-fabric/templates/clusterrole.yaml index 479c22adbc..6266134602 100644 --- a/charts/nginx-gateway-fabric/templates/clusterrole.yaml +++ b/charts/nginx-gateway-fabric/templates/clusterrole.yaml @@ -150,8 +150,19 @@ rules: - securitycontextconstraints resourceNames: - {{ include "nginx-gateway.scc-name" . }} + - {{ include "nginx-gateway.scc-name" . }}-nginx verbs: - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: - create + - update + - delete + - list + - get - watch {{- end }} diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml index e742e99ec5..8fee4b36f2 100644 --- a/charts/nginx-gateway-fabric/templates/deployment.yaml +++ b/charts/nginx-gateway-fabric/templates/deployment.yaml @@ -99,6 +99,9 @@ spec: {{- if .Values.nginxGateway.snippetsFilters.enable }} - --snippets-filters {{- end }} + {{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} + - --nginx-scc={{ include "nginx-gateway.scc-name" . }}-nginx + {{- end}} env: - name: POD_NAMESPACE valueFrom: diff --git a/charts/nginx-gateway-fabric/templates/scc.yaml b/charts/nginx-gateway-fabric/templates/scc.yaml index 1564e84e32..783300c3fe 100644 --- a/charts/nginx-gateway-fabric/templates/scc.yaml +++ b/charts/nginx-gateway-fabric/templates/scc.yaml @@ -1,9 +1,10 @@ -# TODO(sberman): will need an SCC for nginx ServiceAccounts as well. {{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} kind: SecurityContextConstraints apiVersion: security.openshift.io/v1 metadata: name: {{ include "nginx-gateway.scc-name" . }} + labels: + {{- include "nginx-gateway.labels" . | nindent 4 }} allowPrivilegeEscalation: false allowHostDirVolumePlugin: false allowHostIPC: false @@ -36,4 +37,45 @@ requiredDropCapabilities: - ALL volumes: - secret +--- +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: {{ include "nginx-gateway.scc-name" . }}-nginx + labels: + {{- include "nginx-gateway.labels" . 
| nindent 4 }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: true +runAsUser: + type: MustRunAsRange + uidRangeMin: 101 + uidRangeMax: 101 +fsGroup: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +supplementalGroups: + type: MustRunAs + ranges: + - min: 1001 + max: 1001 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default +allowedCapabilities: +- NET_BIND_SERVICE +requiredDropCapabilities: +- ALL +volumes: +- emptyDir +- secret +- configMap +- projected {{- end }} diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index 4d875cb6ac..853ddd0f72 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -81,6 +81,7 @@ func createControllerCommand() *cobra.Command { usageReportClientSSLSecretFlag = "usage-report-client-ssl-secret" //nolint:gosec // not credentials usageReportCASecretFlag = "usage-report-ca-secret" //nolint:gosec // not credentials snippetsFiltersFlag = "snippets-filters" + nginxSCCFlag = "nginx-scc" ) // flag values @@ -105,6 +106,9 @@ func createControllerCommand() *cobra.Command { validator: validateResourceName, value: agentTLSSecret, } + nginxSCCName = stringValidatingValue{ + validator: validateResourceName, + } disableMetrics bool metricsSecure bool metricsListenPort = intValidatingValue{ @@ -264,6 +268,7 @@ func createControllerCommand() *cobra.Command { SnippetsFilters: snippetsFilters, NginxDockerSecretNames: nginxDockerSecrets.values, AgentTLSSecretName: agentTLSSecretName.value, + NGINXSCCName: nginxSCCName.value, } if err := static.StartManager(conf); err != nil { @@ -457,6 +462,13 @@ func createControllerCommand() *cobra.Command { "generated NGINX config for HTTPRoute and GRPCRoute resources.", ) + cmd.Flags().Var( + &nginxSCCName, + nginxSCCFlag, + `The name of the SecurityContextConstraints to be used with the NGINX data plane Pods.`+ + ` Only applicable in OpenShift.`, + ) + return cmd } diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go index 8662c4ef0d..0cc031a1c5 100644 --- a/cmd/gateway/commands_test.go +++ b/cmd/gateway/commands_test.go @@ -158,6 +158,7 @@ func TestControllerCmdFlagValidation(t *testing.T) { "--usage-report-ca-secret=ca-secret", "--usage-report-client-ssl-secret=client-secret", "--snippets-filters", + "--nginx-scc=nginx-sscc-name", }, wantErr: false, }, @@ -445,6 +446,22 @@ func TestControllerCmdFlagValidation(t *testing.T) { }, wantErr: true, }, + { + name: "nginx-scc is set to empty string", + args: []string{ + "--nginx-scc=", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "" for "--nginx-scc" flag: must be set`, + }, + { + name: "nginx-scc is invalid", + args: []string{ + "--nginx-scc=!@#$", + }, + wantErr: true, + expectedErrPrefix: `invalid argument "!@#$" for "--nginx-scc" flag: invalid format: `, + }, } // common flags validation is tested separately diff --git a/deploy/openshift/deploy.yaml b/deploy/openshift/deploy.yaml index d61f5e49ac..d5bc82d23f 100644 --- a/deploy/openshift/deploy.yaml +++ b/deploy/openshift/deploy.yaml @@ -175,11 +175,22 @@ rules: - security.openshift.io resourceNames: - nginx-gateway-scc + - nginx-gateway-scc-nginx resources: - securitycontextconstraints verbs: - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: - create + - update + - delete + - list + - get - watch --- apiVersion: rbac.authorization.k8s.io/v1 @@ -272,6 +283,7 @@ spec: - 
--metrics-port=9113 - --health-port=8081 - --leader-election-lock-name=nginx-gateway-leader-election + - --nginx-scc=nginx-gateway-scc-nginx env: - name: POD_NAMESPACE valueFrom: @@ -442,6 +454,10 @@ fsGroup: type: MustRunAs kind: SecurityContextConstraints metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge name: nginx-gateway-scc readOnlyRootFilesystem: true requiredDropCapabilities: @@ -463,3 +479,87 @@ users: - system:serviceaccount:nginx-gateway:nginx-gateway volumes: - secret +--- +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: false +allowPrivilegedContainer: false +apiVersion: security.openshift.io/v1 +fsGroup: + ranges: + - max: 1001 + min: 1001 + type: MustRunAs +kind: SecurityContextConstraints +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-scc-cert-generator +readOnlyRootFilesystem: true +requiredDropCapabilities: +- ALL +runAsUser: + type: MustRunAsRange + uidRangeMax: 101 + uidRangeMin: 101 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default +supplementalGroups: + ranges: + - max: 1001 + min: 1001 + type: MustRunAs +users: +- system:serviceaccount:nginx-gateway:nginx-gateway-cert-generator +volumes: +- projected +--- +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +- NET_BIND_SERVICE +apiVersion: security.openshift.io/v1 +fsGroup: + ranges: + - max: 1001 + min: 1001 + type: MustRunAs +kind: SecurityContextConstraints +metadata: + labels: + app.kubernetes.io/instance: nginx-gateway + app.kubernetes.io/name: nginx-gateway + app.kubernetes.io/version: edge + name: nginx-gateway-scc-nginx +readOnlyRootFilesystem: true +requiredDropCapabilities: +- ALL +runAsUser: + type: MustRunAsRange + uidRangeMax: 101 + uidRangeMin: 101 +seLinuxContext: + type: MustRunAs +seccompProfiles: +- runtime/default +supplementalGroups: + ranges: + - max: 1001 + min: 1001 + type: MustRunAs +volumes: +- emptyDir +- secret +- configMap +- projected diff --git a/internal/mode/static/config/config.go b/internal/mode/static/config/config.go index d37c6b55f7..9248070e03 100644 --- a/internal/mode/static/config/config.go +++ b/internal/mode/static/config/config.go @@ -34,6 +34,8 @@ type Config struct { GatewayClassName string // AgentTLSSecretName is the name of the TLS Secret used by NGINX Agent to communicate with the control plane. AgentTLSSecretName string + // NGINXSCCName is the name of the SecurityContextConstraints for the NGINX Pods. Only applicable in OpenShift. + NGINXSCCName string // NginxDockerSecretNames are the names of any Docker registry Secrets for the NGINX container. NginxDockerSecretNames []string // LeaderElection contains the configuration for leader election. 
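Since NGINXSCCName only matters on OpenShift, the provisioner first has to detect the platform; the check added later in this patch (provisioner/openshift/openshift.go) amounts to probing the discovery API for the security.openshift.io group. A minimal sketch of that probe, assuming only client-go:

// Sketch: detect OpenShift by looking for the security.openshift.io API group.
// Condensed from the APIChecker added in this patch; the counterfeiter
// interface and fakes are omitted.
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

func isOpenshift(cfg *rest.Config) (bool, error) {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return false, fmt.Errorf("creating discovery client: %w", err)
	}

	groups, err := dc.ServerGroups()
	if err != nil {
		return false, fmt.Errorf("listing server groups: %w", err)
	}

	for _, g := range groups.Groups {
		// Presence of the OpenShift security API group is treated as running
		// on OpenShift; on plain Kubernetes the group simply is not served.
		if g.Name == "security.openshift.io" {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		fmt.Println("not running in a cluster:", err)
		return
	}
	ok, err := isOpenshift(cfg)
	fmt.Println("openshift:", ok, "err:", err)
}

If the probe fails, the provisioner logs the error and treats the cluster as plain Kubernetes, so the Role and RoleBinding are simply not created.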
diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index bc68ba5510..7b3d28f140 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -14,6 +14,7 @@ import ( authv1 "k8s.io/api/authentication/v1" apiv1 "k8s.io/api/core/v1" discoveryV1 "k8s.io/api/discovery/v1" + rbacv1 "k8s.io/api/rbac/v1" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -93,6 +94,7 @@ func init() { utilruntime.Must(apiext.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) utilruntime.Must(authv1.AddToScheme(scheme)) + utilruntime.Must(rbacv1.AddToScheme(scheme)) } func StartManager(cfg config.Config) error { @@ -216,6 +218,7 @@ func StartManager(cfg config.Config) error { GatewayPodConfig: &cfg.GatewayPodConfig, GCName: cfg.GatewayClassName, AgentTLSSecretName: cfg.AgentTLSSecretName, + NGINXSCCName: cfg.NGINXSCCName, Plus: cfg.Plus, NginxDockerSecretNames: cfg.NginxDockerSecretNames, PlusUsageConfig: &cfg.UsageReportConfig, diff --git a/internal/mode/static/provisioner/eventloop.go b/internal/mode/static/provisioner/eventloop.go index e03a79ade8..5c5d4bea49 100644 --- a/internal/mode/static/provisioner/eventloop.go +++ b/internal/mode/static/provisioner/eventloop.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -31,6 +32,7 @@ func newEventLoop( dockerSecrets []string, agentTLSSecret string, usageConfig *config.UsageReportConfig, + isOpenshift bool, ) (*events.EventLoop, error) { nginxResourceLabelPredicate := predicate.NginxLabelPredicate(selector) @@ -50,11 +52,12 @@ func newEventLoop( } } - controllerRegCfgs := []struct { + type ctlrCfg struct { objectType ngftypes.ObjectType - name string options []controller.Option - }{ + } + + controllerRegCfgs := []ctlrCfg{ { objectType: &gatewayv1.Gateway{}, }, @@ -118,6 +121,33 @@ func newEventLoop( }, } + if isOpenshift { + controllerRegCfgs = append(controllerRegCfgs, + ctlrCfg{ + objectType: &rbacv1.Role{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + ctlrCfg{ + objectType: &rbacv1.RoleBinding{}, + options: []controller.Option{ + controller.WithK8sPredicate( + k8spredicate.And( + k8spredicate.GenerationChangedPredicate{}, + nginxResourceLabelPredicate, + ), + ), + }, + }, + ) + } + eventCh := make(chan any) for _, regCfg := range controllerRegCfgs { gvk, err := apiutil.GVKForObject(regCfg.objectType, mgr.GetScheme()) @@ -137,19 +167,28 @@ func newEventLoop( } } + objectList := []client.ObjectList{ + // GatewayList MUST be first in this list to ensure that we see it before attempting + // to provision or deprovision any nginx resources. 
+ &gatewayv1.GatewayList{}, + &appsv1.DeploymentList{}, + &corev1.ServiceList{}, + &corev1.ServiceAccountList{}, + &corev1.ConfigMapList{}, + &corev1.SecretList{}, + } + + if isOpenshift { + objectList = append(objectList, + &rbacv1.RoleList{}, + &rbacv1.RoleBindingList{}, + ) + } + firstBatchPreparer := events.NewFirstEventBatchPreparerImpl( mgr.GetCache(), []client.Object{}, - []client.ObjectList{ - // GatewayList MUST be first in this list to ensure that we see it before attempting - // to provision or deprovision any nginx resources. - &gatewayv1.GatewayList{}, - &appsv1.DeploymentList{}, - &corev1.ServiceList{}, - &corev1.ServiceAccountList{}, - &corev1.ConfigMapList{}, - &corev1.SecretList{}, - }, + objectList, ) eventLoop := events.NewEventLoop( diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go index 5058d25771..ef6ba76b82 100644 --- a/internal/mode/static/provisioner/handler.go +++ b/internal/mode/static/provisioner/handler.go @@ -9,6 +9,7 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -59,7 +60,7 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, switch obj := e.Resource.(type) { case *gatewayv1.Gateway: h.store.updateGateway(obj) - case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap: + case *appsv1.Deployment, *corev1.ServiceAccount, *corev1.ConfigMap, *rbacv1.Role, *rbacv1.RoleBinding: objLabels := labels.Set(obj.GetLabels()) if h.labelSelector.Matches(objLabels) { gatewayName := objLabels.Get(controller.GatewayLabel) @@ -114,7 +115,8 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, logger.Error(err, "error deprovisioning nginx resources") } h.store.deleteGateway(e.NamespacedName) - case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, *corev1.ConfigMap: + case *appsv1.Deployment, *corev1.Service, *corev1.ServiceAccount, + *corev1.ConfigMap, *rbacv1.Role, *rbacv1.RoleBinding: if err := h.reprovisionResources(ctx, e); err != nil { logger.Error(err, "error re-provisioning nginx resources") } diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index b62dc3b1f4..7c058bf784 100644 --- a/internal/mode/static/provisioner/objects.go +++ b/internal/mode/static/provisioner/objects.go @@ -11,6 +11,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" @@ -119,6 +120,11 @@ func (p *NginxProvisioner) buildNginxResourceObjects( ObjectMeta: objectMeta, } + var openshiftObjs []client.Object + if p.isOpenshift { + openshiftObjs = p.buildOpenshiftObjects(objectMeta) + } + ports := make(map[int32]struct{}) for _, listener := range gateway.Spec.Listeners { ports[int32(listener.Port)] = struct{}{} @@ -140,17 +146,21 @@ func (p *NginxProvisioner) buildNginxResourceObjects( ) // order to install resources: - // scc (if openshift) // secrets // configmaps // serviceaccount + // role/binding (if openshift) // service // deployment/daemonset - objects := make([]client.Object, 0, len(configmaps)+len(secrets)+3) + objects := make([]client.Object, 0, len(configmaps)+len(secrets)+len(openshiftObjs)+3) objects = append(objects, secrets...) 
objects = append(objects, configmaps...) - objects = append(objects, serviceAccount, service, deployment) + objects = append(objects, serviceAccount) + if p.isOpenshift { + objects = append(objects, openshiftObjs...) + } + objects = append(objects, service, deployment) return objects, err } @@ -366,6 +376,37 @@ func (p *NginxProvisioner) buildNginxConfigMaps( return []client.Object{bootstrapCM, agentCM} } +func (p *NginxProvisioner) buildOpenshiftObjects(objectMeta metav1.ObjectMeta) []client.Object { + role := &rbacv1.Role{ + ObjectMeta: objectMeta, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"security.openshift.io"}, + ResourceNames: []string{p.cfg.NGINXSCCName}, + Resources: []string{"securitycontextconstraints"}, + Verbs: []string{"use"}, + }, + }, + } + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: objectMeta, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: objectMeta.Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: objectMeta.Name, + Namespace: objectMeta.Namespace, + }, + }, + } + + return []client.Object{role, roleBinding} +} + func buildNginxService( objectMeta metav1.ObjectMeta, nProxyCfg *graph.EffectiveNginxProxy, @@ -606,6 +647,10 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( }, ImagePullSecrets: []corev1.LocalObjectReference{}, ServiceAccountName: objectMeta.Name, + SecurityContext: &corev1.PodSecurityContext{ + FSGroup: helpers.GetPointer[int64](1001), + RunAsNonRoot: helpers.GetPointer(true), + }, Volumes: []corev1.Volume{ { Name: "token", @@ -812,10 +857,10 @@ func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName // order to delete: // deployment/daemonset // service + // role/binding (if openshift) // serviceaccount // configmaps // secrets - // scc (if openshift) objectMeta := metav1.ObjectMeta{ Name: deploymentNSName.Name, @@ -828,6 +873,19 @@ func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName service := &corev1.Service{ ObjectMeta: objectMeta, } + + objects := []client.Object{deployment, service} + + if p.isOpenshift { + role := &rbacv1.Role{ + ObjectMeta: objectMeta, + } + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: objectMeta, + } + objects = append(objects, role, roleBinding) + } + serviceAccount := &corev1.ServiceAccount{ ObjectMeta: objectMeta, } @@ -844,7 +902,7 @@ func (p *NginxProvisioner) buildNginxResourceObjectsForDeletion(deploymentNSName }, } - objects := []client.Object{deployment, service, serviceAccount, bootstrapCM, agentCM} + objects = append(objects, serviceAccount, bootstrapCM, agentCM) agentTLSSecretName := controller.CreateNginxResourceName( deploymentNSName.Name, diff --git a/internal/mode/static/provisioner/objects_test.go b/internal/mode/static/provisioner/objects_test.go index 907a7e9fad..0871f846c9 100644 --- a/internal/mode/static/provisioner/objects_test.go +++ b/internal/mode/static/provisioner/objects_test.go @@ -7,6 +7,7 @@ import ( . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -592,6 +593,65 @@ func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { })) } +func TestBuildNginxResourceObjects_OpenShift(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + agentTLSSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentTLSTestSecretName, + Namespace: ngfNamespace, + }, + Data: map[string][]byte{"tls.crt": []byte("tls")}, + } + fakeClient := fake.NewFakeClient(agentTLSSecret) + + provisioner := &NginxProvisioner{ + isOpenshift: true, + cfg: Config{ + GatewayPodConfig: &config.GatewayPodConfig{ + Namespace: ngfNamespace, + }, + AgentTLSSecretName: agentTLSTestSecretName, + }, + k8sClient: fakeClient, + baseLabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + } + + gateway := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "default", + }, + } + + resourceName := "gw-nginx" + objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objects).To(HaveLen(8)) + + expLabels := map[string]string{ + "app": "nginx", + "gateway.networking.k8s.io/gateway-name": "gw", + "app.kubernetes.io/name": "gw-nginx", + } + + roleObj := objects[4] + role, ok := roleObj.(*rbacv1.Role) + g.Expect(ok).To(BeTrue()) + g.Expect(role.GetLabels()).To(Equal(expLabels)) + + roleBindingObj := objects[5] + roleBinding, ok := roleBindingObj.(*rbacv1.RoleBinding) + g.Expect(ok).To(BeTrue()) + g.Expect(roleBinding.GetLabels()).To(Equal(expLabels)) +} + func TestGetAndUpdateSecret_NotFound(t *testing.T) { t.Parallel() g := NewWithT(t) @@ -754,3 +814,34 @@ func TestBuildNginxResourceObjectsForDeletion_Plus(t *testing.T) { provisioner.cfg.PlusUsageConfig.ClientSSLSecretName, )) } + +func TestBuildNginxResourceObjectsForDeletion_OpenShift(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + provisioner := &NginxProvisioner{isOpenshift: true} + + deploymentNSName := types.NamespacedName{ + Name: "gw-nginx", + Namespace: "default", + } + + objects := provisioner.buildNginxResourceObjectsForDeletion(deploymentNSName) + + g.Expect(objects).To(HaveLen(8)) + + validateMeta := func(obj client.Object, name string) { + g.Expect(obj.GetName()).To(Equal(name)) + g.Expect(obj.GetNamespace()).To(Equal(deploymentNSName.Namespace)) + } + + roleObj := objects[2] + role, ok := roleObj.(*rbacv1.Role) + g.Expect(ok).To(BeTrue()) + validateMeta(role, deploymentNSName.Name) + + roleBindingObj := objects[3] + roleBinding, ok := roleBindingObj.(*rbacv1.RoleBinding) + g.Expect(ok).To(BeTrue()) + validateMeta(roleBinding, deploymentNSName.Name) +} diff --git a/internal/mode/static/provisioner/openshift/openshift.go b/internal/mode/static/provisioner/openshift/openshift.go new file mode 100644 index 0000000000..3c89c4c988 --- /dev/null +++ b/internal/mode/static/provisioner/openshift/openshift.go @@ -0,0 +1,38 @@ +package openshift + +import ( + "fmt" + + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" +) + +//go:generate go tool counterfeiter -generate + +//counterfeiter:generate . 
APIChecker + +type APIChecker interface { + IsOpenshift(*rest.Config) (bool, error) +} + +type APICheckerImpl struct{} + +func (o *APICheckerImpl) IsOpenshift(config *rest.Config) (bool, error) { + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return false, fmt.Errorf("error creating discovery client: %w", err) + } + + apiList, err := discoveryClient.ServerGroups() + if err != nil { + return false, fmt.Errorf("error getting server groups: %w", err) + } + + for _, group := range apiList.Groups { + if group.Name == "security.openshift.io" { + return true, nil + } + } + + return false, nil +} diff --git a/internal/mode/static/provisioner/openshift/openshiftfakes/fake_apichecker.go b/internal/mode/static/provisioner/openshift/openshiftfakes/fake_apichecker.go new file mode 100644 index 0000000000..d1e108544d --- /dev/null +++ b/internal/mode/static/provisioner/openshift/openshiftfakes/fake_apichecker.go @@ -0,0 +1,117 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package openshiftfakes + +import ( + "sync" + + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/openshift" + "k8s.io/client-go/rest" +) + +type FakeAPIChecker struct { + IsOpenshiftStub func(*rest.Config) (bool, error) + isOpenshiftMutex sync.RWMutex + isOpenshiftArgsForCall []struct { + arg1 *rest.Config + } + isOpenshiftReturns struct { + result1 bool + result2 error + } + isOpenshiftReturnsOnCall map[int]struct { + result1 bool + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeAPIChecker) IsOpenshift(arg1 *rest.Config) (bool, error) { + fake.isOpenshiftMutex.Lock() + ret, specificReturn := fake.isOpenshiftReturnsOnCall[len(fake.isOpenshiftArgsForCall)] + fake.isOpenshiftArgsForCall = append(fake.isOpenshiftArgsForCall, struct { + arg1 *rest.Config + }{arg1}) + stub := fake.IsOpenshiftStub + fakeReturns := fake.isOpenshiftReturns + fake.recordInvocation("IsOpenshift", []interface{}{arg1}) + fake.isOpenshiftMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeAPIChecker) IsOpenshiftCallCount() int { + fake.isOpenshiftMutex.RLock() + defer fake.isOpenshiftMutex.RUnlock() + return len(fake.isOpenshiftArgsForCall) +} + +func (fake *FakeAPIChecker) IsOpenshiftCalls(stub func(*rest.Config) (bool, error)) { + fake.isOpenshiftMutex.Lock() + defer fake.isOpenshiftMutex.Unlock() + fake.IsOpenshiftStub = stub +} + +func (fake *FakeAPIChecker) IsOpenshiftArgsForCall(i int) *rest.Config { + fake.isOpenshiftMutex.RLock() + defer fake.isOpenshiftMutex.RUnlock() + argsForCall := fake.isOpenshiftArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeAPIChecker) IsOpenshiftReturns(result1 bool, result2 error) { + fake.isOpenshiftMutex.Lock() + defer fake.isOpenshiftMutex.Unlock() + fake.IsOpenshiftStub = nil + fake.isOpenshiftReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeAPIChecker) IsOpenshiftReturnsOnCall(i int, result1 bool, result2 error) { + fake.isOpenshiftMutex.Lock() + defer fake.isOpenshiftMutex.Unlock() + fake.IsOpenshiftStub = nil + if fake.isOpenshiftReturnsOnCall == nil { + fake.isOpenshiftReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.isOpenshiftReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeAPIChecker) 
Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.isOpenshiftMutex.RLock() + defer fake.isOpenshiftMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeAPIChecker) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ openshift.APIChecker = new(FakeAPIChecker) diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go index e4a0ce7bee..3151d56c4f 100644 --- a/internal/mode/static/provisioner/provisioner.go +++ b/internal/mode/static/provisioner/provisioner.go @@ -27,6 +27,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/framework/events" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/openshift" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/status" ) @@ -44,6 +45,7 @@ type Provisioner interface { type Config struct { GCName string AgentTLSSecretName string + NGINXSCCName string DeploymentStore agent.DeploymentStorer StatusQueue *status.Queue @@ -66,10 +68,13 @@ type NginxProvisioner struct { baseLabelSelector metav1.LabelSelector cfg Config leader bool + isOpenshift bool lock sync.RWMutex } +var apiChecker openshift.APIChecker = &openshift.APICheckerImpl{} + // NewNginxProvisioner returns a new instance of a Provisioner that will deploy nginx resources. 
func NewNginxProvisioner( ctx context.Context, @@ -82,6 +87,7 @@ func NewNginxProvisioner( caSecretName = cfg.PlusUsageConfig.CASecretName clientSSLSecretName = cfg.PlusUsageConfig.ClientSSLSecretName } + store := newStore( cfg.NginxDockerSecretNames, cfg.AgentTLSSecretName, @@ -100,12 +106,18 @@ func NewNginxProvisioner( }, } + isOpenshift, err := apiChecker.IsOpenshift(mgr.GetConfig()) + if err != nil { + cfg.Logger.Error(err, "could not determine if running in openshift, will not create Role/RoleBinding") + } + provisioner := &NginxProvisioner{ k8sClient: mgr.GetClient(), store: store, baseLabelSelector: selector, resourcesToDeleteOnStartup: []types.NamespacedName{}, cfg: cfg, + isOpenshift: isOpenshift, } handler, err := newEventHandler(store, provisioner, selector, cfg.GCName) @@ -123,6 +135,7 @@ func NewNginxProvisioner( cfg.NginxDockerSecretNames, cfg.AgentTLSSecretName, cfg.PlusUsageConfig, + isOpenshift, ) if err != nil { return nil, nil, err diff --git a/internal/mode/static/provisioner/provisioner_test.go b/internal/mode/static/provisioner/provisioner_test.go index 987b835352..d89caefade 100644 --- a/internal/mode/static/provisioner/provisioner_test.go +++ b/internal/mode/static/provisioner/provisioner_test.go @@ -24,6 +24,7 @@ import ( "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/agentfakes" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/openshift/openshiftfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" ) @@ -202,6 +203,7 @@ func TestNewNginxProvisioner(t *testing.T) { Logger: logr.Discard(), } + apiChecker = &openshiftfakes.FakeAPIChecker{} provisioner, eventLoop, err := NewNginxProvisioner(context.TODO(), mgr, cfg) g.Expect(err).ToNot(HaveOccurred()) g.Expect(provisioner).NotTo(BeNil()) diff --git a/internal/mode/static/provisioner/setter.go b/internal/mode/static/provisioner/setter.go index dfe42321bc..3d5c84e780 100644 --- a/internal/mode/static/provisioner/setter.go +++ b/internal/mode/static/provisioner/setter.go @@ -3,6 +3,7 @@ package provisioner import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -20,6 +21,10 @@ func objectSpecSetter(object client.Object) controllerutil.MutateFn { return configMapSpecSetter(obj, obj.Data) case *corev1.Secret: return secretSpecSetter(obj, obj.Data) + case *rbacv1.Role: + return roleSpecSetter(obj, obj.Rules) + case *rbacv1.RoleBinding: + return roleBindingSpecSetter(obj, obj.RoleRef, obj.Subjects) } return nil @@ -52,3 +57,22 @@ func secretSpecSetter(secret *corev1.Secret, data map[string][]byte) controlleru return nil } } + +func roleSpecSetter(role *rbacv1.Role, rules []rbacv1.PolicyRule) controllerutil.MutateFn { + return func() error { + role.Rules = rules + return nil + } +} + +func roleBindingSpecSetter( + roleBinding *rbacv1.RoleBinding, + roleRef rbacv1.RoleRef, + subjects []rbacv1.Subject, +) controllerutil.MutateFn { + return func() error { + roleBinding.RoleRef = roleRef + roleBinding.Subjects = subjects + return nil + } +} diff --git a/internal/mode/static/provisioner/store.go b/internal/mode/static/provisioner/store.go index e28a42e103..0af617852c 100644 --- a/internal/mode/static/provisioner/store.go +++ 
b/internal/mode/static/provisioner/store.go @@ -7,6 +7,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -21,6 +22,8 @@ type NginxResources struct { Deployment metav1.ObjectMeta Service metav1.ObjectMeta ServiceAccount metav1.ObjectMeta + Role metav1.ObjectMeta + RoleBinding metav1.ObjectMeta BootstrapConfigMap metav1.ObjectMeta AgentConfigMap metav1.ObjectMeta AgentTLSSecret metav1.ObjectMeta @@ -143,6 +146,22 @@ func (s *store) registerResourceInGatewayConfig(gatewayNSName types.NamespacedNa } else { cfg.ServiceAccount = obj.ObjectMeta } + case *rbacv1.Role: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + Role: obj.ObjectMeta, + } + } else { + cfg.Role = obj.ObjectMeta + } + case *rbacv1.RoleBinding: + if cfg, ok := s.nginxResources[gatewayNSName]; !ok { + s.nginxResources[gatewayNSName] = &NginxResources{ + RoleBinding: obj.ObjectMeta, + } + } else { + cfg.RoleBinding = obj.ObjectMeta + } case *corev1.ConfigMap: s.registerConfigMapInGatewayConfig(obj, gatewayNSName) case *corev1.Secret: @@ -260,6 +279,7 @@ func (s *store) deleteResourcesForGateway(nsName types.NamespacedName) { delete(s.nginxResources, nsName) } +//nolint:gocyclo // will refactor at some point func (s *store) gatewayExistsForResource(object client.Object, nsName types.NamespacedName) *graph.Gateway { s.lock.RLock() defer s.lock.RUnlock() @@ -278,6 +298,14 @@ func (s *store) gatewayExistsForResource(object client.Object, nsName types.Name if resourceMatches(resources.ServiceAccount, nsName) { return resources.Gateway } + case *rbacv1.Role: + if resourceMatches(resources.Role, nsName) { + return resources.Gateway + } + case *rbacv1.RoleBinding: + if resourceMatches(resources.RoleBinding, nsName) { + return resources.Gateway + } case *corev1.ConfigMap: if resourceMatches(resources.BootstrapConfigMap, nsName) { return resources.Gateway diff --git a/internal/mode/static/provisioner/store_test.go b/internal/mode/static/provisioner/store_test.go index 814ef1bd79..59e7da1207 100644 --- a/internal/mode/static/provisioner/store_test.go +++ b/internal/mode/static/provisioner/store_test.go @@ -7,6 +7,7 @@ import ( . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -167,6 +168,30 @@ func TestRegisterResourceInGatewayConfig(t *testing.T) { // clear out resources before next test store.deleteResourcesForGateway(nsName) + // Role + role := &rbacv1.Role{ObjectMeta: defaultMeta} + resources = registerAndGetResources(role) + g.Expect(resources.Role).To(Equal(defaultMeta)) + + // Role again, already exists + resources = registerAndGetResources(role) + g.Expect(resources.Role).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + + // RoleBinding + roleBinding := &rbacv1.RoleBinding{ObjectMeta: defaultMeta} + resources = registerAndGetResources(roleBinding) + g.Expect(resources.RoleBinding).To(Equal(defaultMeta)) + + // RoleBinding again, already exists + resources = registerAndGetResources(roleBinding) + g.Expect(resources.RoleBinding).To(Equal(defaultMeta)) + + // clear out resources before next test + store.deleteResourcesForGateway(nsName) + // ConfigMap bootstrapCMMeta := metav1.ObjectMeta{ Name: controller.CreateNginxResourceName(defaultMeta.Name, nginxIncludesConfigMapNameSuffix), @@ -406,6 +431,14 @@ func TestGatewayExistsForResource(t *testing.T) { Name: "test-serviceaccount", Namespace: "default", }, + Role: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + }, + RoleBinding: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + }, BootstrapConfigMap: metav1.ObjectMeta{ Name: "test-bootstrap-configmap", Namespace: "default", @@ -473,6 +506,26 @@ func TestGatewayExistsForResource(t *testing.T) { }, expected: gateway, }, + { + name: "Role exists", + object: &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + }, + }, + expected: gateway, + }, + { + name: "RoleBinding exists", + object: &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + }, + }, + expected: gateway, + }, { name: "Bootstrap ConfigMap exists", object: &corev1.ConfigMap{ From d3b531996f39b9f04e4e30c4078f2208c0c0e97e Mon Sep 17 00:00:00 2001 From: salonichf5 <146118978+salonichf5@users.noreply.github.com> Date: Tue, 22 Apr 2025 19:29:44 +0530 Subject: [PATCH 20/32] Add support for multiple gateways (#3275) Problem: Users want to be able to configure multiple Gateways with a single installation of NGF. Solution: Support the ability to create multiple Gateways. Routes and policies can be attached to multiple Gateways. Also fixed conformance tests. 
--------- Co-authored-by: Saylor Berman --- apis/v1alpha2/nginxproxy_types.go | 2 +- cmd/gateway/commands.go | 43 +- cmd/gateway/commands_test.go | 66 - cmd/gateway/validating_types.go | 30 - cmd/gateway/validation.go | 35 - cmd/gateway/validation_test.go | 132 -- internal/mode/static/config/config.go | 6 - internal/mode/static/handler.go | 123 +- internal/mode/static/handler_test.go | 198 +-- internal/mode/static/manager.go | 46 +- internal/mode/static/manager_test.go | 63 +- .../policies/clientsettings/validator.go | 10 +- .../policies/clientsettings/validator_test.go | 13 +- .../policies/observability/validator.go | 37 +- .../policies/observability/validator_test.go | 95 +- .../policies/policiesfakes/fake_validator.go | 94 +- .../static/nginx/config/policies/policy.go | 2 - .../policies/upstreamsettings/validator.go | 10 +- .../upstreamsettings/validator_test.go | 13 +- .../static/nginx/config/policies/validator.go | 23 +- .../nginx/config/policies/validator_test.go | 44 +- internal/mode/static/provisioner/objects.go | 11 +- .../static/state/change_processor_test.go | 1215 +++++++++----- .../static/state/conditions/conditions.go | 15 - .../static/state/dataplane/configuration.go | 197 ++- .../state/dataplane/configuration_test.go | 1212 ++++++++------ .../mode/static/state/graph/backend_refs.go | 105 +- .../static/state/graph/backend_refs_test.go | 233 ++- .../static/state/graph/backend_tls_policy.go | 56 +- .../state/graph/backend_tls_policy_test.go | 206 ++- internal/mode/static/state/graph/gateway.go | 163 +- .../static/state/graph/gateway_listener.go | 8 +- .../mode/static/state/graph/gateway_test.go | 1473 ++++++++++------- internal/mode/static/state/graph/graph.go | 94 +- .../mode/static/state/graph/graph_test.go | 556 ++++--- internal/mode/static/state/graph/grpcroute.go | 19 +- .../mode/static/state/graph/grpcroute_test.go | 141 +- internal/mode/static/state/graph/httproute.go | 8 +- .../mode/static/state/graph/httproute_test.go | 98 +- .../state/graph/multiple_gateways_test.go | 895 ++++++++++ internal/mode/static/state/graph/namespace.go | 24 +- .../mode/static/state/graph/namespace_test.go | 162 +- .../mode/static/state/graph/nginxproxy.go | 21 +- .../static/state/graph/nginxproxy_test.go | 36 +- internal/mode/static/state/graph/policies.go | 122 +- .../mode/static/state/graph/policies_test.go | 487 ++++-- .../mode/static/state/graph/route_common.go | 268 +-- .../static/state/graph/route_common_test.go | 1256 ++++++++++---- internal/mode/static/state/graph/service.go | 119 +- .../mode/static/state/graph/service_test.go | 246 +-- internal/mode/static/state/graph/tlsroute.go | 42 +- .../mode/static/state/graph/tlsroute_test.go | 158 +- .../validationfakes/fake_policy_validator.go | 94 +- .../mode/static/state/validation/validator.go | 4 +- .../mode/static/status/prepare_requests.go | 55 +- .../static/status/prepare_requests_test.go | 181 +- internal/mode/static/telemetry/collector.go | 5 +- .../mode/static/telemetry/collector_test.go | 14 +- tests/conformance/conformance-rbac.yaml | 1 + tests/framework/resourcemanager.go | 74 +- tests/suite/advanced_routing_test.go | 2 +- tests/suite/client_settings_test.go | 7 +- tests/suite/graceful_recovery_test.go | 12 +- .../clientsettings/ignored-gateway.yaml | 11 - .../manifests/clientsettings/invalid-csp.yaml | 18 - .../clientsettings/invalid-route-csp.yaml | 33 + tests/suite/nginxgateway_test.go | 2 +- tests/suite/sample_test.go | 2 +- tests/suite/snippets_filter_test.go | 2 +- tests/suite/tracing_test.go | 2 +- 
tests/suite/upstream_settings_test.go | 2 +- 71 files changed, 7207 insertions(+), 4045 deletions(-) create mode 100644 internal/mode/static/state/graph/multiple_gateways_test.go delete mode 100644 tests/suite/manifests/clientsettings/ignored-gateway.yaml delete mode 100644 tests/suite/manifests/clientsettings/invalid-csp.yaml create mode 100644 tests/suite/manifests/clientsettings/invalid-route-csp.yaml diff --git a/apis/v1alpha2/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go index bfa21aca59..3a0a3ccc73 100644 --- a/apis/v1alpha2/nginxproxy_types.go +++ b/apis/v1alpha2/nginxproxy_types.go @@ -550,7 +550,7 @@ const ( // ExternalTrafficPolicy describes how nodes distribute service traffic they // receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, -// and LoadBalancer IPs. +// and LoadBalancer IPs. Ignored for ClusterIP services. // +kubebuilder:validation:Enum=Cluster;Local type ExternalTrafficPolicy corev1.ServiceExternalTrafficPolicy diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go index 853ddd0f72..be076a76da 100644 --- a/cmd/gateway/commands.go +++ b/cmd/gateway/commands.go @@ -12,7 +12,6 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog/v2" ctlr "sigs.k8s.io/controller-runtime" @@ -59,11 +58,9 @@ func createRootCommand() *cobra.Command { func createControllerCommand() *cobra.Command { // flag names const ( - gatewayFlag = "gateway" configFlag = "config" serviceFlag = "service" agentTLSSecretFlag = "agent-tls-secret" - updateGCStatusFlag = "update-gatewayclass-status" metricsDisableFlag = "metrics-disable" metricsSecureFlag = "metrics-secure-serving" metricsPortFlag = "metrics-port" @@ -94,9 +91,7 @@ func createControllerCommand() *cobra.Command { validator: validateResourceName, } - updateGCStatus bool - gateway = namespacedNameValue{} - configName = stringValidatingValue{ + configName = stringValidatingValue{ validator: validateResourceName, } serviceName = stringValidatingValue{ @@ -200,11 +195,6 @@ func createControllerCommand() *cobra.Command { return fmt.Errorf("error parsing telemetry endpoint insecure: %w", err) } - var gwNsName *types.NamespacedName - if cmd.Flags().Changed(gatewayFlag) { - gwNsName = &gateway.value - } - var usageReportConfig config.UsageReportConfig if plus && usageReportSecretName.value == "" { return errors.New("usage-report-secret is required when using NGINX Plus") @@ -229,14 +219,12 @@ func createControllerCommand() *cobra.Command { } conf := config.Config{ - GatewayCtlrName: gatewayCtlrName.value, - ConfigName: configName.String(), - Logger: logger, - AtomicLevel: atom, - GatewayClassName: gatewayClassName.value, - GatewayNsName: gwNsName, - UpdateGatewayClassStatus: updateGCStatus, - GatewayPodConfig: podConfig, + GatewayCtlrName: gatewayCtlrName.value, + ConfigName: configName.String(), + Logger: logger, + AtomicLevel: atom, + GatewayClassName: gatewayClassName.value, + GatewayPodConfig: podConfig, HealthConfig: config.HealthConfig{ Enabled: !disableHealth, Port: healthListenPort.value, @@ -293,16 +281,6 @@ func createControllerCommand() *cobra.Command { ) utilruntime.Must(cmd.MarkFlagRequired(gatewayClassFlag)) - cmd.Flags().Var( - &gateway, - gatewayFlag, - "The namespaced name of the Gateway resource to use. "+ - "Must be of the form: NAMESPACE/NAME. "+ - "If not specified, the control plane will process all Gateways for the configured GatewayClass. 
"+ - "However, among them, it will choose the oldest resource by creation timestamp. If the timestamps are "+ - "equal, it will choose the resource that appears first in alphabetical order by {namespace}/{name}.", - ) - cmd.Flags().VarP( &configName, configFlag, @@ -326,13 +304,6 @@ func createControllerCommand() *cobra.Command { `NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).`, ) - cmd.Flags().BoolVar( - &updateGCStatus, - updateGCStatusFlag, - true, - "Update the status of the GatewayClass resource.", - ) - cmd.Flags().BoolVar( &disableMetrics, metricsDisableFlag, diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go index 0cc031a1c5..8db899f1cb 100644 --- a/cmd/gateway/commands_test.go +++ b/cmd/gateway/commands_test.go @@ -9,7 +9,6 @@ import ( . "github.com/onsi/gomega" "github.com/spf13/cobra" "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/types" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" ) @@ -137,11 +136,9 @@ func TestControllerCmdFlagValidation(t *testing.T) { args: []string{ "--gateway-ctlr-name=gateway.nginx.org/nginx-gateway", // common and required flag "--gatewayclass=nginx", // common and required flag - "--gateway=nginx-gateway/nginx", "--config=nginx-gateway-config", "--service=nginx-gateway", "--agent-tls-secret=agent-tls", - "--update-gatewayclass-status=true", "--metrics-port=9114", "--metrics-disable", "--metrics-secure-serving", @@ -170,23 +167,6 @@ func TestControllerCmdFlagValidation(t *testing.T) { }, wantErr: false, }, - { - name: "gateway is set to empty string", - args: []string{ - "--gateway=", - }, - wantErr: true, - expectedErrPrefix: `invalid argument "" for "--gateway" flag: must be set`, - }, - { - name: "gateway is invalid", - args: []string{ - "--gateway=nginx-gateway", // no namespace - }, - wantErr: true, - expectedErrPrefix: `invalid argument "nginx-gateway" for "--gateway" flag: invalid format; ` + - "must be NAMESPACE/NAME", - }, { name: "config is set to empty string", args: []string{ @@ -235,22 +215,6 @@ func TestControllerCmdFlagValidation(t *testing.T) { wantErr: true, expectedErrPrefix: `invalid argument "!@#$" for "--agent-tls-secret" flag: invalid format`, }, - { - name: "update-gatewayclass-status is set to empty string", - args: []string{ - "--update-gatewayclass-status=", - }, - wantErr: true, - expectedErrPrefix: `invalid argument "" for "--update-gatewayclass-status" flag: strconv.ParseBool`, - }, - { - name: "update-gatewayclass-status is invalid", - args: []string{ - "--update-gatewayclass-status=invalid", // not a boolean - }, - wantErr: true, - expectedErrPrefix: `invalid argument "invalid" for "--update-gatewayclass-status" flag: strconv.ParseBool`, - }, { name: "metrics-port is invalid type", args: []string{ @@ -727,30 +691,6 @@ func TestParseFlags(t *testing.T) { err = flagSet.Set("customStringFlagUserDefined", "changed-test-flag-value") g.Expect(err).To(Not(HaveOccurred())) - customStringFlagNoDefaultValueUnset := namespacedNameValue{ - value: types.NamespacedName{}, - } - flagSet.Var( - &customStringFlagNoDefaultValueUnset, - "customStringFlagNoDefaultValueUnset", - "no default value custom string test flag", - ) - - customStringFlagNoDefaultValueUserDefined := namespacedNameValue{ - value: types.NamespacedName{}, - } - flagSet.Var( - &customStringFlagNoDefaultValueUserDefined, - "customStringFlagNoDefaultValueUserDefined", - "no default value but with user defined namespacedName test flag", - ) - userDefinedNamespacedName 
:= types.NamespacedName{ - Namespace: "changed-namespace", - Name: "changed-name", - } - err = flagSet.Set("customStringFlagNoDefaultValueUserDefined", userDefinedNamespacedName.String()) - g.Expect(err).To(Not(HaveOccurred())) - expectedKeys := []string{ "boolFlagTrue", "boolFlagFalse", @@ -760,9 +700,6 @@ func TestParseFlags(t *testing.T) { "customStringFlagDefault", "customStringFlagUserDefined", - - "customStringFlagNoDefaultValueUnset", - "customStringFlagNoDefaultValueUserDefined", } expectedValues := []string{ "true", @@ -773,9 +710,6 @@ func TestParseFlags(t *testing.T) { "default", "user-defined", - - "default", - "user-defined", } flagKeys, flagValues := parseFlags(flagSet) diff --git a/cmd/gateway/validating_types.go b/cmd/gateway/validating_types.go index 1db3eab8dc..c0fd93da81 100644 --- a/cmd/gateway/validating_types.go +++ b/cmd/gateway/validating_types.go @@ -6,8 +6,6 @@ import ( "fmt" "strconv" "strings" - - "k8s.io/apimachinery/pkg/types" ) // stringValidatingValue is a string flag value with custom validation logic. @@ -106,31 +104,3 @@ func (v *intValidatingValue) Set(param string) error { func (v *intValidatingValue) Type() string { return "int" } - -// namespacedNameValue is a string flag value that represents a namespaced name. -// it implements the pflag.Value interface. -type namespacedNameValue struct { - value types.NamespacedName -} - -func (v *namespacedNameValue) String() string { - if (v.value == types.NamespacedName{}) { - // if we don't do that, the default value in the help message will be printed as "/" - return "" - } - return v.value.String() -} - -func (v *namespacedNameValue) Set(param string) error { - nsname, err := parseNamespacedResourceName(param) - if err != nil { - return err - } - - v.value = nsname - return nil -} - -func (v *namespacedNameValue) Type() string { - return "string" -} diff --git a/cmd/gateway/validation.go b/cmd/gateway/validation.go index 9e07a3a918..a953c522c1 100644 --- a/cmd/gateway/validation.go +++ b/cmd/gateway/validation.go @@ -8,7 +8,6 @@ import ( "strconv" "strings" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation" ) @@ -55,40 +54,6 @@ func validateResourceName(value string) error { return nil } -func validateNamespaceName(value string) error { - // used by Kubernetes to validate resource namespace names - messages := validation.IsDNS1123Label(value) - if len(messages) > 0 { - msg := strings.Join(messages, "; ") - return fmt.Errorf("invalid format: %s", msg) - } - - return nil -} - -func parseNamespacedResourceName(value string) (types.NamespacedName, error) { - if value == "" { - return types.NamespacedName{}, errors.New("must be set") - } - - parts := strings.Split(value, "/") - if len(parts) != 2 { - return types.NamespacedName{}, errors.New("invalid format; must be NAMESPACE/NAME") - } - - if err := validateNamespaceName(parts[0]); err != nil { - return types.NamespacedName{}, fmt.Errorf("invalid namespace name: %w", err) - } - if err := validateResourceName(parts[1]); err != nil { - return types.NamespacedName{}, fmt.Errorf("invalid resource name: %w", err) - } - - return types.NamespacedName{ - Namespace: parts[0], - Name: parts[1], - }, nil -} - func validateQualifiedName(name string) error { if len(name) == 0 { return errors.New("must be set") diff --git a/cmd/gateway/validation_test.go b/cmd/gateway/validation_test.go index 59db6fc57c..665bd91582 100644 --- a/cmd/gateway/validation_test.go +++ b/cmd/gateway/validation_test.go @@ -4,7 +4,6 @@ import ( "testing" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/types" ) func TestValidateGatewayControllerName(t *testing.T) { @@ -132,137 +131,6 @@ func TestValidateResourceName(t *testing.T) { } } -func TestValidateNamespaceName(t *testing.T) { - t.Parallel() - tests := []struct { - name string - value string - expErr bool - }{ - { - name: "valid", - value: "mynamespace", - expErr: false, - }, - { - name: "valid - with dash", - value: "my-namespace", - expErr: false, - }, - { - name: "valid - with numbers", - value: "mynamespace123", - expErr: false, - }, - { - name: "invalid - empty", - value: "", - expErr: true, - }, - { - name: "invalid - invalid character '.'", - value: "my.namespace", - expErr: true, - }, - { - name: "invalid - invalid character '/'", - value: "my/namespace", - expErr: true, - }, - { - name: "invalid - invalid character '_'", - value: "my_namespace", - expErr: true, - }, - { - name: "invalid - invalid character '@'", - value: "my@namespace", - expErr: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - err := validateNamespaceName(test.value) - - if test.expErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - }) - } -} - -func TestParseNamespacedResourceName(t *testing.T) { - t.Parallel() - tests := []struct { - name string - value string - expectedErrPrefix string - expectedNsName types.NamespacedName - expectErr bool - }{ - { - name: "valid", - value: "test/my-gateway", - expectedNsName: types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, - expectErr: false, - }, - { - name: "empty", - value: "", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "must be set", - }, - { - name: "wrong number of parts", - value: "test", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "invalid format; must be NAMESPACE/NAME", - }, - { - name: "invalid namespace", - value: "t@st/my-gateway", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "invalid namespace name", - }, - { - name: "invalid name", - value: "test/my-g@teway", - expectedNsName: types.NamespacedName{}, - expectErr: true, - expectedErrPrefix: "invalid resource name", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - nsName, err := parseNamespacedResourceName(test.value) - - if test.expectErr { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(HavePrefix(test.expectedErrPrefix)) - } else { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(nsName).To(Equal(test.expectedNsName)) - } - }) - } -} - func TestValidateQualifiedName(t *testing.T) { t.Parallel() tests := []struct { diff --git a/internal/mode/static/config/config.go b/internal/mode/static/config/config.go index 9248070e03..25630eb8f0 100644 --- a/internal/mode/static/config/config.go +++ b/internal/mode/static/config/config.go @@ -5,7 +5,6 @@ import ( "github.com/go-logr/logr" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/types" ) const DefaultNginxMetricsPort = int32(9113) @@ -19,9 +18,6 @@ type Config struct { ImageSource string // Flags contains the NGF command-line flag names and values. Flags Flags - // GatewayNsName is the namespaced name of a Gateway resource that the Gateway will use. - // The Gateway will ignore all other Gateway resources. - GatewayNsName *types.NamespacedName // GatewayPodConfig contains information about this Pod. 
GatewayPodConfig GatewayPodConfig // Logger is the Zap Logger used by all components. @@ -46,8 +42,6 @@ type Config struct { MetricsConfig MetricsConfig // HealthConfig specifies the health probe config. HealthConfig HealthConfig - // UpdateGatewayClassStatus enables updating the status of the GatewayClass resource. - UpdateGatewayClassStatus bool // Plus indicates whether NGINX Plus is being used. Plus bool // ExperimentalFeatures indicates if experimental features are enabled. diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index c22b182e8e..af510dadfc 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "strings" "sync" "time" @@ -79,8 +80,6 @@ type eventHandlerConfig struct { gatewayCtlrName string // gatewayClassName is the name of the GatewayClass. gatewayClassName string - // updateGatewayClassStatus enables updating the status of the GatewayClass resource. - updateGatewayClassStatus bool // plus is whether or not we are running NGINX Plus. plus bool } @@ -185,7 +184,7 @@ func (h *eventHandlerImpl) sendNginxConfig( return } - if gr.Gateway == nil { + if len(gr.Gateways) == 0 { // still need to update GatewayClass status obj := &status.QueueObject{ UpdateType: status.UpdateAll, @@ -194,40 +193,42 @@ func (h *eventHandlerImpl) sendNginxConfig( return } - go func() { - if err := h.cfg.nginxProvisioner.RegisterGateway(ctx, gr.Gateway, gr.DeploymentName.Name); err != nil { - logger.Error(err, "error from provisioner") - } - }() + for _, gw := range gr.Gateways { + go func() { + if err := h.cfg.nginxProvisioner.RegisterGateway(ctx, gw, gw.DeploymentName.Name); err != nil { + logger.Error(err, "error from provisioner") + } + }() - if !gr.Gateway.Valid { - obj := &status.QueueObject{ - Deployment: gr.DeploymentName, - UpdateType: status.UpdateAll, + if !gw.Valid { + obj := &status.QueueObject{ + Deployment: gw.DeploymentName, + UpdateType: status.UpdateAll, + } + h.cfg.statusQueue.Enqueue(obj) + return } - h.cfg.statusQueue.Enqueue(obj) - return - } - stopCh := make(chan struct{}) - deployment := h.cfg.nginxDeployments.GetOrStore(ctx, gr.DeploymentName, stopCh) - if deployment == nil { - panic("expected deployment, got nil") - } + stopCh := make(chan struct{}) + deployment := h.cfg.nginxDeployments.GetOrStore(ctx, gw.DeploymentName, stopCh) + if deployment == nil { + panic("expected deployment, got nil") + } - configApplied := h.processStateAndBuildConfig(ctx, logger, gr, changeType, deployment) + configApplied := h.processStateAndBuildConfig(ctx, logger, gr, gw, changeType, deployment) - configErr := deployment.GetLatestConfigError() - upstreamErr := deployment.GetLatestUpstreamError() - err := errors.Join(configErr, upstreamErr) + configErr := deployment.GetLatestConfigError() + upstreamErr := deployment.GetLatestUpstreamError() + err := errors.Join(configErr, upstreamErr) - if configApplied || err != nil { - obj := &status.QueueObject{ - UpdateType: status.UpdateAll, - Error: err, - Deployment: gr.DeploymentName, + if configApplied || err != nil { + obj := &status.QueueObject{ + UpdateType: status.UpdateAll, + Error: err, + Deployment: gw.DeploymentName, + } + h.cfg.statusQueue.Enqueue(obj) } - h.cfg.statusQueue.Enqueue(obj) } } @@ -235,6 +236,7 @@ func (h *eventHandlerImpl) processStateAndBuildConfig( ctx context.Context, logger logr.Logger, gr *graph.Graph, + currentGateway *graph.Gateway, changeType state.ChangeType, deployment *agent.Deployment, ) bool { @@ -242,7 +244,7 
@@ func (h *eventHandlerImpl) processStateAndBuildConfig( switch changeType { case state.EndpointsOnlyChange: h.version++ - cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) + cfg := dataplane.BuildConfiguration(ctx, gr, currentGateway, h.cfg.serviceResolver, h.version, h.cfg.plus) depCtx, getErr := h.getDeploymentContext(ctx) if getErr != nil { logger.Error(getErr, "error getting deployment context for usage reporting") @@ -260,7 +262,7 @@ func (h *eventHandlerImpl) processStateAndBuildConfig( deployment.FileLock.Unlock() case state.ClusterStateChange: h.version++ - cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version, h.cfg.plus) + cfg := dataplane.BuildConfiguration(ctx, gr, currentGateway, h.cfg.serviceResolver, h.version, h.cfg.plus) depCtx, getErr := h.getDeploymentContext(ctx) if getErr != nil { logger.Error(getErr, "error getting deployment context for usage reporting") @@ -284,32 +286,46 @@ func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) { return } - // TODO(sberman): once we support multiple Gateways, we'll have to get - // the correct Graph for the Deployment contained in the update message gr := h.cfg.processor.GetLatestGraph() if gr == nil { continue } var nginxReloadRes graph.NginxReloadResult + var gw *graph.Gateway + if item.Deployment.Name != "" { + gwNSName := types.NamespacedName{ + Namespace: item.Deployment.Namespace, + Name: strings.TrimSuffix(item.Deployment.Name, fmt.Sprintf("-%s", h.cfg.gatewayClassName)), + } + + gw = gr.Gateways[gwNSName] + } + switch { case item.Error != nil: h.cfg.logger.Error(item.Error, "Failed to update NGINX configuration") nginxReloadRes.Error = item.Error - case gr.Gateway != nil: + case gw != nil: h.cfg.logger.Info("NGINX configuration was successfully updated") } - gr.LatestReloadResult = nginxReloadRes + if gw != nil { + gw.LatestReloadResult = nginxReloadRes + } switch item.UpdateType { case status.UpdateAll: - h.updateStatuses(ctx, gr) + h.updateStatuses(ctx, gr, gw) case status.UpdateGateway: + if gw == nil { + continue + } + gwAddresses, err := getGatewayAddresses( ctx, h.cfg.k8sClient, item.GatewayService, - gr.Gateway, + gw, h.cfg.gatewayClassName, ) if err != nil { @@ -326,12 +342,12 @@ func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) { } transitionTime := metav1.Now() + gatewayStatuses := status.PrepareGatewayRequests( - gr.Gateway, - gr.IgnoredGateways, + gw, transitionTime, gwAddresses, - gr.LatestReloadResult, + gw.LatestReloadResult, ) h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...) default: @@ -340,8 +356,16 @@ func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) { } } -func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph) { - gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, gr.Gateway, h.cfg.gatewayClassName) +func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph, gw *graph.Gateway) { + transitionTime := metav1.Now() + gcReqs := status.PrepareGatewayClassRequests(gr.GatewayClass, gr.IgnoredGatewayClasses, transitionTime) + + if gw == nil { + h.cfg.statusUpdater.UpdateGroup(ctx, groupAllExceptGateways, gcReqs...) 
+ return + } + + gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, gw, h.cfg.gatewayClassName) if err != nil { msg := "error getting Gateway Service IP address" h.cfg.logger.Error(err, msg) @@ -354,17 +378,11 @@ func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph) ) } - transitionTime := metav1.Now() - - var gcReqs []frameworkStatus.UpdateRequest - if h.cfg.updateGatewayClassStatus { - gcReqs = status.PrepareGatewayClassRequests(gr.GatewayClass, gr.IgnoredGatewayClasses, transitionTime) - } routeReqs := status.PrepareRouteRequests( gr.L4Routes, gr.Routes, transitionTime, - gr.LatestReloadResult, + gw.LatestReloadResult, h.cfg.gatewayCtlrName, ) @@ -392,11 +410,10 @@ func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph) // We put Gateway status updates separately from the rest of the statuses because we want to be able // to update them separately from the rest of the graph whenever the public IP of NGF changes. gwReqs := status.PrepareGatewayRequests( - gr.Gateway, - gr.IgnoredGateways, + gw, transitionTime, gwAddresses, - gr.LatestReloadResult, + gw.LatestReloadResult, ) h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gwReqs...) } diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index 5175195a7f..6d62be42b3 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -19,6 +19,7 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/events" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/status/statusfakes" @@ -56,17 +57,6 @@ var _ = Describe("eventHandler", func() { cancel context.CancelFunc ) - const nginxGatewayServiceName = "nginx-gateway" - - createService := func(name string) *v1.Service { - return &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "nginx-gateway", - }, - } - } - expectReconfig := func(expectedConf dataplane.Configuration, expectedFiles []agent.File) { Expect(fakeProcessor.ProcessCallCount()).Should(Equal(1)) @@ -96,9 +86,15 @@ var _ = Describe("eventHandler", func() { ctx, cancel = context.WithCancel(context.Background()) //nolint:fatcontext // ignore for test baseGraph = &graph.Graph{ - Gateway: &graph.Gateway{ - Valid: true, - Source: &gatewayv1.Gateway{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway"}: { + Valid: true, + Source: &gatewayv1.Gateway{}, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway", "nginx"), + }, + }, }, } @@ -116,9 +112,6 @@ var _ = Describe("eventHandler", func() { fakeK8sClient = fake.NewFakeClient() queue = status.NewQueue() - // Needed because handler checks the service from the API on every HandleEventBatch - Expect(fakeK8sClient.Create(context.Background(), createService(nginxGatewayServiceName))).To(Succeed()) - handler = newEventHandlerImpl(eventHandlerConfig{ ctx: ctx, k8sClient: fakeK8sClient, @@ -138,8 +131,8 @@ var _ = Describe("eventHandler", func() { ServiceName: "nginx-gateway", Namespace: "nginx-gateway", }, - metricsCollector: collectors.NewControllerNoopCollector(), - updateGatewayClassStatus: true, + gatewayClassName: "nginx", + metricsCollector: 
collectors.NewControllerNoopCollector(), }) Expect(handler.cfg.graphBuiltHealthChecker.ready).To(BeFalse()) }) @@ -185,13 +178,12 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) checkUpsertEventExpectations(e) expectReconfig(dcfg, fakeCfgFiles) Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) }) - It("should process Delete", func() { e := &events.DeleteEvent{ Type: &gatewayv1.HTTPRoute{}, @@ -201,7 +193,7 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) checkDeleteEventExpectations(e) expectReconfig(dcfg, fakeCfgFiles) @@ -219,6 +211,51 @@ var _ = Describe("eventHandler", func() { checkUpsertEventExpectations(e) Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(0)) Expect(fakeGenerator.GenerateCallCount()).Should(Equal(0)) + // status update for GatewayClass should still occur + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) + }) + It("should not build anything if graph is nil", func() { + fakeProcessor.ProcessReturns(state.ClusterStateChange, nil) + + e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + batch := []interface{}{e} + + handler.HandleEventBatch(context.Background(), logr.Discard(), batch) + + checkUpsertEventExpectations(e) + Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(0)) + Expect(fakeGenerator.GenerateCallCount()).Should(Equal(0)) + // status update for GatewayClass should not occur + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(0)) + }) + It("should update gateway class even if gateway is invalid", func() { + fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway"}: { + Valid: false, + }, + }, + }) + + e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} + batch := []interface{}{e} + + handler.HandleEventBatch(context.Background(), logr.Discard(), batch) + + checkUpsertEventExpectations(e) + Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(0)) + Expect(fakeGenerator.GenerateCallCount()).Should(Equal(0)) + // status update should still occur for GatewayClasses + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) }) }) @@ -238,74 +275,12 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 2) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 2, &graph.Gateway{}) Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) }) }) }) - DescribeTable( - "updating statuses of GatewayClass conditionally based on handler configuration", - func(updateGatewayClassStatus bool) { - handler.cfg.updateGatewayClassStatus = updateGatewayClassStatus - - gc := &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - } - ignoredGC := &gatewayv1.GatewayClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ignored", - }, - } - - gr := &graph.Graph{ - GatewayClass: 
&graph.GatewayClass{ - Source: gc, - Valid: true, - }, - IgnoredGatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ - client.ObjectKeyFromObject(ignoredGC): ignoredGC, - }, - } - - fakeProcessor.ProcessReturns(state.ClusterStateChange, gr) - fakeProcessor.GetLatestGraphReturns(gr) - - e := &events.UpsertEvent{ - Resource: &gatewayv1.HTTPRoute{}, // any supported is OK - } - - batch := []interface{}{e} - - var expectedReqsCount int - if updateGatewayClassStatus { - expectedReqsCount = 2 - } - - handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - - Eventually( - func() int { - return fakeStatusUpdater.UpdateGroupCallCount() - }).Should(Equal(2)) - - _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) - Expect(name).To(Equal(groupAllExceptGateways)) - Expect(reqs).To(HaveLen(expectedReqsCount)) - for _, req := range reqs { - Expect(req.NsName).To(BeElementOf( - client.ObjectKeyFromObject(gc), - client.ObjectKeyFromObject(ignoredGC), - )) - Expect(req.ResourceType).To(Equal(&gatewayv1.GatewayClass{})) - } - }, - Entry("should update statuses of GatewayClass", true), - Entry("should not update statuses of GatewayClass", false), - ) - When("receiving control plane configuration updates", func() { cfg := func(level ngfAPI.ControllerLogLevel) *ngfAPI.NginxGateway { return &ngfAPI.NginxGateway{ @@ -327,7 +302,11 @@ var _ = Describe("eventHandler", func() { Expect(handler.GetLatestConfiguration()).To(BeNil()) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) + _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) Expect(reqs).To(HaveLen(1)) @@ -342,7 +321,11 @@ var _ = Describe("eventHandler", func() { Expect(handler.GetLatestConfiguration()).To(BeNil()) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) + _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) Expect(reqs).To(HaveLen(1)) @@ -370,7 +353,11 @@ var _ = Describe("eventHandler", func() { Expect(handler.GetLatestConfiguration()).To(BeNil()) - Expect(fakeStatusUpdater.UpdateGroupCallCount()).To(Equal(1)) + Eventually( + func() int { + return fakeStatusUpdater.UpdateGroupCallCount() + }).Should(Equal(1)) + _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) Expect(reqs).To(BeEmpty()) @@ -392,7 +379,11 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{e} BeforeEach(func() { - fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{Gateway: &graph.Gateway{Valid: true}}) + fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: {Valid: true}, + }, + }) }) When("running NGINX Plus", func() { @@ -401,7 +392,7 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) dcfg.NginxPlus = dataplane.NginxPlus{AllowedAddresses: []string{"127.0.0.1"}} Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) @@ -414,7 +405,7 @@ var _ = Describe("eventHandler", func() { It("should not call the NGINX Plus API", func() { 
handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) @@ -427,8 +418,11 @@ var _ = Describe("eventHandler", func() { It("should update status when receiving a queue event", func() { obj := &status.QueueObject{ UpdateType: status.UpdateAll, - Deployment: types.NamespacedName{}, - Error: errors.New("status error"), + Deployment: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway", "nginx"), + }, + Error: errors.New("status error"), } queue.Enqueue(obj) @@ -438,13 +432,17 @@ var _ = Describe("eventHandler", func() { }).Should(Equal(2)) gr := handler.cfg.processor.GetLatestGraph() - Expect(gr.LatestReloadResult.Error.Error()).To(Equal("status error")) + gw := gr.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway"}] + Expect(gw.LatestReloadResult.Error.Error()).To(Equal("status error")) }) It("should update Gateway status when receiving a queue event", func() { obj := &status.QueueObject{ - UpdateType: status.UpdateGateway, - Deployment: types.NamespacedName{}, + UpdateType: status.UpdateGateway, + Deployment: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway", "nginx"), + }, GatewayService: &v1.Service{}, } queue.Enqueue(obj) @@ -460,12 +458,16 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{e} readyChannel := handler.cfg.graphBuiltHealthChecker.getReadyCh() - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{Gateway: &graph.Gateway{Valid: true}}) + fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{ + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: {Valid: true}, + }, + }) Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).ToNot(Succeed()) handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) Expect(readyChannel).To(BeClosed()) diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 7b3d28f140..0c78a8f10d 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -245,21 +245,20 @@ func StartManager(cfg config.Config) error { &cfg.UsageReportConfig, cfg.Logger.WithName("generator"), ), - k8sClient: mgr.GetClient(), - k8sReader: mgr.GetAPIReader(), - logger: cfg.Logger.WithName("eventHandler"), - logLevelSetter: logLevelSetter, - eventRecorder: recorder, - deployCtxCollector: deployCtxCollector, - graphBuiltHealthChecker: healthChecker, - gatewayPodConfig: cfg.GatewayPodConfig, - controlConfigNSName: controlConfigNSName, - gatewayCtlrName: cfg.GatewayCtlrName, - gatewayClassName: cfg.GatewayClassName, - updateGatewayClassStatus: cfg.UpdateGatewayClassStatus, - plus: cfg.Plus, - statusQueue: statusQueue, - nginxDeployments: nginxUpdater.NginxDeployments, + k8sClient: mgr.GetClient(), + k8sReader: mgr.GetAPIReader(), + logger: cfg.Logger.WithName("eventHandler"), + logLevelSetter: logLevelSetter, + eventRecorder: recorder, + deployCtxCollector: deployCtxCollector, + graphBuiltHealthChecker: healthChecker, + gatewayPodConfig: cfg.GatewayPodConfig, + 
controlConfigNSName: controlConfigNSName, + gatewayCtlrName: cfg.GatewayCtlrName, + gatewayClassName: cfg.GatewayClassName, + plus: cfg.Plus, + statusQueue: statusQueue, + nginxDeployments: nginxUpdater.NginxDeployments, }) objects, objectLists := prepareFirstEventBatchPreparerArgs(cfg) @@ -435,12 +434,6 @@ func registerControllers( options := []controller.Option{ controller.WithK8sPredicate(k8spredicate.GenerationChangedPredicate{}), } - if cfg.GatewayNsName != nil { - options = append( - options, - controller.WithNamespacedNameFilter(filter.CreateSingleResourceFilter(*cfg.GatewayNsName)), - ) - } return options }(), }, @@ -778,16 +771,7 @@ func prepareFirstEventBatchPreparerArgs(cfg config.Config) ([]client.Object, []c ) } - gwNsName := cfg.GatewayNsName - - if gwNsName == nil { - objectLists = append(objectLists, &gatewayv1.GatewayList{}) - } else { - objects = append( - objects, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: gwNsName.Name, Namespace: gwNsName.Namespace}}, - ) - } + objectLists = append(objectLists, &gatewayv1.GatewayList{}) return objects, objectLists } diff --git a/internal/mode/static/manager_test.go b/internal/mode/static/manager_test.go index 98a6146905..9a9f0768b7 100644 --- a/internal/mode/static/manager_test.go +++ b/internal/mode/static/manager_test.go @@ -45,10 +45,9 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { cfg config.Config }{ { - name: "gwNsName is nil", + name: "base case", cfg: config.Config{ GatewayClassName: gcName, - GatewayNsName: nil, ExperimentalFeatures: false, SnippetsFilters: false, }, @@ -72,49 +71,14 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { }, }, { - name: "gwNsName is not nil", + name: "experimental enabled", cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, - ExperimentalFeatures: false, - SnippetsFilters: false, - }, - expectedObjects: []client.Object{ - &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, - }, - expectedObjectLists: []client.ObjectList{ - &apiv1.ServiceList{}, - &apiv1.SecretList{}, - &apiv1.NamespaceList{}, - &discoveryV1.EndpointSliceList{}, - &gatewayv1.HTTPRouteList{}, - &gatewayv1beta1.ReferenceGrantList{}, - &ngfAPIv1alpha2.NginxProxyList{}, - &gatewayv1.GRPCRouteList{}, - partialObjectMetadataList, - &ngfAPIv1alpha1.ClientSettingsPolicyList{}, - &ngfAPIv1alpha2.ObservabilityPolicyList{}, - &ngfAPIv1alpha1.UpstreamSettingsPolicyList{}, - }, - }, - { - name: "gwNsName is not nil and experimental enabled", - cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, + GatewayClassName: gcName, ExperimentalFeatures: true, SnippetsFilters: false, }, expectedObjects: []client.Object{ &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, }, expectedObjectLists: []client.ObjectList{ &apiv1.ServiceList{}, @@ -123,6 +87,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &apiv1.ConfigMapList{}, &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, + &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, @@ -135,19 +100,14 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { }, }, { - name: "gwNsName 
is not nil and snippets filters enabled", + name: "snippets filters enabled", cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, + GatewayClassName: gcName, ExperimentalFeatures: false, SnippetsFilters: true, }, expectedObjects: []client.Object{ &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, }, expectedObjectLists: []client.ObjectList{ &apiv1.ServiceList{}, @@ -155,6 +115,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &apiv1.NamespaceList{}, &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, + &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, @@ -166,19 +127,14 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { }, }, { - name: "gwNsName is not nil, experimental and snippets filters enabled", + name: "experimental and snippets filters enabled", cfg: config.Config{ - GatewayClassName: gcName, - GatewayNsName: &types.NamespacedName{ - Namespace: "test", - Name: "my-gateway", - }, + GatewayClassName: gcName, ExperimentalFeatures: true, SnippetsFilters: true, }, expectedObjects: []client.Object{ &gatewayv1.GatewayClass{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}, - &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "my-gateway", Namespace: "test"}}, }, expectedObjectLists: []client.ObjectList{ &apiv1.ServiceList{}, @@ -187,6 +143,7 @@ func TestPrepareFirstEventBatchPreparerArgs(t *testing.T) { &apiv1.ConfigMapList{}, &discoveryV1.EndpointSliceList{}, &gatewayv1.HTTPRouteList{}, + &gatewayv1.GatewayList{}, &gatewayv1beta1.ReferenceGrantList{}, &ngfAPIv1alpha2.NginxProxyList{}, partialObjectMetadataList, diff --git a/internal/mode/static/nginx/config/policies/clientsettings/validator.go b/internal/mode/static/nginx/config/policies/clientsettings/validator.go index 98198eb264..7c450b2379 100644 --- a/internal/mode/static/nginx/config/policies/clientsettings/validator.go +++ b/internal/mode/static/nginx/config/policies/clientsettings/validator.go @@ -25,7 +25,7 @@ func NewValidator(genericValidator validation.GenericValidator) *Validator { } // Validate validates the spec of a ClientSettingsPolicy. -func (v *Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { +func (v *Validator) Validate(policy policies.Policy) []conditions.Condition { csp := helpers.MustCastObject[*ngfAPI.ClientSettingsPolicy](policy) targetRefPath := field.NewPath("spec").Child("targetRef") @@ -43,6 +43,14 @@ func (v *Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) return nil } +// ValidateGlobalSettings validates a ClientSettingsPolicy with respect to the NginxProxy global settings. +func (v *Validator) ValidateGlobalSettings( + _ policies.Policy, + _ *policies.GlobalSettings, +) []conditions.Condition { + return nil +} + // Conflicts returns true if the two ClientSettingsPolicies conflict. 
func (v *Validator) Conflicts(polA, polB policies.Policy) bool { cspA := helpers.MustCastObject[*ngfAPI.ClientSettingsPolicy](polA) diff --git a/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go b/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go index bce96d81c8..88b99ba292 100644 --- a/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go +++ b/internal/mode/static/nginx/config/policies/clientsettings/validator_test.go @@ -143,7 +143,7 @@ func TestValidator_Validate(t *testing.T) { t.Parallel() g := NewWithT(t) - conds := v.Validate(test.policy, nil) + conds := v.Validate(test.policy) g.Expect(conds).To(Equal(test.expConditions)) }) } @@ -154,7 +154,7 @@ func TestValidator_ValidatePanics(t *testing.T) { v := clientsettings.NewValidator(nil) validate := func() { - _ = v.Validate(&policiesfakes.FakePolicy{}, nil) + _ = v.Validate(&policiesfakes.FakePolicy{}) } g := NewWithT(t) @@ -162,6 +162,15 @@ func TestValidator_ValidatePanics(t *testing.T) { g.Expect(validate).To(Panic()) } +func TestValidator_ValidateGlobalSettings(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + v := clientsettings.NewValidator(validation.GenericValidator{}) + + g.Expect(v.ValidateGlobalSettings(nil, nil)).To(BeNil()) +} + func TestValidator_Conflicts(t *testing.T) { t.Parallel() tests := []struct { diff --git a/internal/mode/static/nginx/config/policies/observability/validator.go b/internal/mode/static/nginx/config/policies/observability/validator.go index 4d7182e128..798b3099c1 100644 --- a/internal/mode/static/nginx/config/policies/observability/validator.go +++ b/internal/mode/static/nginx/config/policies/observability/validator.go @@ -25,24 +25,9 @@ func NewValidator(genericValidator validation.GenericValidator) *Validator { } // Validate validates the spec of an ObservabilityPolicy. -func (v *Validator) Validate( - policy policies.Policy, - globalSettings *policies.GlobalSettings, -) []conditions.Condition { +func (v *Validator) Validate(policy policies.Policy) []conditions.Condition { obs := helpers.MustCastObject[*ngfAPIv1alpha2.ObservabilityPolicy](policy) - if globalSettings == nil || !globalSettings.NginxProxyValid { - return []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), - } - } - - if !globalSettings.TelemetryEnabled { - return []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), - } - } - targetRefPath := field.NewPath("spec").Child("targetRefs") supportedKinds := []gatewayv1.Kind{kinds.HTTPRoute, kinds.GRPCRoute} supportedGroups := []gatewayv1.Group{gatewayv1.GroupName} @@ -60,6 +45,26 @@ func (v *Validator) Validate( return nil } +// ValidateGlobalSettings validates an ObservabilityPolicy with respect to the NginxProxy global settings. +func (v *Validator) ValidateGlobalSettings( + _ policies.Policy, + globalSettings *policies.GlobalSettings, +) []conditions.Condition { + if globalSettings == nil { + return []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), + } + } + + if !globalSettings.TelemetryEnabled { + return []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + } + } + + return nil +} + // Conflicts returns true if the two ObservabilityPolicies conflict. 
func (v *Validator) Conflicts(polA, polB policies.Policy) bool { a := helpers.MustCastObject[*ngfAPIv1alpha2.ObservabilityPolicy](polA) diff --git a/internal/mode/static/nginx/config/policies/observability/validator_test.go b/internal/mode/static/nginx/config/policies/observability/validator_test.go index 5b0894110d..9736320545 100644 --- a/internal/mode/static/nginx/config/policies/observability/validator_test.go +++ b/internal/mode/static/nginx/config/policies/observability/validator_test.go @@ -54,47 +54,18 @@ func createModifiedPolicy(mod policyModFunc) *ngfAPIv1alpha2.ObservabilityPolicy func TestValidator_Validate(t *testing.T) { t.Parallel() - globalSettings := &policies.GlobalSettings{ - NginxProxyValid: true, - TelemetryEnabled: true, - } tests := []struct { - name string - policy *ngfAPIv1alpha2.ObservabilityPolicy - globalSettings *policies.GlobalSettings - expConditions []conditions.Condition + name string + policy *ngfAPIv1alpha2.ObservabilityPolicy + expConditions []conditions.Condition }{ - { - name: "validation context is nil", - policy: createValidPolicy(), - expConditions: []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), - }, - }, - { - name: "validation context is invalid", - policy: createValidPolicy(), - globalSettings: &policies.GlobalSettings{NginxProxyValid: false}, - expConditions: []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), - }, - }, - { - name: "telemetry is not enabled", - policy: createValidPolicy(), - globalSettings: &policies.GlobalSettings{NginxProxyValid: true, TelemetryEnabled: false}, - expConditions: []conditions.Condition{ - staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), - }, - }, { name: "invalid target ref; unsupported group", policy: createModifiedPolicy(func(p *ngfAPIv1alpha2.ObservabilityPolicy) *ngfAPIv1alpha2.ObservabilityPolicy { p.Spec.TargetRefs[0].Group = "Unsupported" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.targetRefs.group: Unsupported value: \"Unsupported\": " + "supported values: \"gateway.networking.k8s.io\""), @@ -106,7 +77,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.TargetRefs[0].Kind = "Unsupported" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.targetRefs.kind: Unsupported value: \"Unsupported\": " + "supported values: \"HTTPRoute\", \"GRPCRoute\""), @@ -118,7 +88,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.Strategy = "invalid" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.strategy: Unsupported value: \"invalid\": " + "supported values: \"ratio\", \"parent\""), @@ -130,7 +99,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.Context = helpers.GetPointer[ngfAPIv1alpha2.TraceContext]("invalid") return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.context: Unsupported value: \"invalid\": " + "supported values: \"extract\", \"inject\", \"propagate\", \"ignore\""), @@ -142,7 +110,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.SpanName = helpers.GetPointer("invalid$$$") return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ 
staticConds.NewPolicyInvalid("spec.tracing.spanName: Invalid value: \"invalid$$$\": " + "a valid value must have all '\"' escaped and must not contain any '$' or end with an " + @@ -155,7 +122,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.SpanAttributes[0].Key = "invalid$$$" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.spanAttributes.key: Invalid value: \"invalid$$$\": " + "a valid value must have all '\"' escaped and must not contain any '$' or end with an " + @@ -168,7 +134,6 @@ func TestValidator_Validate(t *testing.T) { p.Spec.Tracing.SpanAttributes[0].Value = "invalid$$$" return p }), - globalSettings: globalSettings, expConditions: []conditions.Condition{ staticConds.NewPolicyInvalid("spec.tracing.spanAttributes.value: Invalid value: \"invalid$$$\": " + "a valid value must have all '\"' escaped and must not contain any '$' or end with an " + @@ -176,10 +141,9 @@ func TestValidator_Validate(t *testing.T) { }, }, { - name: "valid", - policy: createValidPolicy(), - globalSettings: globalSettings, - expConditions: nil, + name: "valid", + policy: createValidPolicy(), + expConditions: nil, }, } @@ -190,7 +154,7 @@ func TestValidator_Validate(t *testing.T) { t.Parallel() g := NewWithT(t) - conds := v.Validate(test.policy, test.globalSettings) + conds := v.Validate(test.policy) g.Expect(conds).To(Equal(test.expConditions)) }) } @@ -201,7 +165,7 @@ func TestValidator_ValidatePanics(t *testing.T) { v := observability.NewValidator(nil) validate := func() { - _ = v.Validate(&policiesfakes.FakePolicy{}, nil) + _ = v.Validate(&policiesfakes.FakePolicy{}) } g := NewWithT(t) @@ -209,6 +173,49 @@ func TestValidator_ValidatePanics(t *testing.T) { g.Expect(validate).To(Panic()) } +func TestValidator_ValidateGlobalSettings(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + globalSettings *policies.GlobalSettings + expConditions []conditions.Condition + }{ + { + name: "global settings are nil", + expConditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageNginxProxyInvalid), + }, + }, + { + name: "telemetry is not enabled", + globalSettings: &policies.GlobalSettings{TelemetryEnabled: false}, + expConditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + }, + }, + { + name: "valid", + globalSettings: &policies.GlobalSettings{ + TelemetryEnabled: true, + }, + expConditions: nil, + }, + } + + v := observability.NewValidator(validation.GenericValidator{}) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + conds := v.ValidateGlobalSettings(nil, test.globalSettings) + g.Expect(conds).To(Equal(test.expConditions)) + }) + } +} + func TestValidator_Conflicts(t *testing.T) { t.Parallel() tests := []struct { diff --git a/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go b/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go index 43cfb7e87a..598c982837 100644 --- a/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go +++ b/internal/mode/static/nginx/config/policies/policiesfakes/fake_validator.go @@ -21,11 +21,10 @@ type FakeValidator struct { conflictsReturnsOnCall map[int]struct { result1 bool } - ValidateStub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + ValidateStub func(policies.Policy) 
[]conditions.Condition validateMutex sync.RWMutex validateArgsForCall []struct { arg1 policies.Policy - arg2 *policies.GlobalSettings } validateReturns struct { result1 []conditions.Condition @@ -33,6 +32,18 @@ type FakeValidator struct { validateReturnsOnCall map[int]struct { result1 []conditions.Condition } + ValidateGlobalSettingsStub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + validateGlobalSettingsMutex sync.RWMutex + validateGlobalSettingsArgsForCall []struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + } + validateGlobalSettingsReturns struct { + result1 []conditions.Condition + } + validateGlobalSettingsReturnsOnCall map[int]struct { + result1 []conditions.Condition + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -99,19 +110,18 @@ func (fake *FakeValidator) ConflictsReturnsOnCall(i int, result1 bool) { }{result1} } -func (fake *FakeValidator) Validate(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { +func (fake *FakeValidator) Validate(arg1 policies.Policy) []conditions.Condition { fake.validateMutex.Lock() ret, specificReturn := fake.validateReturnsOnCall[len(fake.validateArgsForCall)] fake.validateArgsForCall = append(fake.validateArgsForCall, struct { arg1 policies.Policy - arg2 *policies.GlobalSettings - }{arg1, arg2}) + }{arg1}) stub := fake.ValidateStub fakeReturns := fake.validateReturns - fake.recordInvocation("Validate", []interface{}{arg1, arg2}) + fake.recordInvocation("Validate", []interface{}{arg1}) fake.validateMutex.Unlock() if stub != nil { - return stub(arg1, arg2) + return stub(arg1) } if specificReturn { return ret.result1 @@ -125,17 +135,17 @@ func (fake *FakeValidator) ValidateCallCount() int { return len(fake.validateArgsForCall) } -func (fake *FakeValidator) ValidateCalls(stub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition) { +func (fake *FakeValidator) ValidateCalls(stub func(policies.Policy) []conditions.Condition) { fake.validateMutex.Lock() defer fake.validateMutex.Unlock() fake.ValidateStub = stub } -func (fake *FakeValidator) ValidateArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { +func (fake *FakeValidator) ValidateArgsForCall(i int) policies.Policy { fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() argsForCall := fake.validateArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1 } func (fake *FakeValidator) ValidateReturns(result1 []conditions.Condition) { @@ -161,6 +171,68 @@ func (fake *FakeValidator) ValidateReturnsOnCall(i int, result1 []conditions.Con }{result1} } +func (fake *FakeValidator) ValidateGlobalSettings(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { + fake.validateGlobalSettingsMutex.Lock() + ret, specificReturn := fake.validateGlobalSettingsReturnsOnCall[len(fake.validateGlobalSettingsArgsForCall)] + fake.validateGlobalSettingsArgsForCall = append(fake.validateGlobalSettingsArgsForCall, struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + }{arg1, arg2}) + stub := fake.ValidateGlobalSettingsStub + fakeReturns := fake.validateGlobalSettingsReturns + fake.recordInvocation("ValidateGlobalSettings", []interface{}{arg1, arg2}) + fake.validateGlobalSettingsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeValidator) ValidateGlobalSettingsCallCount() int { + fake.validateGlobalSettingsMutex.RLock() + defer 
fake.validateGlobalSettingsMutex.RUnlock() + return len(fake.validateGlobalSettingsArgsForCall) +} + +func (fake *FakeValidator) ValidateGlobalSettingsCalls(stub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = stub +} + +func (fake *FakeValidator) ValidateGlobalSettingsArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() + argsForCall := fake.validateGlobalSettingsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeValidator) ValidateGlobalSettingsReturns(result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + fake.validateGlobalSettingsReturns = struct { + result1 []conditions.Condition + }{result1} +} + +func (fake *FakeValidator) ValidateGlobalSettingsReturnsOnCall(i int, result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + if fake.validateGlobalSettingsReturnsOnCall == nil { + fake.validateGlobalSettingsReturnsOnCall = make(map[int]struct { + result1 []conditions.Condition + }) + } + fake.validateGlobalSettingsReturnsOnCall[i] = struct { + result1 []conditions.Condition + }{result1} +} + func (fake *FakeValidator) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -168,6 +240,8 @@ func (fake *FakeValidator) Invocations() map[string][][]interface{} { defer fake.conflictsMutex.RUnlock() fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/mode/static/nginx/config/policies/policy.go b/internal/mode/static/nginx/config/policies/policy.go index d65a375db8..93d6054155 100644 --- a/internal/mode/static/nginx/config/policies/policy.go +++ b/internal/mode/static/nginx/config/policies/policy.go @@ -24,8 +24,6 @@ type Policy interface { // GlobalSettings contains global settings from the current state of the graph that may be // needed for policy validation or generation if certain policies rely on those global settings. type GlobalSettings struct { - // NginxProxyValid is whether the NginxProxy resource is valid. - NginxProxyValid bool // TelemetryEnabled is whether telemetry is enabled in the NginxProxy resource. TelemetryEnabled bool } diff --git a/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go b/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go index c3c0a1af5b..aaabcbebc9 100644 --- a/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go +++ b/internal/mode/static/nginx/config/policies/upstreamsettings/validator.go @@ -25,7 +25,7 @@ func NewValidator(genericValidator validation.GenericValidator) Validator { } // Validate validates the spec of an UpstreamsSettingsPolicy. 
-func (v Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { +func (v Validator) Validate(policy policies.Policy) []conditions.Condition { usp := helpers.MustCastObject[*ngfAPI.UpstreamSettingsPolicy](policy) targetRefsPath := field.NewPath("spec").Child("targetRefs") @@ -46,6 +46,14 @@ func (v Validator) Validate(policy policies.Policy, _ *policies.GlobalSettings) return nil } +// ValidateGlobalSettings validates an UpstreamSettingsPolicy with respect to the NginxProxy global settings. +func (v Validator) ValidateGlobalSettings( + _ policies.Policy, + _ *policies.GlobalSettings, +) []conditions.Condition { + return nil +} + // Conflicts returns true if the two UpstreamsSettingsPolicies conflict. func (v Validator) Conflicts(polA, polB policies.Policy) bool { cspA := helpers.MustCastObject[*ngfAPI.UpstreamSettingsPolicy](polA) diff --git a/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go b/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go index e34f4738e0..85699ea297 100644 --- a/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go +++ b/internal/mode/static/nginx/config/policies/upstreamsettings/validator_test.go @@ -132,7 +132,7 @@ func TestValidator_Validate(t *testing.T) { t.Parallel() g := NewWithT(t) - conds := v.Validate(test.policy, nil) + conds := v.Validate(test.policy) g.Expect(conds).To(Equal(test.expConditions)) }) } @@ -143,7 +143,7 @@ func TestValidator_ValidatePanics(t *testing.T) { v := upstreamsettings.NewValidator(nil) validate := func() { - _ = v.Validate(&policiesfakes.FakePolicy{}, nil) + _ = v.Validate(&policiesfakes.FakePolicy{}) } g := NewWithT(t) @@ -151,6 +151,15 @@ func TestValidator_ValidatePanics(t *testing.T) { g.Expect(validate).To(Panic()) } +func TestValidator_ValidateGlobalSettings(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + v := upstreamsettings.NewValidator(validation.GenericValidator{}) + + g.Expect(v.ValidateGlobalSettings(nil, nil)).To(BeNil()) +} + func TestValidator_Conflicts(t *testing.T) { t.Parallel() tests := []struct { diff --git a/internal/mode/static/nginx/config/policies/validator.go b/internal/mode/static/nginx/config/policies/validator.go index 0bb7b58924..e618eeea17 100644 --- a/internal/mode/static/nginx/config/policies/validator.go +++ b/internal/mode/static/nginx/config/policies/validator.go @@ -16,7 +16,9 @@ import ( //counterfeiter:generate . Validator type Validator interface { // Validate validates an NGF Policy. - Validate(policy Policy, globalSettings *GlobalSettings) []conditions.Condition + Validate(policy Policy) []conditions.Condition + // ValidateGlobalSettings validates an NGF Policy with the NginxProxy settings. + ValidateGlobalSettings(policy Policy, globalSettings *GlobalSettings) []conditions.Condition // Conflicts returns true if the two Policies conflict. Conflicts(a, b Policy) bool } @@ -54,7 +56,7 @@ func NewManager( } // Validate validates the policy. 
-func (m *CompositeValidator) Validate(policy Policy, globalSettings *GlobalSettings) []conditions.Condition { +func (m *CompositeValidator) Validate(policy Policy) []conditions.Condition { gvk := m.mustExtractGVK(policy) validator, ok := m.validators[gvk] @@ -62,7 +64,22 @@ func (m *CompositeValidator) Validate(policy Policy, globalSettings *GlobalSetti panic(fmt.Sprintf("no validator registered for policy %T", policy)) } - return validator.Validate(policy, globalSettings) + return validator.Validate(policy) +} + +// ValidateGlobalSettings validates an NGF Policy with the NginxProxy settings. +func (m *CompositeValidator) ValidateGlobalSettings( + policy Policy, + globalSettings *GlobalSettings, +) []conditions.Condition { + gvk := m.mustExtractGVK(policy) + + validator, ok := m.validators[gvk] + if !ok { + panic(fmt.Sprintf("no validator registered for policy %T", policy)) + } + + return validator.ValidateGlobalSettings(policy, globalSettings) } // Conflicts returns true if the policies conflict. diff --git a/internal/mode/static/nginx/config/policies/validator_test.go b/internal/mode/static/nginx/config/policies/validator_test.go index 81b1ee87c8..6787d5360b 100644 --- a/internal/mode/static/nginx/config/policies/validator_test.go +++ b/internal/mode/static/nginx/config/policies/validator_test.go @@ -27,6 +27,13 @@ var _ = Describe("Policy CompositeValidator", func() { }, } + bananaGVK := schema.GroupVersionKind{Group: "fruit", Version: "1", Kind: "banana"} + bananaPolicy := &policiesfakes.FakePolicy{ + GetNameStub: func() string { + return "banana" + }, + } + mustExtractGVK := func(object client.Object) schema.GroupVersionKind { switch object.GetName() { case "apple": @@ -42,34 +49,54 @@ var _ = Describe("Policy CompositeValidator", func() { mustExtractGVK, policies.ManagerConfig{ Validator: &policiesfakes.FakeValidator{ - ValidateStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + ValidateStub: func(_ policies.Policy) []conditions.Condition { return []conditions.Condition{staticConds.NewPolicyInvalid("apple error")} }, + ValidateGlobalSettingsStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + return []conditions.Condition{staticConds.NewPolicyInvalid("apple global settings error")} + }, ConflictsStub: func(_ policies.Policy, _ policies.Policy) bool { return true }, }, GVK: appleGVK, }, policies.ManagerConfig{ Validator: &policiesfakes.FakeValidator{ - ValidateStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + ValidateStub: func(_ policies.Policy) []conditions.Condition { return []conditions.Condition{staticConds.NewPolicyInvalid("orange error")} }, + ValidateGlobalSettingsStub: func(_ policies.Policy, _ *policies.GlobalSettings) []conditions.Condition { + return []conditions.Condition{staticConds.NewPolicyInvalid("orange global settings error")} + }, ConflictsStub: func(_ policies.Policy, _ policies.Policy) bool { return false }, }, GVK: orangeGVK, }, + policies.ManagerConfig{ + Validator: &policiesfakes.FakeValidator{}, + GVK: bananaGVK, + }, ) Context("Validation", func() { When("Policy is registered with manager", func() { It("Validates the policy", func() { - conds := mgr.Validate(applePolicy, nil) + globalSettings := &policies.GlobalSettings{} + + conds := mgr.Validate(applePolicy) Expect(conds).To(HaveLen(1)) Expect(conds[0].Message).To(Equal("apple error")) - conds = mgr.Validate(orangePolicy, nil) + conds = mgr.ValidateGlobalSettings(applePolicy, globalSettings) + 
Expect(conds).To(HaveLen(1)) + Expect(conds[0].Message).To(Equal("apple global settings error")) + + conds = mgr.Validate(orangePolicy) Expect(conds).To(HaveLen(1)) Expect(conds[0].Message).To(Equal("orange error")) + + conds = mgr.ValidateGlobalSettings(orangePolicy, globalSettings) + Expect(conds).To(HaveLen(1)) + Expect(conds[0].Message).To(Equal("orange global settings error")) }) It("Returns whether the policies conflict", func() { Expect(mgr.Conflicts(applePolicy, applePolicy)).To(BeTrue()) @@ -79,7 +106,7 @@ var _ = Describe("Policy CompositeValidator", func() { When("Policy is not registered with manager", func() { It("Panics on call to validate", func() { validate := func() { - _ = mgr.Validate(&policiesfakes.FakePolicy{}, nil) + _ = mgr.Validate(&policiesfakes.FakePolicy{}) } Expect(validate).To(Panic()) @@ -89,6 +116,13 @@ var _ = Describe("Policy CompositeValidator", func() { _ = mgr.Conflicts(&policiesfakes.FakePolicy{}, &policiesfakes.FakePolicy{}) } + Expect(conflict).To(Panic()) + }) + It("panics on call to conflicts when no validator is registered for policy", func() { + conflict := func() { + _ = mgr.Conflicts(bananaPolicy, bananaPolicy) + } + Expect(conflict).To(Panic()) }) }) diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index 7c058bf784..f925b9c133 100644 --- a/internal/mode/static/provisioner/objects.go +++ b/internal/mode/static/provisioner/objects.go @@ -423,9 +423,12 @@ func buildNginxService( serviceType = corev1.ServiceType(*serviceCfg.ServiceType) } - servicePolicy := defaultServicePolicy - if serviceCfg.ExternalTrafficPolicy != nil { - servicePolicy = corev1.ServiceExternalTrafficPolicy(*serviceCfg.ExternalTrafficPolicy) + var servicePolicy corev1.ServiceExternalTrafficPolicyType + if serviceType != corev1.ServiceTypeClusterIP { + servicePolicy = defaultServicePolicy + if serviceCfg.ExternalTrafficPolicy != nil { + servicePolicy = corev1.ServiceExternalTrafficPolicy(*serviceCfg.ExternalTrafficPolicy) + } } servicePorts := make([]corev1.ServicePort, 0, len(ports)) @@ -593,6 +596,7 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( {MountPath: "/var/run/secrets/ngf", Name: "nginx-agent-tls"}, {MountPath: "/var/run/secrets/ngf/serviceaccount", Name: "token"}, {MountPath: "/var/log/nginx-agent", Name: "nginx-agent-log"}, + {MountPath: "/var/lib/nginx-agent", Name: "nginx-agent-lib"}, {MountPath: "/etc/nginx/conf.d", Name: "nginx-conf"}, {MountPath: "/etc/nginx/stream-conf.d", Name: "nginx-stream-conf"}, {MountPath: "/etc/nginx/main-includes", Name: "nginx-main-includes"}, @@ -687,6 +691,7 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( }, }, {Name: "nginx-agent-log", VolumeSource: emptyDirVolumeSource}, + {Name: "nginx-agent-lib", VolumeSource: emptyDirVolumeSource}, {Name: "nginx-conf", VolumeSource: emptyDirVolumeSource}, {Name: "nginx-stream-conf", VolumeSource: emptyDirVolumeSource}, {Name: "nginx-main-includes", VolumeSource: emptyDirVolumeSource}, diff --git a/internal/mode/static/state/change_processor_test.go b/internal/mode/static/state/change_processor_test.go index 0b1fef6057..76055298f0 100644 --- a/internal/mode/static/state/change_processor_test.go +++ b/internal/mode/static/state/change_processor_test.go @@ -58,18 +58,14 @@ func createHTTPRoute( CommonRouteSpec: v1.CommonRouteSpec{ ParentRefs: []v1.ParentReference{ { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpListenerName), - 
), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpListenerName)), }, { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpsListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpsListenerName)), }, }, }, @@ -109,18 +105,14 @@ func createGRPCRoute( CommonRouteSpec: v1.CommonRouteSpec{ ParentRefs: []v1.ParentReference{ { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpListenerName)), }, { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(httpsListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(httpsListenerName)), }, }, }, @@ -156,11 +148,9 @@ func createTLSRoute(name, gateway, hostname string, backendRefs ...v1.BackendRef CommonRouteSpec: v1.CommonRouteSpec{ ParentRefs: []v1.ParentReference{ { - Namespace: (*v1.Namespace)(helpers.GetPointer("test")), - Name: v1.ObjectName(gateway), - SectionName: (*v1.SectionName)( - helpers.GetPointer(tlsListenerName), - ), + Namespace: (*v1.Namespace)(helpers.GetPointer("test")), + Name: v1.ObjectName(gateway), + SectionName: (*v1.SectionName)(helpers.GetPointer(tlsListenerName)), }, }, }, @@ -178,7 +168,7 @@ func createTLSRoute(name, gateway, hostname string, backendRefs ...v1.BackendRef func createHTTPListener() v1.Listener { return v1.Listener{ - Name: httpListenerName, + Name: v1.SectionName(httpListenerName), Hostname: nil, Port: 80, Protocol: v1.HTTPProtocolType, @@ -428,15 +418,15 @@ var _ = Describe("ChangeProcessor", func() { hr1, hr1Updated, hr2 *v1.HTTPRoute gr1, gr1Updated, gr2 *v1.GRPCRoute tr1, tr1Updated, tr2 *v1alpha2.TLSRoute - gw1, gw1Updated, gw2 *v1.Gateway + gw1, gw1Updated, gw2, gw2Updated *v1.Gateway secretRefGrant, hrServiceRefGrant *v1beta1.ReferenceGrant grServiceRefGrant, trServiceRefGrant *v1beta1.ReferenceGrant - expGraph *graph.Graph + expGraph, expGraph2 *graph.Graph expRouteHR1, expRouteHR2 *graph.L7Route expRouteGR1, expRouteGR2 *graph.L7Route expRouteTR1, expRouteTR2 *graph.L4Route gatewayAPICRD, gatewayAPICRDUpdated *metav1.PartialObjectMetadata - httpRouteKey1, httpRouteKey2, grpcRouteKey1, grpcRouteKey2 graph.RouteKey + httpRouteKey1, httpRouteKey2, grpcRouteKey1, grpcRouteKey2 graph.RouteKey // gitleaks:allow not a secret trKey1, trKey2 graph.L4RouteKey refSvc, refGRPCSvc, refTLSSvc types.NamespacedName ) @@ -482,14 +472,16 @@ var _ = Describe("ChangeProcessor", func() { httpRouteKey1 = graph.CreateRouteKey(hr1) hr1Updated = hr1.DeepCopy() hr1Updated.Generation++ - hr2 = createHTTPRoute("hr-2", "gateway-2", "bar.example.com") + + hr2 = createHTTPRoute("hr-2", "gateway-2", "bar.example.com", crossNsHTTPBackendRef) httpRouteKey2 = graph.CreateRouteKey(hr2) gr1 = createGRPCRoute("gr-1", "gateway-1", "foo.example.com", grpcBackendRef) grpcRouteKey1 = graph.CreateRouteKey(gr1) gr1Updated = gr1.DeepCopy() gr1Updated.Generation++ - gr2 = 
createGRPCRoute("gr-2", "gateway-2", "bar.example.com") + + gr2 = createGRPCRoute("gr-2", "gateway-2", "bar.example.com", grpcBackendRef) grpcRouteKey2 = graph.CreateRouteKey(gr2) tlsBackendRef := createTLSBackendRef(refTLSSvc.Name, refTLSSvc.Namespace) @@ -497,6 +489,7 @@ var _ = Describe("ChangeProcessor", func() { trKey1 = graph.CreateRouteKeyL4(tr1) tr1Updated = tr1.DeepCopy() tr1Updated.Generation++ + tr2 = createTLSRoute("tr-2", "gateway-2", "bar.tls.com", tlsBackendRef) trKey2 = graph.CreateRouteKeyL4(tr2) @@ -645,6 +638,9 @@ var _ = Describe("ChangeProcessor", func() { createTLSListener(tlsListenerName), ) + gw2Updated = gw2.DeepCopy() + gw2Updated.Generation++ + gatewayAPICRD = &metav1.PartialObjectMetadata{ TypeMeta: metav1.TypeMeta{ Kind: "CustomResourceDefinition", @@ -668,20 +664,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, SectionName: hr1.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpsListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpsListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, Idx: 1, SectionName: hr1.Spec.ParentRefs[1].SectionName, }, @@ -692,8 +702,9 @@ var _ = Describe("ChangeProcessor", func() { { BackendRefs: []graph.BackendRef{ { - SvcNsName: refSvc, - Weight: 1, + SvcNsName: refSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, ValidMatches: true, @@ -721,20 +732,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + httpListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, SectionName: hr2.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpsListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + httpsListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, 
Idx: 1, SectionName: hr2.Spec.ParentRefs[1].SectionName, }, @@ -743,18 +768,30 @@ var _ = Describe("ChangeProcessor", func() { Hostnames: hr2.Spec.Hostnames, Rules: []graph.RouteRule{ { + BackendRefs: []graph.BackendRef{ + { + SvcNsName: refSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + }, + }, ValidMatches: true, Filters: graph.RouteRuleFilters{ Valid: true, Filters: []graph.Filter{}, }, Matches: hr2.Spec.Rules[0].Matches, - RouteBackendRefs: []graph.RouteBackendRef{}, + RouteBackendRefs: createRouteBackendRefs(hr2.Spec.Rules[0].BackendRefs), }, }, }, Valid: true, Attachable: true, + Conditions: []conditions.Condition{ + staticConds.NewRouteBackendRefRefBackendNotFound( + "spec.rules[0].backendRefs[0].name: Not found: \"service\"", + ), + }, } expRouteGR1 = &graph.L7Route{ @@ -763,20 +800,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, SectionName: gr1.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpsListenerName: {"foo.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpsListenerName, + ): {"foo.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, Idx: 1, SectionName: gr1.Spec.ParentRefs[1].SectionName, }, @@ -787,8 +838,9 @@ var _ = Describe("ChangeProcessor", func() { { BackendRefs: []graph.BackendRef{ { - SvcNsName: refGRPCSvc, - Weight: 1, + SvcNsName: refGRPCSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, ValidMatches: true, @@ -816,20 +868,34 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 80, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + httpListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 80, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, SectionName: gr2.Spec.ParentRefs[0].SectionName, }, { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{httpsListenerName: {"bar.example.com"}}, - Attached: true, - ListenerPort: 443, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + httpsListenerName, + ): {"bar.example.com"}, + }, + Attached: true, + ListenerPort: 443, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: 
types.NamespacedName{Namespace: "test", Name: "gateway-2"}, Idx: 1, SectionName: gr2.Spec.ParentRefs[1].SectionName, }, @@ -838,18 +904,30 @@ var _ = Describe("ChangeProcessor", func() { Hostnames: gr2.Spec.Hostnames, Rules: []graph.RouteRule{ { + BackendRefs: []graph.BackendRef{ + { + SvcNsName: refGRPCSvc, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + }, + }, ValidMatches: true, Filters: graph.RouteRuleFilters{ Valid: true, Filters: []graph.Filter{}, }, Matches: graph.ConvertGRPCMatches(gr2.Spec.Rules[0].Matches), - RouteBackendRefs: []graph.RouteBackendRef{}, + RouteBackendRefs: createGRPCRouteBackendRefs(gr2.Spec.Rules[0].BackendRefs), }, }, }, Valid: true, Attachable: true, + Conditions: []conditions.Condition{ + staticConds.NewRouteBackendRefRefBackendNotFound( + "spec.rules[0].backendRefs[0].name: Not found: \"grpc-service\"", + ), + }, } expRouteTR1 = &graph.L4Route{ @@ -857,18 +935,26 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{tlsListenerName: {"foo.tls.com"}}, - Attached: true, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + tlsListenerName, + ): {"foo.tls.com"}, + }, + Attached: true, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-1"}, SectionName: tr1.Spec.ParentRefs[0].SectionName, }, }, Spec: graph.L4RouteSpec{ Hostnames: tr1.Spec.Hostnames, BackendRef: graph.BackendRef{ - SvcNsName: refTLSSvc, - Valid: false, + SvcNsName: refTLSSvc, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Valid: true, @@ -885,18 +971,26 @@ var _ = Describe("ChangeProcessor", func() { ParentRefs: []graph.ParentRef{ { Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{tlsListenerName: {"bar.tls.com"}}, - Attached: true, + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw2), + tlsListenerName, + ): {"bar.tls.com"}, + }, + Attached: true, + }, + Gateway: &graph.ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw2), }, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway-2"}, SectionName: tr2.Spec.ParentRefs[0].SectionName, }, }, Spec: graph.L4RouteSpec{ Hostnames: tr2.Spec.Hostnames, BackendRef: graph.BackendRef{ - SvcNsName: refTLSSvc, - Valid: false, + SvcNsName: refTLSSvc, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Valid: true, @@ -915,60 +1009,200 @@ var _ = Describe("ChangeProcessor", func() { Source: gc, Valid: true, }, - Gateway: &graph.Gateway{ - Source: gw1, - Listeners: []*graph.Listener{ - { - Name: httpListenerName, - Source: gw1.Spec.Listeners[0], - Valid: true, - Attachable: true, - Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, - L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-1"}: { + Source: gw1, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: 
client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, - }, - { - Name: httpsListenerName, - Source: gw1.Spec.Listeners[1], - Valid: true, - Attachable: true, - Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, - L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(diffNsTLSSecret)), - SupportedKinds: []v1.RouteGroupKind{ - {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(diffNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, - }, - { - Name: tlsListenerName, - Source: gw1.Spec.Listeners[2], - Valid: true, - Attachable: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, - L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-test-class", + }, }, - Valid: true, }, - IgnoredGateways: map[types.NamespacedName]*v1.Gateway{}, L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, ReferencedSecrets: map[types.NamespacedName]*graph.Secret{}, ReferencedServices: map[types.NamespacedName]*graph.ReferencedService{ - refSvc: {}, - refTLSSvc: {}, - refGRPCSvc: {}, + refSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, }, - DeploymentName: types.NamespacedName{ - Namespace: "test", - Name: "gateway-1-test-class", + } + + expGraph2 = &graph.Graph{ + GatewayClass: &graph.GatewayClass{ + Source: gc, + Valid: true, + }, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: 
"gateway-1"}: { + Source: gw1, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(diffNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw1), + Source: gw1.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-test-class", + }, + }, + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + }, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{trKey1: expRouteTR1}, + Routes: map[graph.RouteKey]*graph.L7Route{httpRouteKey1: expRouteHR1, grpcRouteKey1: expRouteGR1}, + ReferencedSecrets: 
map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + client.ObjectKeyFromObject(diffNsTLSSecret): { + Source: diffNsTLSSecret, + CertBundle: diffNsTLSCert, + }, + }, + ReferencedServices: map[types.NamespacedName]*graph.ReferencedService{ + refSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: "test", Name: "gateway-1"}: {}}, + }, }, } }) @@ -1026,9 +1260,10 @@ var _ = Describe("ChangeProcessor", func() { expGraph.GatewayClass = nil - expGraph.Gateway.Conditions = staticConds.NewGatewayInvalid("GatewayClass doesn't exist") - expGraph.Gateway.Valid = false - expGraph.Gateway.Listeners = nil + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + gw.Conditions = staticConds.NewGatewayInvalid("GatewayClass doesn't exist") + gw.Valid = false + gw.Listeners = nil // no ref grant exists yet for the routes expGraph.Routes[httpRouteKey1].Conditions = []conditions.Condition{ @@ -1055,23 +1290,23 @@ var _ = Describe("ChangeProcessor", func() { // gateway class does not exist so routes cannot attach expGraph.Routes[httpRouteKey1].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.Routes[httpRouteKey1].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.Routes[grpcRouteKey1].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.Routes[grpcRouteKey1].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.L4Routes[trKey1].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } expGraph.ReferencedSecrets = nil @@ -1092,7 +1327,8 @@ var _ = Describe("ChangeProcessor", func() { // No ref grant exists yet for gw1 // so the listener is not valid, but still attachable - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + listener443 := getListenerByName(gw, httpsListenerName) listener443.Valid = false listener443.ResolvedSecret = nil listener443.Conditions = staticConds.NewListenerRefNotPermitted( @@ -1101,7 +1337,10 @@ var _ = Describe("ChangeProcessor", func() { expAttachment80 := &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - httpListenerName: {"foo.example.com"}, + graph.CreateGatewayListenerKey( + 
client.ObjectKeyFromObject(gw1), + httpListenerName, + ): {"foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -1109,13 +1348,16 @@ var _ = Describe("ChangeProcessor", func() { expAttachment443 := &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - httpsListenerName: {"foo.example.com"}, + graph.CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1), + httpsListenerName, + ): {"foo.example.com"}, }, Attached: true, ListenerPort: 443, } - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) + listener80 := getListenerByName(gw, httpListenerName) listener80.Routes[httpRouteKey1].ParentRefs[0].Attachment = expAttachment80 listener443.Routes[httpRouteKey1].ParentRefs[1].Attachment = expAttachment443 listener80.Routes[grpcRouteKey1].ParentRefs[0].Attachment = expAttachment80 @@ -1123,22 +1365,22 @@ var _ = Describe("ChangeProcessor", func() { // no ref grant exists yet for hr1 expGraph.Routes[httpRouteKey1].Conditions = []conditions.Condition{ - staticConds.NewRouteInvalidListener(), staticConds.NewRouteBackendRefRefNotPermitted( "spec.rules[0].backendRefs[0].namespace: Forbidden: Backend ref to Service " + "service-ns/service not permitted by any ReferenceGrant", ), + staticConds.NewRouteInvalidListener(), } expGraph.Routes[httpRouteKey1].ParentRefs[0].Attachment = expAttachment80 expGraph.Routes[httpRouteKey1].ParentRefs[1].Attachment = expAttachment443 // no ref grant exists yet for gr1 expGraph.Routes[grpcRouteKey1].Conditions = []conditions.Condition{ - staticConds.NewRouteInvalidListener(), staticConds.NewRouteBackendRefRefNotPermitted( "spec.rules[0].backendRefs[0].namespace: Forbidden: Backend ref to Service " + "grpc-service-ns/grpc-service not permitted by any ReferenceGrant", ), + staticConds.NewRouteInvalidListener(), } expGraph.Routes[grpcRouteKey1].ParentRefs[0].Attachment = expAttachment80 expGraph.Routes[grpcRouteKey1].ParentRefs[1].Attachment = expAttachment443 @@ -1343,10 +1585,11 @@ var _ = Describe("ChangeProcessor", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(hr1Updated) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + listener443 := getListenerByName(gw, httpsListenerName) listener443.Routes[httpRouteKey1].Source.SetGeneration(hr1Updated.Generation) - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) + listener80 := getListenerByName(gw, httpListenerName) listener80.Routes[httpRouteKey1].Source.SetGeneration(hr1Updated.Generation) expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ Source: diffNsTLSSecret, @@ -1361,10 +1604,11 @@ var _ = Describe("ChangeProcessor", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(gr1Updated) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + listener443 := getListenerByName(gw, httpsListenerName) listener443.Routes[grpcRouteKey1].Source.SetGeneration(gr1Updated.Generation) - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) + listener80 := getListenerByName(gw, httpListenerName) listener80.Routes[grpcRouteKey1].Source.SetGeneration(gr1Updated.Generation) expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ Source: diffNsTLSSecret, @@ -1378,7 +1622,8 @@ var _ = Describe("ChangeProcessor", func() { 
It("returns populated graph", func() { processor.CaptureUpsertChange(tr1Updated) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + tlsListener := getListenerByName(gw, tlsListenerName) tlsListener.L4Routes[trKey1].Source.SetGeneration(tr1Updated.Generation) expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ @@ -1393,7 +1638,8 @@ var _ = Describe("ChangeProcessor", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(gw1Updated) - expGraph.Gateway.Source.Generation = gw1Updated.Generation + gw := expGraph.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-1"}] + gw.Source.Generation = gw1Updated.Generation expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ Source: diffNsTLSSecret, CertBundle: diffNsTLSCert, @@ -1456,118 +1702,118 @@ var _ = Describe("ChangeProcessor", func() { }) }) When("the second Gateway is upserted", func() { - It("returns populated graph using first gateway", func() { + It("returns populated graph with second gateway", func() { processor.CaptureUpsertChange(gw2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, - } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, - } - - processAndValidateGraph(expGraph) + processAndValidateGraph(expGraph2) }) }) When("the second HTTPRoute is upserted", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(hr2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, + expGraph2.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ + Source: diffNsTLSSecret, + CertBundle: diffNsTLSCert, } - expGraph.Routes[httpRouteKey2] = expRouteHR2 - expGraph.Routes[httpRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + gw2NSName := client.ObjectKeyFromObject(gw2) + gw := expGraph2.Gateways[gw2NSName] + + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, } - expGraph.Routes[httpRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, + + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + httpRouteKey1: expRouteHR1, + grpcRouteKey1: expRouteGR1, } - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices[refSvc].GatewayNsNames[gw2NSName] = struct{}{} + + processAndValidateGraph(expGraph2) }) }) When("the second GRPCRoute is upserted", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(gr2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, - } - expGraph.Routes[httpRouteKey2] = expRouteHR2 - 
expGraph.Routes[httpRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - } - expGraph.Routes[httpRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - } + gw2NSName := client.ObjectKeyFromObject(gw2) + gw := expGraph2.Gateways[gw2NSName] - expGraph.Routes[grpcRouteKey2] = expRouteGR2 - expGraph.Routes[grpcRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.Routes[grpcRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + httpRouteKey1: expRouteHR1, + grpcRouteKey1: expRouteGR1, + grpcRouteKey2: expRouteGR2, } - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices[refSvc].GatewayNsNames[gw2NSName] = struct{}{} + expGraph2.ReferencedServices[refGRPCSvc].GatewayNsNames[gw2NSName] = struct{}{} + + processAndValidateGraph(expGraph2) }) }) When("the second TLSRoute is upserted", func() { It("returns populated graph", func() { processor.CaptureUpsertChange(tr2) - expGraph.IgnoredGateways = map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: gw2, - } - expGraph.Routes[httpRouteKey2] = expRouteHR2 - expGraph.Routes[httpRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - } - expGraph.Routes[httpRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + gw2NSName := client.ObjectKeyFromObject(gw2) + gw := expGraph2.Gateways[gw2NSName] + + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.Routes[grpcRouteKey2] = expRouteGR2 - expGraph.Routes[grpcRouteKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, } - expGraph.Routes[grpcRouteKey2].ParentRefs[1].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + + tlsListener := getListenerByName(gw, tlsListenerName) + 
tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, } - expGraph.L4Routes[trKey2] = expRouteTR2 - expGraph.L4Routes[trKey2].ParentRefs[0].Attachment = &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + httpRouteKey1: expRouteHR1, + grpcRouteKey1: expRouteGR1, + grpcRouteKey2: expRouteGR2, } - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(diffNsTLSSecret)] = &graph.Secret{ - Source: diffNsTLSSecret, - CertBundle: diffNsTLSCert, + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey1: expRouteTR1, + trKey2: expRouteTR2, } - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices[refSvc].GatewayNsNames[gw2NSName] = struct{}{} + expGraph2.ReferencedServices[refGRPCSvc].GatewayNsNames[gw2NSName] = struct{}{} + expGraph2.ReferencedServices[refTLSSvc].GatewayNsNames[gw2NSName] = struct{}{} + + processAndValidateGraph(expGraph2) }) }) When("the first Gateway is deleted", func() { @@ -1577,51 +1823,112 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "gateway-1"}, ) - // gateway 2 takes over; - expGraph.DeploymentName.Name = "gateway-2-test-class" - // route 1 has been replaced by route 2 - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) - - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] - - delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) - - listener80.Routes[httpRouteKey2] = expRouteHR2 - listener443.Routes[httpRouteKey2] = expRouteHR2 - listener80.Routes[grpcRouteKey2] = expRouteGR2 - listener443.Routes[grpcRouteKey2] = expRouteGR2 - tlsListener.L4Routes[trKey2] = expRouteTR2 - - delete(expGraph.Routes, httpRouteKey1) - delete(expGraph.Routes, grpcRouteKey1) - delete(expGraph.L4Routes, trKey1) - - expGraph.Routes[httpRouteKey2] = expRouteHR2 - expGraph.Routes[grpcRouteKey2] = expRouteGR2 - expGraph.L4Routes[trKey2] = expRouteTR2 - - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, - } - - delete(expGraph.ReferencedServices, expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - delete(expGraph.ReferencedServices, expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} + // gateway 2 only remains; + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + 
SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - processAndValidateGraph(expGraph) + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] + + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, + } + + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, + } + + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + httpRouteKey2: expRouteHR2, + grpcRouteKey2: expRouteGR2, + } + + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.ReferencedServices = map[types.NamespacedName]*graph.ReferencedService{ + refSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + } + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + + processAndValidateGraph(expGraph2) }) }) When("the second HTTPRoute is deleted", func() { @@ -1631,50 +1938,103 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "hr-2"}, ) - // gateway 2 still in charge; - expGraph.DeploymentName.Name = "gateway-2-test-class" - // no HTTP routes remain - // GRPCRoute 2 still exists - // TLSRoute 2 still exists - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) - - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] - - 
delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) - - listener80.Routes[grpcRouteKey2] = expRouteGR2 - listener443.Routes[grpcRouteKey2] = expRouteGR2 - tlsListener.L4Routes[trKey2] = expRouteTR2 - - delete(expGraph.Routes, httpRouteKey1) - delete(expGraph.Routes, grpcRouteKey1) - expGraph.Routes[grpcRouteKey2] = expRouteGR2 - - delete(expGraph.L4Routes, trKey1) - expGraph.L4Routes[trKey2] = expRouteTR2 - - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, - } - - delete(expGraph.ReferencedServices, expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - delete(expGraph.ReferencedServices, expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} + // gateway 2 only remains; + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - processAndValidateGraph(expGraph) + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] + + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{ + grpcRouteKey2: expRouteGR2, + } + + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{ + grpcRouteKey2: expRouteGR2, + } + + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{ + grpcRouteKey2: 
expRouteGR2, + } + + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.ReferencedServices = map[types.NamespacedName]*graph.ReferencedService{ + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + refGRPCSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + } + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + processAndValidateGraph(expGraph2) }) }) When("the second GRPCRoute is deleted", func() { @@ -1684,43 +2044,92 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "gr-2"}, ) - // gateway 2 still in charge; - expGraph.DeploymentName.Name = "gateway-2-test-class" - // no routes remain - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) - - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] + // gateway 2 only remains; + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] - tlsListener.L4Routes[trKey2] = expRouteTR2 - expGraph.Routes = map[graph.RouteKey]*graph.L7Route{} + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = 
map[graph.RouteKey]*graph.L7Route{} - delete(expGraph.L4Routes, trKey1) - expGraph.L4Routes[trKey2] = expRouteTR2 + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{} - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, } - delete(expGraph.ReferencedServices, expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - delete(expGraph.ReferencedServices, expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName) - expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{} - processAndValidateGraph(expGraph) + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{ + trKey2: expRouteTR2, + } + + expGraph2.ReferencedServices = map[types.NamespacedName]*graph.ReferencedService{ + refTLSSvc: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway-2"}: {}, + }, + }, + } + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + processAndValidateGraph(expGraph2) }) }) When("the second TLSRoute is deleted", func() { @@ -1730,39 +2139,81 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Namespace: "test", Name: "tr-2"}, ) - // gateway 2 still in charge; - expGraph.DeploymentName.Name = "gateway-2-test-class" - // no HTTP or TLS routes remain - listener80 := getListenerByName(expGraph.Gateway, httpListenerName) - listener443 := getListenerByName(expGraph.Gateway, httpsListenerName) - tlsListener := getListenerByName(expGraph.Gateway, tlsListenerName) + // gateway 2 only remains; + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: gw2, + Listeners: []*graph.Listener{ + { + Name: httpListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: httpsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: map[graph.L4RouteKey]*graph.L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + {Kind: v1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: tlsListenerName, + GatewayName: client.ObjectKeyFromObject(gw2), + Source: gw2.Spec.Listeners[2], + Valid: true, + Attachable: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, + L4Routes: 
map[graph.L4RouteKey]*graph.L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: v1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + }, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, + } - expGraph.Gateway.Source = gw2 - listener80.Source = gw2.Spec.Listeners[0] - listener443.Source = gw2.Spec.Listeners[1] - tlsListener.Source = gw2.Spec.Listeners[2] + gw := expGraph2.Gateways[types.NamespacedName{Namespace: "test", Name: "gateway-2"}] - delete(listener80.Routes, httpRouteKey1) - delete(listener443.Routes, httpRouteKey1) - delete(listener80.Routes, grpcRouteKey1) - delete(listener443.Routes, grpcRouteKey1) - delete(tlsListener.L4Routes, trKey1) + listener80 := getListenerByName(gw, httpListenerName) + listener80.Routes = map[graph.RouteKey]*graph.L7Route{} - expGraph.Routes = map[graph.RouteKey]*graph.L7Route{} - expGraph.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} + listener443 := getListenerByName(gw, httpsListenerName) + listener443.Routes = map[graph.RouteKey]*graph.L7Route{} - sameNsTLSSecretRef := helpers.GetPointer(client.ObjectKeyFromObject(sameNsTLSSecret)) - listener443.ResolvedSecret = sameNsTLSSecretRef - expGraph.ReferencedSecrets[client.ObjectKeyFromObject(sameNsTLSSecret)] = &graph.Secret{ - Source: sameNsTLSSecret, - CertBundle: sameNsTLSCert, - } + tlsListener := getListenerByName(gw, tlsListenerName) + tlsListener.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} - expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - expGraph.ReferencedServices = nil + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{} + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} - processAndValidateGraph(expGraph) + expGraph2.ReferencedServices = nil + expGraph2.ReferencedSecrets = map[types.NamespacedName]*graph.Secret{ + client.ObjectKeyFromObject(sameNsTLSSecret): { + Source: sameNsTLSSecret, + CertBundle: sameNsTLSCert, + }, + } + processAndValidateGraph(expGraph2) }) }) When("the GatewayClass is deleted", func() { @@ -1772,21 +2223,40 @@ var _ = Describe("ChangeProcessor", func() { types.NamespacedName{Name: gcName}, ) - expGraph.GatewayClass = nil - expGraph.Gateway = &graph.Gateway{ - Source: gw2, - Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + expGraph2.GatewayClass = nil + expGraph2.Gateways = map[types.NamespacedName]*graph.Gateway{ + {Namespace: "test", Name: "gateway-2"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway-2", + Generation: 1, + }, + Spec: v1.GatewaySpec{ + GatewayClassName: "test-class", + Listeners: []v1.Listener{ + createHTTPListener(), + createHTTPSListener(httpsListenerName, sameNsTLSSecret), + createTLSListener(tlsListenerName), + }, + }, + }, + Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-test-class", + }, + }, } - expGraph.DeploymentName.Name = "gateway-2-test-class" - expGraph.Routes = map[graph.RouteKey]*graph.L7Route{} - expGraph.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} - expGraph.ReferencedSecrets = nil + expGraph2.Routes = map[graph.RouteKey]*graph.L7Route{} + expGraph2.L4Routes = map[graph.L4RouteKey]*graph.L4Route{} + expGraph2.ReferencedSecrets = nil expRouteHR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} 
expRouteGR1.Spec.Rules[0].BackendRefs[0].SvcNsName = types.NamespacedName{} - expGraph.ReferencedServices = nil + expGraph2.ReferencedServices = nil - processAndValidateGraph(expGraph) + processAndValidateGraph(expGraph2) }) }) When("the second Gateway is deleted", func() { @@ -2488,8 +2958,9 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(paramGW) changed, graph := processor.Process() + gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.Gateway.NginxProxy.Source).To(Equal(np)) + Expect(gw.NginxProxy.Source).To(Equal(np)) }) It("captures changes for an NginxProxy", func() { processor.CaptureUpsertChange(npUpdated) @@ -2497,14 +2968,16 @@ var _ = Describe("ChangeProcessor", func() { changed, graph := processor.Process() Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.Gateway.NginxProxy.Source).To(Equal(npUpdated)) + gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] + Expect(gw.NginxProxy.Source).To(Equal(npUpdated)) }) It("handles deletes for an NginxProxy", func() { processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) changed, graph := processor.Process() Expect(changed).To(Equal(state.ClusterStateChange)) - Expect(graph.Gateway.NginxProxy).To(BeNil()) + gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] + Expect(gw.NginxProxy).To(BeNil()) }) }) }) diff --git a/internal/mode/static/state/conditions/conditions.go b/internal/mode/static/state/conditions/conditions.go index b82ccead99..9452ceb533 100644 --- a/internal/mode/static/state/conditions/conditions.go +++ b/internal/mode/static/state/conditions/conditions.go @@ -107,10 +107,6 @@ const ( // has an overlapping hostname:port/path combination with another Route. PolicyReasonTargetConflict v1alpha2.PolicyConditionReason = "TargetConflict" - // GatewayIgnoredReason is used with v1.RouteConditionAccepted when the route references a Gateway that is ignored - // by NGF. - GatewayIgnoredReason v1.RouteConditionReason = "GatewayIgnored" - // GatewayResolvedRefs condition indicates whether the controller was able to resolve the // parametersRef on the Gateway. GatewayResolvedRefs v1.GatewayConditionType = "ResolvedRefs" @@ -127,17 +123,6 @@ const ( GatewayReasonParamsRefInvalid v1.GatewayConditionReason = "ParametersRefInvalid" ) -// NewRouteNotAcceptedGatewayIgnored returns a Condition that indicates that the Route is not accepted by the Gateway -// because the Gateway is ignored by NGF. -func NewRouteNotAcceptedGatewayIgnored() conditions.Condition { - return conditions.Condition{ - Type: string(v1.RouteConditionAccepted), - Status: metav1.ConditionFalse, - Reason: string(GatewayIgnoredReason), - Message: "The Gateway is ignored by the controller", - } -} - // NewDefaultRouteConditions returns the default conditions that must be present in the status of a Route. 
func NewDefaultRouteConditions() []conditions.Condition { return []conditions.Condition{ diff --git a/internal/mode/static/state/dataplane/configuration.go b/internal/mode/static/state/dataplane/configuration.go index 7945a02da6..0512876405 100644 --- a/internal/mode/static/state/dataplane/configuration.go +++ b/internal/mode/static/state/dataplane/configuration.go @@ -31,26 +31,27 @@ const ( func BuildConfiguration( ctx context.Context, g *graph.Graph, + gateway *graph.Gateway, serviceResolver resolver.ServiceResolver, configVersion int, plus bool, ) Configuration { - if g.GatewayClass == nil || !g.GatewayClass.Valid || g.Gateway == nil { - config := GetDefaultConfiguration(g, configVersion) + if g.GatewayClass == nil || !g.GatewayClass.Valid || gateway == nil { + config := GetDefaultConfiguration(g, configVersion, gateway) if plus { - config.NginxPlus = buildNginxPlus(g) + config.NginxPlus = buildNginxPlus(gateway) } return config } - baseHTTPConfig := buildBaseHTTPConfig(g) + baseHTTPConfig := buildBaseHTTPConfig(g, gateway) - httpServers, sslServers := buildServers(g) + httpServers, sslServers := buildServers(gateway) backendGroups := buildBackendGroups(append(httpServers, sslServers...)) upstreams := buildUpstreams( ctx, - g.Gateway.Listeners, + gateway, serviceResolver, g.ReferencedServices, baseHTTPConfig.IPFamily, @@ -58,25 +59,25 @@ func BuildConfiguration( var nginxPlus NginxPlus if plus { - nginxPlus = buildNginxPlus(g) + nginxPlus = buildNginxPlus(gateway) } config := Configuration{ HTTPServers: httpServers, SSLServers: sslServers, - TLSPassthroughServers: buildPassthroughServers(g), + TLSPassthroughServers: buildPassthroughServers(gateway), Upstreams: upstreams, - StreamUpstreams: buildStreamUpstreams(ctx, g.Gateway.Listeners, serviceResolver, baseHTTPConfig.IPFamily), + StreamUpstreams: buildStreamUpstreams(ctx, gateway, serviceResolver, baseHTTPConfig.IPFamily), BackendGroups: backendGroups, - SSLKeyPairs: buildSSLKeyPairs(g.ReferencedSecrets, g.Gateway.Listeners), + SSLKeyPairs: buildSSLKeyPairs(g.ReferencedSecrets, gateway.Listeners), Version: configVersion, CertBundles: buildCertBundles( buildRefCertificateBundles(g.ReferencedSecrets, g.ReferencedCaCertConfigMaps), backendGroups, ), - Telemetry: buildTelemetry(g), + Telemetry: buildTelemetry(g, gateway), BaseHTTPConfig: baseHTTPConfig, - Logging: buildLogging(g), + Logging: buildLogging(gateway), NginxPlus: nginxPlus, MainSnippets: buildSnippetsForContext(g.SnippetsFilters, ngfAPIv1alpha1.NginxContextMain), AuxiliarySecrets: buildAuxiliarySecrets(g.PlusSecrets), @@ -86,13 +87,13 @@ func BuildConfiguration( } // buildPassthroughServers builds TLSPassthroughServers from TLSRoutes attaches to listeners. 
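For reference, the reworked signature above means a Configuration is now built per Gateway rather than from a single graph-wide Gateway. A minimal sketch of the resulting call pattern follows; the wrapper name, variable names, and package qualifiers are assumptions for illustration (imports of the usual graph/resolver/dataplane packages elided), not part of this patch:

// Illustrative sketch only: build one Configuration per Gateway in the graph.
func buildAllConfigurations(
	ctx context.Context,
	g *graph.Graph,
	res resolver.ServiceResolver,
	version int,
	plus bool,
) []dataplane.Configuration {
	configs := make([]dataplane.Configuration, 0, len(g.Gateways))
	for _, gw := range g.Gateways {
		// BuildConfiguration now receives the specific Gateway alongside the full graph.
		configs = append(configs, dataplane.BuildConfiguration(ctx, g, gw, res, version, plus))
	}
	return configs
}

The updated tests later in this patch follow the same shape, passing test.graph.Gateways[gatewayNsName] as the new Gateway argument.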
-func buildPassthroughServers(g *graph.Graph) []Layer4VirtualServer { +func buildPassthroughServers(gateway *graph.Gateway) []Layer4VirtualServer { passthroughServersMap := make(map[graph.L4RouteKey][]Layer4VirtualServer) listenerPassthroughServers := make([]Layer4VirtualServer, 0) passthroughServerCount := 0 - for _, l := range g.Gateway.Listeners { + for _, l := range gateway.Listeners { if !l.Valid || l.Source.Protocol != v1.TLSProtocolType { continue } @@ -105,7 +106,8 @@ func buildPassthroughServers(g *graph.Graph) []Layer4VirtualServer { var hostnames []string for _, p := range r.ParentRefs { - if val, exist := p.Attachment.AcceptedHostnames[l.Name]; exist { + key := graph.CreateGatewayListenerKey(l.GatewayName, l.Name) + if val, exist := p.Attachment.AcceptedHostnames[key]; exist { hostnames = val break } @@ -157,7 +159,7 @@ func buildPassthroughServers(g *graph.Graph) []Layer4VirtualServer { // buildStreamUpstreams builds all stream upstreams. func buildStreamUpstreams( ctx context.Context, - listeners []*graph.Listener, + gateway *graph.Gateway, serviceResolver resolver.ServiceResolver, ipFamily IPFamilyType, ) []Upstream { @@ -165,7 +167,7 @@ func buildStreamUpstreams( // We use a map to deduplicate them. uniqueUpstreams := make(map[string]Upstream) - for _, l := range listeners { + for _, l := range gateway.Listeners { if !l.Valid || l.Source.Protocol != v1.TLSProtocolType { continue } @@ -181,6 +183,11 @@ func buildStreamUpstreams( continue } + gatewayNSName := client.ObjectKeyFromObject(gateway.Source) + if _, ok := br.InvalidForGateways[gatewayNSName]; ok { + continue + } + upstreamName := br.ServicePortReference() if _, exist := uniqueUpstreams[upstreamName]; exist { @@ -338,7 +345,12 @@ func buildBackendGroups(servers []VirtualServer) []BackendGroup { return groups } -func newBackendGroup(refs []graph.BackendRef, sourceNsName types.NamespacedName, ruleIdx int) BackendGroup { +func newBackendGroup( + refs []graph.BackendRef, + gatewayName types.NamespacedName, + sourceNsName types.NamespacedName, + ruleIdx int, +) BackendGroup { var backends []Backend if len(refs) > 0 { @@ -350,10 +362,15 @@ func newBackendGroup(refs []graph.BackendRef, sourceNsName types.NamespacedName, continue } + valid := ref.Valid + if _, ok := ref.InvalidForGateways[gatewayName]; ok { + valid = false + } + backends = append(backends, Backend{ UpstreamName: ref.ServicePortReference(), Weight: ref.Weight, - Valid: ref.Valid, + Valid: valid, VerifyTLS: convertBackendTLS(ref.BackendTLSPolicy), }) } @@ -379,13 +396,13 @@ func convertBackendTLS(btp *graph.BackendTLSPolicy) *VerifyTLS { return verify } -func buildServers(g *graph.Graph) (http, ssl []VirtualServer) { +func buildServers(gateway *graph.Gateway) (http, ssl []VirtualServer) { rulesForProtocol := map[v1.ProtocolType]portPathRules{ v1.HTTPProtocolType: make(portPathRules), v1.HTTPSProtocolType: make(portPathRules), } - for _, l := range g.Gateway.Listeners { + for _, l := range gateway.Listeners { if l.Source.Protocol == v1.TLSProtocolType { continue } @@ -396,7 +413,7 @@ func buildServers(g *graph.Graph) (http, ssl []VirtualServer) { rulesForProtocol[l.Source.Protocol][l.Source.Port] = rules } - rules.upsertListener(l) + rules.upsertListener(l, gateway) } } @@ -405,7 +422,7 @@ func buildServers(g *graph.Graph) (http, ssl []VirtualServer) { httpServers, sslServers := httpRules.buildServers(), sslRules.buildServers() - pols := buildPolicies(g.Gateway.Policies) + pols := buildPolicies(gateway, gateway.Policies) for i := range httpServers { 
httpServers[i].Policies = pols @@ -457,7 +474,7 @@ func newHostPathRules() *hostPathRules { } } -func (hpr *hostPathRules) upsertListener(l *graph.Listener) { +func (hpr *hostPathRules) upsertListener(l *graph.Listener, gateway *graph.Gateway) { hpr.listenersExist = true hpr.port = int32(l.Source.Port) @@ -470,13 +487,14 @@ func (hpr *hostPathRules) upsertListener(l *graph.Listener) { continue } - hpr.upsertRoute(r, l) + hpr.upsertRoute(r, l, gateway) } } func (hpr *hostPathRules) upsertRoute( route *graph.L7Route, listener *graph.Listener, + gateway *graph.Gateway, ) { var hostnames []string GRPC := route.RouteType == graph.RouteTypeGRPC @@ -490,7 +508,9 @@ func (hpr *hostPathRules) upsertRoute( } for _, p := range route.ParentRefs { - if val, exist := p.Attachment.AcceptedHostnames[string(listener.Source.Name)]; exist { + key := graph.CreateGatewayListenerKey(listener.GatewayName, listener.Name) + + if val, exist := p.Attachment.AcceptedHostnames[key]; exist { hostnames = val break } @@ -525,7 +545,7 @@ func (hpr *hostPathRules) upsertRoute( } } - pols := buildPolicies(route.Policies) + pols := buildPolicies(gateway, route.Policies) for _, h := range hostnames { for _, m := range rule.Matches { @@ -549,7 +569,7 @@ func (hpr *hostPathRules) upsertRoute( hostRule.MatchRules = append(hostRule.MatchRules, MatchRule{ Source: objectSrc, - BackendGroup: newBackendGroup(rule.BackendRefs, routeNsName, idx), + BackendGroup: newBackendGroup(rule.BackendRefs, listener.GatewayName, routeNsName, idx), Filters: filters, Match: convertMatch(m), }) @@ -646,7 +666,7 @@ func (hpr *hostPathRules) maxServerCount() int { func buildUpstreams( ctx context.Context, - listeners []*graph.Listener, + gateway *graph.Gateway, svcResolver resolver.ServiceResolver, referencedServices map[types.NamespacedName]*graph.ReferencedService, ipFamily IPFamilyType, @@ -658,7 +678,7 @@ func buildUpstreams( // We need to build endpoints based on the IPFamily of NGINX. 
allowedAddressType := getAllowedAddressType(ipFamily) - for _, l := range listeners { + for _, l := range gateway.Listeners { if !l.Valid { continue } @@ -673,33 +693,18 @@ func buildUpstreams( // don't generate upstreams for rules that have invalid matches or filters continue } + for _, br := range rule.BackendRefs { - if br.Valid { - upstreamName := br.ServicePortReference() - _, exist := uniqueUpstreams[upstreamName] - - if exist { - continue - } - - var errMsg string - - eps, err := svcResolver.Resolve(ctx, br.SvcNsName, br.ServicePort, allowedAddressType) - if err != nil { - errMsg = err.Error() - } - - var upstreamPolicies []policies.Policy - if graphSvc, exists := referencedServices[br.SvcNsName]; exists { - upstreamPolicies = buildPolicies(graphSvc.Policies) - } - - uniqueUpstreams[upstreamName] = Upstream{ - Name: upstreamName, - Endpoints: eps, - ErrorMsg: errMsg, - Policies: upstreamPolicies, - } + if upstream := buildUpstream( + ctx, + br, + gateway, + svcResolver, + referencedServices, + uniqueUpstreams, + allowedAddressType, + ); upstream != nil { + uniqueUpstreams[upstream.Name] = *upstream } } } @@ -718,6 +723,51 @@ func buildUpstreams( return upstreams } +func buildUpstream( + ctx context.Context, + br graph.BackendRef, + gateway *graph.Gateway, + svcResolver resolver.ServiceResolver, + referencedServices map[types.NamespacedName]*graph.ReferencedService, + uniqueUpstreams map[string]Upstream, + allowedAddressType []discoveryV1.AddressType, +) *Upstream { + if !br.Valid { + return nil + } + + gatewayNSName := client.ObjectKeyFromObject(gateway.Source) + if _, ok := br.InvalidForGateways[gatewayNSName]; ok { + return nil + } + + upstreamName := br.ServicePortReference() + _, exist := uniqueUpstreams[upstreamName] + + if exist { + return nil + } + + var errMsg string + + eps, err := svcResolver.Resolve(ctx, br.SvcNsName, br.ServicePort, allowedAddressType) + if err != nil { + errMsg = err.Error() + } + + var upstreamPolicies []policies.Policy + if graphSvc, exists := referencedServices[br.SvcNsName]; exists { + upstreamPolicies = buildPolicies(gateway, graphSvc.Policies) + } + + return &Upstream{ + Name: upstreamName, + Endpoints: eps, + ErrorMsg: errMsg, + Policies: upstreamPolicies, + } +} + func getAllowedAddressType(ipFamily IPFamilyType) []discoveryV1.AddressType { switch ipFamily { case IPv4: @@ -840,13 +890,13 @@ func telemetryEnabled(gw *graph.Gateway) bool { } // buildTelemetry generates the Otel configuration. -func buildTelemetry(g *graph.Graph) Telemetry { - if !telemetryEnabled(g.Gateway) { +func buildTelemetry(g *graph.Graph, gateway *graph.Gateway) Telemetry { + if !telemetryEnabled(gateway) { return Telemetry{} } - serviceName := fmt.Sprintf("ngf:%s:%s", g.Gateway.Source.Namespace, g.Gateway.Source.Name) - telemetry := g.Gateway.EffectiveNginxProxy.Telemetry + serviceName := fmt.Sprintf("ngf:%s:%s", gateway.Source.Namespace, gateway.Source.Name) + telemetry := gateway.EffectiveNginxProxy.Telemetry if telemetry.ServiceName != nil { serviceName = serviceName + ":" + *telemetry.ServiceName } @@ -909,7 +959,7 @@ func CreateRatioVarName(ratio int32) string { } // buildBaseHTTPConfig generates the base http context config that should be applied to all servers. 
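The buildUpstream helper introduced above also centralizes the per-Gateway validity check: a BackendRef may remain Valid overall yet be excluded from a single Gateway's upstreams via its InvalidForGateways map. A minimal sketch of marking a ref that way, mirroring the keying used by the tests in this patch (the function name and field values are illustrative):

// Sketch only: a backend that stays valid in general but contributes no
// upstream when config is rendered for gateway "test/gateway".
func newGatewayScopedInvalidRef() graph.BackendRef {
	return graph.BackendRef{
		Valid: true,
		InvalidForGateways: map[types.NamespacedName]conditions.Condition{
			{Namespace: "test", Name: "gateway"}: {},
		},
	}
}

buildUpstream looks this map up with the Gateway's NamespacedName and returns nil on a hit, so the backend is skipped for that Gateway while remaining available to any others.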
-func buildBaseHTTPConfig(g *graph.Graph) BaseHTTPConfig { +func buildBaseHTTPConfig(g *graph.Graph, gateway *graph.Gateway) BaseHTTPConfig { baseConfig := BaseHTTPConfig{ // HTTP2 should be enabled by default HTTP2: true, @@ -918,7 +968,7 @@ func buildBaseHTTPConfig(g *graph.Graph) BaseHTTPConfig { } // safe to access EffectiveNginxProxy since we only call this function when the Gateway is not nil. - np := g.Gateway.EffectiveNginxProxy + np := gateway.EffectiveNginxProxy if np == nil { return baseConfig } @@ -999,8 +1049,8 @@ func buildSnippetsForContext( return snippetsForContext } -func buildPolicies(graphPolicies []*graph.Policy) []policies.Policy { - if len(graphPolicies) == 0 { +func buildPolicies(gateway *graph.Gateway, graphPolicies []*graph.Policy) []policies.Policy { + if len(graphPolicies) == 0 || gateway == nil { return nil } @@ -1010,6 +1060,9 @@ func buildPolicies(graphPolicies []*graph.Policy) []policies.Policy { if !policy.Valid { continue } + if _, exists := policy.InvalidForGateways[client.ObjectKeyFromObject(gateway.Source)]; exists { + continue + } finalPolicies = append(finalPolicies, policy.Source) } @@ -1025,14 +1078,14 @@ func convertAddresses(addresses []ngfAPIv1alpha2.RewriteClientIPAddress) []strin return trustedAddresses } -func buildLogging(g *graph.Graph) Logging { +func buildLogging(gateway *graph.Gateway) Logging { logSettings := Logging{ErrorLevel: defaultErrorLogLevel} - if g.Gateway == nil || g.Gateway.EffectiveNginxProxy == nil { + if gateway == nil || gateway.EffectiveNginxProxy == nil { return logSettings } - ngfProxy := g.Gateway.EffectiveNginxProxy + ngfProxy := gateway.EffectiveNginxProxy if ngfProxy.Logging != nil { if ngfProxy.Logging.ErrorLevel != nil { logSettings.ErrorLevel = string(*ngfProxy.Logging.ErrorLevel) @@ -1056,14 +1109,14 @@ func buildAuxiliarySecrets( return auxSecrets } -func buildNginxPlus(g *graph.Graph) NginxPlus { +func buildNginxPlus(gateway *graph.Gateway) NginxPlus { nginxPlusSettings := NginxPlus{AllowedAddresses: []string{"127.0.0.1"}} - if g.Gateway == nil || g.Gateway.EffectiveNginxProxy == nil { + if gateway == nil || gateway.EffectiveNginxProxy == nil { return nginxPlusSettings } - ngfProxy := g.Gateway.EffectiveNginxProxy + ngfProxy := gateway.EffectiveNginxProxy if ngfProxy.NginxPlus != nil { if ngfProxy.NginxPlus.AllowedAddresses != nil { addresses := make([]string, 0, len(ngfProxy.NginxPlus.AllowedAddresses)) @@ -1078,10 +1131,10 @@ func buildNginxPlus(g *graph.Graph) NginxPlus { return nginxPlusSettings } -func GetDefaultConfiguration(g *graph.Graph, configVersion int) Configuration { +func GetDefaultConfiguration(g *graph.Graph, configVersion int, gateway *graph.Gateway) Configuration { return Configuration{ Version: configVersion, - Logging: buildLogging(g), + Logging: buildLogging(gateway), NginxPlus: NginxPlus{}, AuxiliarySecrets: buildAuxiliarySecrets(g.PlusSecrets), } diff --git a/internal/mode/static/state/dataplane/configuration_test.go b/internal/mode/static/state/dataplane/configuration_test.go index 8aed106e7e..7f3a50795d 100644 --- a/internal/mode/static/state/dataplane/configuration_test.go +++ b/internal/mode/static/state/dataplane/configuration_test.go @@ -21,6 +21,7 @@ import ( ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" 
"github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/policies" @@ -70,15 +71,27 @@ func getExpectedConfiguration() Configuration { } } +var gatewayNsName = types.NamespacedName{ + Namespace: "test", + Name: "gateway", +} + func getNormalGraph() *graph.Graph { return &graph.Graph{ GatewayClass: &graph.GatewayClass{ Source: &v1.GatewayClass{}, Valid: true, }, - Gateway: &graph.Gateway{ - Source: &v1.Gateway{}, - Listeners: []*graph.Listener{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + gatewayNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Listeners: []*graph.Listener{}, + }, }, Routes: map[graph.RouteKey]*graph.L7Route{}, ReferencedSecrets: map[types.NamespacedName]*graph.Secret{}, @@ -255,9 +268,12 @@ func TestBuildConfiguration(t *testing.T) { Valid: true, ParentRefs: []graph.ParentRef{ { + Gateway: &graph.ParentRefGateway{ + NamespacedName: gatewayNsName, + }, Attachment: &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - listenerName: hostnames, + graph.CreateGatewayListenerKey(gatewayNsName, listenerName): hostnames, }, }, }, @@ -473,7 +489,8 @@ func TestBuildConfiguration(t *testing.T) { pathAndType{path: "/", pathType: prefix}, ) // add extra attachment for this route for duplicate listener test - httpsRouteHR5.ParentRefs[0].Attachment.AcceptedHostnames["listener-443-1"] = []string{"example.com"} + key := graph.CreateGatewayListenerKey(gatewayNsName, "listener-443-1") + httpsRouteHR5.ParentRefs[0].Attachment.AcceptedHostnames[key] = []string{"example.com"} httpsHR6, expHTTPSHR6Groups, httpsRouteHR6 := createTestResources( "https-hr-6", @@ -506,14 +523,14 @@ func TestBuildConfiguration(t *testing.T) { { Attachment: &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-443-2": {"app.example.com"}, + graph.CreateGatewayListenerKey(gatewayNsName, "listener-443-2"): {"app.example.com"}, }, }, }, { Attachment: &graph.ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-444-3": {"app.example.com"}, + graph.CreateGatewayListenerKey(gatewayNsName, "listener-444-3"): {"app.example.com"}, }, }, }, @@ -945,10 +962,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, }) return g }), @@ -961,19 +980,22 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr1Invalid): routeHR1Invalid, }, }, { - Name: "listener-443-1", - Source: listener443, // nil hostname - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, // nil hostname + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ 
graph.CreateRouteKey(httpsHR1Invalid): httpsRouteHR1Invalid, }, @@ -1000,9 +1022,11 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { Name: "listener-443-1", + GatewayName: gatewayNsName, Source: listener443, // nil hostname Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, @@ -1010,6 +1034,7 @@ func TestBuildConfiguration(t *testing.T) { }, { Name: "listener-443-with-hostname", + GatewayName: gatewayNsName, Source: listener443WithHostname, // non-nil hostname Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, @@ -1046,8 +1071,10 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ Name: "invalid-listener", + GatewayName: gatewayNsName, Source: invalidListener, Valid: false, ResolvedSecret: &secret1NsName, @@ -1069,10 +1096,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr1): routeHR1, graph.CreateRouteKey(hr2): routeHR2, @@ -1130,10 +1159,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(gr): routeGR, }, @@ -1170,11 +1201,13 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR1): httpsRouteHR1, graph.CreateRouteKey(httpsHR2): httpsRouteHR2, @@ -1182,9 +1215,10 @@ func TestBuildConfiguration(t *testing.T) { ResolvedSecret: &secret1NsName, }, { - Name: "listener-443-with-hostname", - Source: listener443WithHostname, - Valid: true, + Name: "listener-443-with-hostname", + GatewayName: gatewayNsName, + Source: listener443WithHostname, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR5): httpsRouteHR5, }, @@ -1280,20 +1314,23 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { 
- Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr3): routeHR3, graph.CreateRouteKey(hr4): routeHR4, }, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR3): httpsRouteHR3, graph.CreateRouteKey(httpsHR4): httpsRouteHR4, @@ -1420,36 +1457,41 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr3): routeHR3, }, }, { - Name: "listener-8080", - Source: listener8080, - Valid: true, + Name: "listener-8080", + GatewayName: gatewayNsName, + Source: listener8080, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr8): routeHR8, }, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR3): httpsRouteHR3, }, ResolvedSecret: &secret1NsName, }, { - Name: "listener-8443", - Source: listener8443, - Valid: true, + Name: "listener-8443", + GatewayName: gatewayNsName, + Source: listener8443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR7): httpsRouteHR7, }, @@ -1629,7 +1671,7 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway = nil + delete(g.Gateways, gatewayNsName) return g }), expConf: defaultConfig, @@ -1637,10 +1679,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr5): routeHR5, }, @@ -1696,29 +1740,33 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr6): routeHR6, }, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR6): httpsRouteHR6, }, ResolvedSecret: &secret1NsName, }, { - Name: "listener-443-2", - Source: listener443_2, - Valid: true, - Routes: 
map[graph.RouteKey]*graph.L7Route{}, + Name: "listener-443-2", + GatewayName: gatewayNsName, + Source: listener443_2, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, L4Routes: map[graph.L4RouteKey]*graph.L4Route{ TR1Key: &tlsTR1, TR2Key: &invalidBackendRefTR2, @@ -1726,10 +1774,11 @@ func TestBuildConfiguration(t *testing.T) { ResolvedSecret: &secret1NsName, }, { - Name: "listener-444-3", - Source: listener444_3, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + Name: "listener-444-3", + GatewayName: gatewayNsName, + Source: listener444_3, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, L4Routes: map[graph.L4RouteKey]*graph.L4Route{ TR1Key: &tlsTR1, TR2Key: &invalidBackendRefTR2, @@ -1738,6 +1787,7 @@ func TestBuildConfiguration(t *testing.T) { }, { Name: "listener-443-4", + GatewayName: gatewayNsName, Source: listener443_4, Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{}, @@ -1840,10 +1890,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hr7): routeHR7, }, @@ -1892,20 +1944,23 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-443-with-hostname", - Source: listener443WithHostname, - Valid: true, + Name: "listener-443-with-hostname", + GatewayName: gatewayNsName, + Source: listener443WithHostname, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR5): httpsRouteHR5, }, ResolvedSecret: &secret2NsName, }, { - Name: "listener-443-1", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR5): httpsRouteHR5, }, @@ -1970,10 +2025,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-443", - Source: listener443, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR8): httpsRouteHR8, }, @@ -2029,10 +2086,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-443", - Source: listener443, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHR9): httpsRouteHR9, }, @@ -2088,10 +2147,12 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) 
*graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hrWithMirror): routeHRWithMirror, }, @@ -2138,17 +2199,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.Gateway.EffectiveNginxProxy = nginxProxy + gw.EffectiveNginxProxy = nginxProxy return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2170,26 +2233,29 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Listeners = append(g.Gateway.Listeners, []*graph.Listener{ + gw := g.Gateways[gatewayNsName] + gw.Listeners = append(gw.Listeners, []*graph.Listener{ { - Name: "listener-80-1", - Source: listener80, - Valid: true, + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hrWithPolicy): l7RouteWithPolicy, }, }, { - Name: "listener-443", - Source: listener443, - Valid: true, + Name: "listener-443-1", + GatewayName: gatewayNsName, + Source: listener443, + Valid: true, Routes: map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(httpsHRWithPolicy): l7HTTPSRouteWithPolicy, }, ResolvedSecret: &secret1NsName, }, }...) 
- g.Gateway.Policies = []*graph.Policy{gwPolicy1, gwPolicy2} + gw.Policies = []*graph.Policy{gwPolicy1, gwPolicy2} g.Routes = map[graph.RouteKey]*graph.L7Route{ graph.CreateRouteKey(hrWithPolicy): l7RouteWithPolicy, graph.CreateRouteKey(httpsHRWithPolicy): l7HTTPSRouteWithPolicy, @@ -2265,17 +2331,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.Gateway.EffectiveNginxProxy = nginxProxyIPv4 + gw.EffectiveNginxProxy = nginxProxyIPv4 return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2288,17 +2356,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.Gateway.EffectiveNginxProxy = nginxProxyIPv6 + gw.EffectiveNginxProxy = nginxProxyIPv6 return g }), expConf: getModifiedExpectedConfiguration(func(conf Configuration) Configuration { @@ -2311,17 +2381,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ SetIPRecursively: helpers.GetPointer(true), TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ @@ -2353,17 +2425,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + 
Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ Logging: &ngfAPIv1alpha2.NginxLogging{ ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, @@ -2416,17 +2490,19 @@ func TestBuildConfiguration(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ NginxPlus: &ngfAPIv1alpha2.NginxPlus{ AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, @@ -2453,6 +2529,7 @@ func TestBuildConfiguration(t *testing.T) { result := BuildConfiguration( context.TODO(), test.graph, + test.graph.Gateways[gatewayNsName], fakeResolver, 1, false, @@ -2505,17 +2582,19 @@ func TestBuildConfiguration_Plus(t *testing.T) { }{ { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway.Source.ObjectMeta = metav1.ObjectMeta{ + gw := g.Gateways[gatewayNsName] + gw.Source.ObjectMeta = metav1.ObjectMeta{ Name: "gw", Namespace: "ns", } - g.Gateway.Listeners = append(g.Gateway.Listeners, &graph.Listener{ - Name: "listener-80-1", - Source: listener80, - Valid: true, - Routes: map[graph.RouteKey]*graph.L7Route{}, + gw.Listeners = append(gw.Listeners, &graph.Listener{ + Name: "listener-80-1", + GatewayName: gatewayNsName, + Source: listener80, + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{}, }) - g.Gateway.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ + gw.EffectiveNginxProxy = &graph.EffectiveNginxProxy{ NginxPlus: &ngfAPIv1alpha2.NginxPlus{ AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, @@ -2551,7 +2630,7 @@ func TestBuildConfiguration_Plus(t *testing.T) { }, { graph: getModifiedGraph(func(g *graph.Graph) *graph.Graph { - g.Gateway = nil + delete(g.Gateways, gatewayNsName) return g }), expConf: defaultPlusConfig, @@ -2567,6 +2646,7 @@ func TestBuildConfiguration_Plus(t *testing.T) { result := BuildConfiguration( context.TODO(), test.graph, + test.graph.Gateways[gatewayNsName], fakeResolver, 1, true, @@ -2599,7 +2679,7 @@ func TestNewBackendGroup_Mirror(t *testing.T) { IsMirrorBackend: true, } - group := newBackendGroup([]graph.BackendRef{backendRef}, types.NamespacedName{}, 0) + group := newBackendGroup([]graph.BackendRef{backendRef}, types.NamespacedName{}, types.NamespacedName{}, 0) g.Expect(group.Backends).To(BeEmpty()) } @@ -3011,6 +3091,13 @@ func TestBuildUpstreams(t *testing.T) { }, } + invalidEndpoints := []resolver.Endpoint{ + { + Address: "11.5.5.5", + Port: 80, + }, + } + bazEndpoints := []resolver.Endpoint{ { Address: "12.0.0.0", @@ -3074,6 +3161,11 @@ func TestBuildUpstreams(t *testing.T) { hr1Refs1 := createBackendRefs("baz", "", "") // empty service names should be ignored + hr1Refs2 := createBackendRefs("invalid-for-gateway") + 
hr1Refs2[0].InvalidForGateways = map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: {}, + } + hr2Refs0 := createBackendRefs("foo", "baz") // shouldn't duplicate foo and baz upstream hr2Refs1 := createBackendRefs("nil-endpoints") @@ -3096,7 +3188,7 @@ func TestBuildUpstreams(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "hr1", Namespace: "test"}}: { Valid: true, Spec: graph.L7RouteSpec{ - Rules: refsToValidRules(hr1Refs0, hr1Refs1), + Rules: refsToValidRules(hr1Refs0, hr1Refs1, hr1Refs2), }, }, {NamespacedName: types.NamespacedName{Name: "hr2", Namespace: "test"}}: { @@ -3158,36 +3250,44 @@ func TestBuildUpstreams(t *testing.T) { }, } - listeners := []*graph.Listener{ - { - Name: "invalid-listener", - Valid: false, - Routes: routesWithNonExistingRefs, // shouldn't be included since listener is invalid - }, - { - Name: "listener-1", - Valid: true, - Routes: routes, - }, - { - Name: "listener-2", - Valid: true, - Routes: routes2, - }, - { - Name: "listener-3", - Valid: true, - Routes: invalidRoutes, // shouldn't be included since routes are invalid - }, - { - Name: "listener-4", - Valid: true, - Routes: routes3, + gateway := &graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, }, - { - Name: "listener-5", - Valid: true, - Routes: routesWithPolicies, + Listeners: []*graph.Listener{ + { + Name: "invalid-listener", + Valid: false, + Routes: routesWithNonExistingRefs, // shouldn't be included since listener is invalid + }, + { + Name: "listener-1", + Valid: true, + Routes: routes, + }, + { + Name: "listener-2", + Valid: true, + Routes: routes2, + }, + { + Name: "listener-3", + Valid: true, + Routes: invalidRoutes, // shouldn't be included since routes are invalid + }, + { + Name: "listener-4", + Valid: true, + Routes: routes3, + }, + { + Name: "listener-5", + Valid: true, + Routes: routesWithPolicies, + }, }, } @@ -3196,13 +3296,14 @@ func TestBuildUpstreams(t *testing.T) { invalidPolicy := &policiesfakes.FakePolicy{} referencedServices := map[types.NamespacedName]*graph.ReferencedService{ - {Name: "bar", Namespace: "test"}: {}, - {Name: "baz", Namespace: "test"}: {}, - {Name: "baz2", Namespace: "test"}: {}, - {Name: "foo", Namespace: "test"}: {}, - {Name: "empty-endpoints", Namespace: "test"}: {}, - {Name: "nil-endpoints", Namespace: "test"}: {}, - {Name: "ipv6-endpoints", Namespace: "test"}: {}, + {Name: "bar", Namespace: "test"}: {}, + {Name: "invalid-for-gateway", Namespace: "test"}: {}, + {Name: "baz", Namespace: "test"}: {}, + {Name: "baz2", Namespace: "test"}: {}, + {Name: "foo", Namespace: "test"}: {}, + {Name: "empty-endpoints", Namespace: "test"}: {}, + {Name: "nil-endpoints", Namespace: "test"}: {}, + {Name: "ipv6-endpoints", Namespace: "test"}: {}, {Name: "policies", Namespace: "test"}: { Policies: []*graph.Policy{ { @@ -3272,6 +3373,8 @@ func TestBuildUpstreams(t *testing.T) { switch svcNsName.Name { case "bar": return barEndpoints, nil + case "invalid-for-gateway": + return invalidEndpoints, nil case "baz": return bazEndpoints, nil case "baz2": @@ -3295,7 +3398,7 @@ func TestBuildUpstreams(t *testing.T) { g := NewWithT(t) - upstreams := buildUpstreams(context.TODO(), listeners, fakeResolver, referencedServices, Dual) + upstreams := buildUpstreams(context.TODO(), gateway, fakeResolver, referencedServices, Dual) g.Expect(upstreams).To(ConsistOf(expUpstreams)) } @@ -3568,8 +3671,10 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - 
EffectiveNginxProxy: nil, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: nil, + }, }, }, expTelemetry: Telemetry{}, @@ -3577,8 +3682,8 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: {EffectiveNginxProxy: &graph.EffectiveNginxProxy{}}, }, }, expTelemetry: Telemetry{}, @@ -3586,14 +3691,16 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Telemetry: &ngfAPIv1alpha2.Telemetry{ - Exporter: &ngfAPIv1alpha2.TelemetryExporter{ - Endpoint: helpers.GetPointer("my-otel.svc:4563"), - }, - DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{ - ngfAPIv1alpha2.DisableTracing, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("my-otel.svc:4563"), + }, + DisabledFeatures: []ngfAPIv1alpha2.DisableTelemetryFeature{ + ngfAPIv1alpha2.DisableTracing, + }, }, }, }, @@ -3604,10 +3711,12 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Telemetry: &ngfAPIv1alpha2.Telemetry{ - Exporter: nil, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: nil, + }, }, }, }, @@ -3617,11 +3726,13 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Telemetry: &ngfAPIv1alpha2.Telemetry{ - Exporter: &ngfAPIv1alpha2.TelemetryExporter{ - Endpoint: nil, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: nil, + }, }, }, }, @@ -3632,14 +3743,16 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + EffectiveNginxProxy: telemetryConfigured, }, - EffectiveNginxProxy: telemetryConfigured, }, }, expTelemetry: createTelemetry(), @@ -3647,14 +3760,16 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + EffectiveNginxProxy: telemetryConfigured, }, - EffectiveNginxProxy: telemetryConfigured, }, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { @@ -3682,14 +3797,16 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + 
EffectiveNginxProxy: telemetryConfigured, }, - EffectiveNginxProxy: telemetryConfigured, }, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { @@ -3752,14 +3869,16 @@ func TestBuildTelemetry(t *testing.T) { }, { g: &graph.Graph{ - Gateway: &graph.Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "ns", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "ns", + }, }, + EffectiveNginxProxy: telemetryConfigured, }, - EffectiveNginxProxy: telemetryConfigured, }, NGFPolicies: map[graph.PolicyKey]*graph.Policy{ {NsName: types.NamespacedName{Name: "obsPolicy"}}: { @@ -3786,7 +3905,7 @@ func TestBuildTelemetry(t *testing.T) { t.Run(tc.msg, func(t *testing.T) { t.Parallel() g := NewWithT(t) - tel := buildTelemetry(tc.g) + tel := buildTelemetry(tc.g, tc.g.Gateways[types.NamespacedName{}]) sort.Slice(tel.Ratios, func(i, j int) bool { return tel.Ratios[i].Value < tel.Ratios[j].Value }) @@ -3819,6 +3938,7 @@ func TestBuildPolicies(t *testing.T) { tests := []struct { name string + gateway *graph.Gateway policies []*graph.Policy expPolicies []string }{ @@ -3831,24 +3951,37 @@ func TestBuildPolicies(t *testing.T) { name: "mix of valid and invalid policies", policies: []*graph.Policy{ { - Source: getPolicy("Kind1", "valid1"), - Valid: true, + Source: getPolicy("Kind1", "valid1"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind2", "valid2"), - Valid: true, + Source: getPolicy("Kind2", "valid2"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind1", "invalid1"), - Valid: false, + Source: getPolicy("Kind1", "invalid1"), + Valid: false, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind2", "invalid2"), - Valid: false, + Source: getPolicy("Kind2", "invalid2"), + Valid: false, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, { - Source: getPolicy("Kind3", "valid3"), - Valid: true, + Source: getPolicy("Kind3", "valid3"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + }, + gateway: &graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test", + }, }, }, expPolicies: []string{ @@ -3857,6 +3990,27 @@ func TestBuildPolicies(t *testing.T) { "valid3", }, }, + { + name: "invalid for a Gateway", + policies: []*graph.Policy{ + { + Source: getPolicy("Kind1", "valid1"), + Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway"}: {}, + }, + }, + }, + gateway: &graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test", + }, + }, + }, + expPolicies: nil, + }, } for _, test := range tests { @@ -3864,7 +4018,7 @@ func TestBuildPolicies(t *testing.T) { t.Parallel() g := NewWithT(t) - pols := buildPolicies(test.policies) + pols := buildPolicies(test.gateway, test.policies) g.Expect(pols).To(HaveLen(len(test.expPolicies))) for _, pol := range pols { g.Expect(test.expPolicies).To(ContainElement(pol.GetName())) @@ -3930,97 +4084,107 @@ func TestCreatePassthroughServers(t *testing.T) { secureAppKey := getL4RouteKey("secure-app") secureApp2Key := getL4RouteKey("secure-app2") secureApp3Key := getL4RouteKey("secure-app3") - testGraph := graph.Graph{ - Gateway: &graph.Gateway{ - Listeners: []*graph.Listener{ - { - 
Name: "testingListener", - Valid: true, - Source: v1.Listener{ - Protocol: v1.TLSProtocolType, - Port: 443, - Hostname: helpers.GetPointer[v1.Hostname]("*.example.com"), - }, - Routes: make(map[graph.RouteKey]*graph.L7Route), - L4Routes: map[graph.L4RouteKey]*graph.L4Route{ - secureAppKey: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + gateway := &graph.Gateway{ + Listeners: []*graph.Listener{ + { + Name: "testingListener", + GatewayName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Valid: true, + Source: v1.Listener{ + Protocol: v1.TLSProtocolType, + Port: 443, + Hostname: helpers.GetPointer[v1.Hostname]("*.example.com"), + }, + Routes: make(map[graph.RouteKey]*graph.L7Route), + L4Routes: map[graph.L4RouteKey]*graph.L4Route{ + secureAppKey: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, - ParentRefs: []graph.ParentRef{ - { - Attachment: &graph.ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{ - "testingListener": {"app.example.com", "cafe.example.com"}, - }, + }, + ParentRefs: []graph.ParentRef{ + { + Attachment: &graph.ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + graph.CreateGatewayListenerKey( + gatewayNsName, + "testingListener", + ): {"app.example.com", "cafe.example.com"}, }, - SectionName: nil, - Port: nil, - Gateway: types.NamespacedName{}, - Idx: 0, }, + SectionName: nil, + Port: nil, + Gateway: &graph.ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + }, + Idx: 0, }, }, - secureApp2Key: {}, }, + secureApp2Key: {}, }, - { - Name: "testingListener2", - Valid: true, - Source: v1.Listener{ - Protocol: v1.TLSProtocolType, - Port: 443, - Hostname: helpers.GetPointer[v1.Hostname]("cafe.example.com"), - }, - Routes: make(map[graph.RouteKey]*graph.L7Route), - L4Routes: map[graph.L4RouteKey]*graph.L4Route{ - secureApp3Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + }, + { + Name: "testingListener2", + Valid: true, + Source: v1.Listener{ + Protocol: v1.TLSProtocolType, + Port: 443, + Hostname: helpers.GetPointer[v1.Hostname]("cafe.example.com"), + }, + Routes: make(map[graph.RouteKey]*graph.L7Route), + L4Routes: map[graph.L4RouteKey]*graph.L4Route{ + secureApp3Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, }, }, }, - { - 
Name: "httpListener", - Valid: true, - Source: v1.Listener{ - Protocol: v1.HTTPProtocolType, - }, + }, + { + Name: "httpListener", + Valid: true, + Source: v1.Listener{ + Protocol: v1.HTTPProtocolType, }, }, }, } - passthroughServers := buildPassthroughServers(&testGraph) + passthroughServers := buildPassthroughServers(gateway) expectedPassthroughServers := []Layer4VirtualServer{ { @@ -4069,79 +4233,107 @@ func TestBuildStreamUpstreams(t *testing.T) { secureApp3Key := getL4RouteKey("secure-app3") secureApp4Key := getL4RouteKey("secure-app4") secureApp5Key := getL4RouteKey("secure-app5") - testGraph := graph.Graph{ - Gateway: &graph.Gateway{ - Listeners: []*graph.Listener{ - { - Name: "testingListener", - Valid: true, - Source: v1.Listener{ - Protocol: v1.TLSProtocolType, - Port: 443, - }, - Routes: make(map[graph.RouteKey]*graph.L7Route), - L4Routes: map[graph.L4RouteKey]*graph.L4Route{ - secureAppKey: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + secureApp6Key := getL4RouteKey("secure-app6") + + gateway := &graph.Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Listeners: []*graph.Listener{ + { + Name: "testingListener", + Valid: true, + Source: v1.Listener{ + Protocol: v1.TLSProtocolType, + Port: 443, + }, + Routes: make(map[graph.RouteKey]*graph.L7Route), + L4Routes: map[graph.L4RouteKey]*graph.L4Route{ + secureAppKey: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, }, - secureApp2Key: {}, - secureApp3Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"test.example.com"}, - BackendRef: graph.BackendRef{}, + }, + secureApp2Key: {}, + secureApp3Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"test.example.com"}, + BackendRef: graph.BackendRef{}, + }, + }, + secureApp4Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureAppKey.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, + }, + }, }, }, - secureApp4Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app.example.com", "cafe.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureAppKey.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + }, + secureApp5Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app2.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + SvcNsName: secureApp5Key.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 
8443, }, }, }, }, - secureApp5Key: { - Valid: true, - Spec: graph.L4RouteSpec{ - Hostnames: []v1.Hostname{"app2.example.com"}, - BackendRef: graph.BackendRef{ - Valid: true, - SvcNsName: secureApp5Key.NamespacedName, - ServicePort: apiv1.ServicePort{ - Name: "https", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8443, - }, + }, + secureApp6Key: { + Valid: true, + Spec: graph.L4RouteSpec{ + Hostnames: []v1.Hostname{"app2.example.com"}, + BackendRef: graph.BackendRef{ + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: {}, + }, + SvcNsName: secureApp6Key.NamespacedName, + ServicePort: apiv1.ServicePort{ + Name: "https", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8443, }, }, }, @@ -4169,7 +4361,7 @@ func TestBuildStreamUpstreams(t *testing.T) { return fakeEndpoints, nil } - streamUpstreams := buildStreamUpstreams(context.Background(), testGraph.Gateway.Listeners, &fakeResolver, Dual) + streamUpstreams := buildStreamUpstreams(context.Background(), gateway, &fakeResolver, Dual) expectedStreamUpstreams := []Upstream{ { @@ -4196,8 +4388,10 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "no rewrite IP settings configured", g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, + }, }, }, expRewriteIPSettings: RewriteClientIPSettings{}, @@ -4205,17 +4399,19 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with proxyProtocol", g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), - TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, - Value: "10.9.9.4/32", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeProxyProtocol), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "10.9.9.4/32", + }, }, + SetIPRecursively: helpers.GetPointer(true), }, - SetIPRecursively: helpers.GetPointer(true), }, }, }, @@ -4229,17 +4425,19 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with xForwardedFor", g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), - TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, - Value: "76.89.90.11/24", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "76.89.90.11/24", + }, }, + SetIPRecursively: helpers.GetPointer(true), }, - 
SetIPRecursively: helpers.GetPointer(true), }, }, }, @@ -4253,29 +4451,31 @@ func TestBuildRewriteIPSettings(t *testing.T) { { msg: "rewrite IP settings configured with recursive set to false and multiple trusted addresses", g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ - Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), - TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ - { - Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, - Value: "5.5.5.5/12", - }, - { - Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, - Value: "1.1.1.1/26", - }, - { - Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, - Value: "2.2.2.2/32", - }, - { - Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, - Value: "3.3.3.3/24", + Gateways: map[types.NamespacedName]*graph.Gateway{ + {}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + RewriteClientIP: &ngfAPIv1alpha2.RewriteClientIP{ + Mode: helpers.GetPointer(ngfAPIv1alpha2.RewriteClientIPModeXForwardedFor), + TrustedAddresses: []ngfAPIv1alpha2.RewriteClientIPAddress{ + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "5.5.5.5/12", + }, + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "1.1.1.1/26", + }, + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "2.2.2.2/32", + }, + { + Type: ngfAPIv1alpha2.RewriteClientIPCIDRAddressType, + Value: "3.3.3.3/24", + }, }, + SetIPRecursively: helpers.GetPointer(false), }, - SetIPRecursively: helpers.GetPointer(false), }, }, }, @@ -4292,7 +4492,7 @@ func TestBuildRewriteIPSettings(t *testing.T) { t.Run(tc.msg, func(t *testing.T) { t.Parallel() g := NewWithT(t) - baseConfig := buildBaseHTTPConfig(tc.g) + baseConfig := buildBaseHTTPConfig(tc.g, tc.g.Gateways[types.NamespacedName{}]) g.Expect(baseConfig.RewriteClientIPSettings).To(Equal(tc.expRewriteIPSettings)) }) } @@ -4304,44 +4504,36 @@ func TestBuildLogging(t *testing.T) { t.Parallel() tests := []struct { msg string - g *graph.Graph + gw *graph.Gateway expLoggingSettings Logging }{ { - msg: "Gateway is nil", - g: &graph.Graph{ - Gateway: nil, - }, + msg: "Gateway is nil", + gw: nil, expLoggingSettings: defaultLogging, }, { msg: "Gateway has no effective NginxProxy", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: nil, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: nil, }, expLoggingSettings: defaultLogging, }, { msg: "Effective NginxProxy does not specify log level", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, }, expLoggingSettings: defaultLogging, }, { msg: "Effective NginxProxy log level set to debug", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), }, }, }, @@ -4349,12 +4541,10 @@ func TestBuildLogging(t *testing.T) { }, { msg: "Effective NginxProxy log level set to info", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: 
&ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelInfo), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelInfo), }, }, }, @@ -4362,12 +4552,10 @@ func TestBuildLogging(t *testing.T) { }, { msg: "Effective NginxProxy log level set to notice", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelNotice), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelNotice), }, }, }, @@ -4375,12 +4563,10 @@ func TestBuildLogging(t *testing.T) { }, { msg: "Effective NginxProxy log level set to warn", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelWarn), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelWarn), }, }, }, @@ -4388,12 +4574,10 @@ func TestBuildLogging(t *testing.T) { }, { msg: "Effective NginxProxy log level set to error", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), }, }, }, @@ -4401,12 +4585,10 @@ func TestBuildLogging(t *testing.T) { }, { msg: "Effective NginxProxy log level set to crit", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelCrit), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelCrit), }, }, }, @@ -4414,12 +4596,10 @@ func TestBuildLogging(t *testing.T) { }, { msg: "Effective NginxProxy log level set to alert", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelAlert), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelAlert), }, }, }, @@ -4427,12 +4607,10 @@ func TestBuildLogging(t *testing.T) { }, { msg: "Effective NginxProxy log level set to emerg", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelEmerg), - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelEmerg), }, }, }, @@ -4445,7 +4623,7 @@ func TestBuildLogging(t *testing.T) { t.Parallel() g := NewWithT(t) - 
g.Expect(buildLogging(tc.g)).To(Equal(tc.expLoggingSettings)) + g.Expect(buildLogging(tc.gw)).To(Equal(tc.expLoggingSettings)) }) } } @@ -4666,32 +4844,28 @@ func TestBuildNginxPlus(t *testing.T) { t.Parallel() tests := []struct { msg string - g *graph.Graph + gw *graph.Gateway expNginxPlus NginxPlus }{ { msg: "NginxProxy is nil", - g: &graph.Graph{}, + gw: &graph.Gateway{}, expNginxPlus: defaultNginxPlus, }, { msg: "NginxPlus default values are used when NginxProxy doesn't specify NginxPlus settings", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{}, }, expNginxPlus: defaultNginxPlus, }, { msg: "NginxProxy specifies one allowed address", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - NginxPlus: &ngfAPIv1alpha2.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, }, }, }, @@ -4700,14 +4874,12 @@ func TestBuildNginxPlus(t *testing.T) { }, { msg: "NginxProxy specifies multiple allowed addresses", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - NginxPlus: &ngfAPIv1alpha2.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, - {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.3"}, + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "25.0.0.3"}, }, }, }, @@ -4716,13 +4888,11 @@ func TestBuildNginxPlus(t *testing.T) { }, { msg: "NginxProxy specifies 127.0.0.1 as allowed address", - g: &graph.Graph{ - Gateway: &graph.Gateway{ - EffectiveNginxProxy: &graph.EffectiveNginxProxy{ - NginxPlus: &ngfAPIv1alpha2.NginxPlus{ - AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ - {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.1"}, - }, + gw: &graph.Gateway{ + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + NginxPlus: &ngfAPIv1alpha2.NginxPlus{ + AllowedAddresses: []ngfAPIv1alpha2.NginxPlusAllowAddress{ + {Type: ngfAPIv1alpha2.NginxPlusAllowIPAddressType, Value: "127.0.0.1"}, }, }, }, @@ -4736,7 +4906,7 @@ func TestBuildNginxPlus(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildNginxPlus(tc.g)).To(Equal(tc.expNginxPlus)) + g.Expect(buildNginxPlus(tc.gw)).To(Equal(tc.expNginxPlus)) }) } } diff --git a/internal/mode/static/state/graph/backend_refs.go b/internal/mode/static/state/graph/backend_refs.go index b8775df4e7..cf9339cc7a 100644 --- a/internal/mode/static/state/graph/backend_refs.go +++ b/internal/mode/static/state/graph/backend_refs.go @@ -22,6 +22,10 @@ import ( type BackendRef struct { // BackendTLSPolicy is the BackendTLSPolicy of the Service which is referenced by the backendRef. 
BackendTLSPolicy *BackendTLSPolicy + // InvalidForGateways is a map of Gateways for which this BackendRef is invalid, with the corresponding + // condition. Certain NginxProxy configurations may result in a backend not being valid for some Gateways, + // but not others. + InvalidForGateways map[types.NamespacedName]conditions.Condition // SvcNsName is the NamespacedName of the Service referenced by the backendRef. SvcNsName types.NamespacedName // ServicePort is the ServicePort of the Service which is referenced by the backendRef. @@ -48,10 +52,9 @@ func addBackendRefsToRouteRules( refGrantResolver *referenceGrantResolver, services map[types.NamespacedName]*v1.Service, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *EffectiveNginxProxy, ) { for _, r := range routes { - addBackendRefsToRules(r, refGrantResolver, services, backendTLSPolicies, npCfg) + addBackendRefsToRules(r, refGrantResolver, services, backendTLSPolicies) } } @@ -62,7 +65,6 @@ func addBackendRefsToRules( refGrantResolver *referenceGrantResolver, services map[types.NamespacedName]*v1.Service, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *EffectiveNginxProxy, ) { if !route.Valid { return @@ -91,19 +93,18 @@ func addBackendRefsToRules( } routeNs := route.Source.GetNamespace() - ref, cond := createBackendRef( + ref, conds := createBackendRef( ref, - routeNs, + route, refGrantResolver.refAllowedFrom(getRefGrantFromResourceForRoute(route.RouteType, routeNs)), services, refPath, backendTLSPolicies, - npCfg, ) backendRefs = append(backendRefs, ref) - if cond != nil { - route.Conditions = append(route.Conditions, *cond) + if len(conds) > 0 { + route.Conditions = append(route.Conditions, conds...) } } @@ -123,13 +124,12 @@ func addBackendRefsToRules( func createBackendRef( ref RouteBackendRef, - sourceNamespace string, + route *L7Route, refGrantResolver func(resource toResource) bool, services map[types.NamespacedName]*v1.Service, refPath *field.Path, backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, - npCfg *EffectiveNginxProxy, -) (BackendRef, *conditions.Condition) { +) (BackendRef, []conditions.Condition) { // Data plane will handle invalid ref by responding with 500. // Because of that, we always need to add a BackendRef to group.Backends, even if the ref is invalid. // Additionally, we always calculate the weight, even if it is invalid.
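Note on the hunk below: createBackendRef now returns a slice of conditions and records per-Gateway invalidity in InvalidForGateways (keyed by the parent Gateway's NamespacedName), so a backend that fails a Gateway-specific check, such as an IP family mismatch with that Gateway's effective NginxProxy, stays usable for the other Gateways. The following is a minimal, self-contained sketch of that pattern, not the repository's actual code; the type and helper names here are invented for illustration.

package main

import "fmt"

// NamespacedName and Condition stand in for the k8s.io/apimachinery type and the
// project's conditions package; they are placeholders for this sketch only.
type NamespacedName struct{ Namespace, Name string }

type Condition struct{ Reason string }

type BackendRef struct {
	Name               string
	Valid              bool
	InvalidForGateways map[NamespacedName]Condition
}

// usableForGateway mirrors the idea that a globally valid backend may still be
// unusable for one specific Gateway while remaining usable for the rest.
func usableForGateway(b BackendRef, gw NamespacedName) bool {
	if !b.Valid {
		return false
	}
	_, invalid := b.InvalidForGateways[gw]
	return !invalid
}

func main() {
	gwA := NamespacedName{Namespace: "test", Name: "gateway-a"}
	gwB := NamespacedName{Namespace: "test", Name: "gateway-b"}

	backend := BackendRef{
		Name:  "svc1",
		Valid: true,
		InvalidForGateways: map[NamespacedName]Condition{
			// Hypothetical: gateway-b is configured IPv6-only, but the Service is IPv4.
			gwB: {Reason: "UnsupportedValue"},
		},
	}

	fmt.Println(usableForGateway(backend, gwA)) // true
	fmt.Println(usableForGateway(backend, gwB)) // false
}

Per-Gateway config building (for example the upstream and backend-group construction exercised in the tests above) can then skip only the Gateways present in the map, which is what the new test cases with the "invalid-for-gateway" backend verify.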
@@ -143,80 +143,75 @@ func createBackendRef( } } - var backendRef BackendRef - - valid, cond := validateRouteBackendRef(ref, sourceNamespace, refGrantResolver, refPath) + valid, cond := validateRouteBackendRef(ref, route.Source.GetNamespace(), refGrantResolver, refPath) if !valid { - backendRef = BackendRef{ - Weight: weight, - Valid: false, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + Weight: weight, + Valid: false, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: make(map[types.NamespacedName]conditions.Condition), } - return backendRef, &cond + return backendRef, []conditions.Condition{cond} } - ns := sourceNamespace + ns := route.Source.GetNamespace() if ref.Namespace != nil { ns = string(*ref.Namespace) } svcNsName := types.NamespacedName{Name: string(ref.Name), Namespace: ns} svcIPFamily, svcPort, err := getIPFamilyAndPortFromRef(ref.BackendRef, svcNsName, services, refPath) if err != nil { - backendRef = BackendRef{ - Weight: weight, - Valid: false, - SvcNsName: svcNsName, - ServicePort: v1.ServicePort{}, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + Weight: weight, + Valid: false, + SvcNsName: svcNsName, + ServicePort: v1.ServicePort{}, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: make(map[types.NamespacedName]conditions.Condition), } - cond := staticConds.NewRouteBackendRefRefBackendNotFound(err.Error()) - return backendRef, &cond + return backendRef, []conditions.Condition{staticConds.NewRouteBackendRefRefBackendNotFound(err.Error())} } - if err := verifyIPFamily(npCfg, svcIPFamily); err != nil { - backendRef = BackendRef{ - SvcNsName: svcNsName, - ServicePort: svcPort, - Weight: weight, - Valid: false, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + var conds []conditions.Condition + invalidForGateways := make(map[types.NamespacedName]conditions.Condition) + for _, parentRef := range route.ParentRefs { + if err := verifyIPFamily(parentRef.Gateway.EffectiveNginxProxy, svcIPFamily); err != nil { + invalidForGateways[parentRef.Gateway.NamespacedName] = staticConds.NewRouteInvalidIPFamily(err.Error()) } - - cond := staticConds.NewRouteInvalidIPFamily(err.Error()) - return backendRef, &cond } backendTLSPolicy, err := findBackendTLSPolicyForService( backendTLSPolicies, ref.Namespace, string(ref.Name), - sourceNamespace, + route.Source.GetNamespace(), ) if err != nil { - backendRef = BackendRef{ - SvcNsName: svcNsName, - ServicePort: svcPort, - Weight: weight, - Valid: false, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + SvcNsName: svcNsName, + ServicePort: svcPort, + Weight: weight, + Valid: false, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: invalidForGateways, } - cond := staticConds.NewRouteBackendRefUnsupportedValue(err.Error()) - return backendRef, &cond + return backendRef, append(conds, staticConds.NewRouteBackendRefUnsupportedValue(err.Error())) } - backendRef = BackendRef{ - SvcNsName: svcNsName, - BackendTLSPolicy: backendTLSPolicy, - ServicePort: svcPort, - Valid: true, - Weight: weight, - IsMirrorBackend: ref.MirrorBackendIdx != nil, + backendRef := BackendRef{ + SvcNsName: svcNsName, + BackendTLSPolicy: backendTLSPolicy, + ServicePort: svcPort, + Valid: true, + Weight: weight, + IsMirrorBackend: ref.MirrorBackendIdx != nil, + InvalidForGateways: invalidForGateways, } - return backendRef, nil + return backendRef, conds } // validateBackendTLSPolicyMatchingAllBackends validates that all backends in a rule 
reference the same diff --git a/internal/mode/static/state/graph/backend_refs_test.go b/internal/mode/static/state/graph/backend_refs_test.go index 6106b791d6..543973758a 100644 --- a/internal/mode/static/state/graph/backend_refs_test.go +++ b/internal/mode/static/state/graph/backend_refs_test.go @@ -377,13 +377,13 @@ func TestVerifyIPFamily(t *testing.T) { } } -func TestAddBackendRefsToRulesTest(t *testing.T) { +func TestAddBackendRefsToRules(t *testing.T) { t.Parallel() sectionNameRefs := []ParentRef{ { Idx: 0, - Gateway: types.NamespacedName{Namespace: "test", Name: "gateway"}, + Gateway: &ParentRefGateway{NamespacedName: types.NamespacedName{Namespace: "test", Name: "gateway"}}, Attachment: &ParentRefAttachmentStatus{ Attached: true, }, @@ -589,10 +589,11 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr1", "Service", 1, "svc1"), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: true, - Weight: 1, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: true, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: nil, @@ -603,16 +604,18 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr2", "Service", 2, "svc1"), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: true, - Weight: 1, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: true, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[1], - Valid: true, - Weight: 5, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[1], + Valid: true, + Weight: 5, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: nil, @@ -623,18 +626,20 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr2", "Service", 2, "svc1"), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: true, - Weight: 1, - BackendTLSPolicy: btp3, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: true, + Weight: 1, + BackendTLSPolicy: btp3, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[1], - Valid: true, - Weight: 5, - BackendTLSPolicy: btp3, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[1], + Valid: true, + Weight: 5, + BackendTLSPolicy: btp3, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: nil, @@ -675,7 +680,8 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { route: createRoute("hr3", "NotService", 1, "svc1"), expectedBackendRefs: []BackendRef{ { - Weight: 1, + Weight: 1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: []conditions.Condition{ @@ -693,18 +699,20 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { }), expectedBackendRefs: []BackendRef{ { - SvcNsName: svc1NsName, - ServicePort: svc1.Spec.Ports[0], - Valid: false, - Weight: 1, - BackendTLSPolicy: btp1, + SvcNsName: svc1NsName, + ServicePort: svc1.Spec.Ports[0], + Valid: false, + Weight: 1, + BackendTLSPolicy: btp1, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, { - SvcNsName: svc2NsName, - ServicePort: svc2.Spec.Ports[1], - Valid: false, - Weight: 5, - BackendTLSPolicy: btp2, + SvcNsName: svc2NsName, + ServicePort: 
svc2.Spec.Ports[1], + Valid: false, + Weight: 5, + BackendTLSPolicy: btp2, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, expectedConditions: []conditions.Condition{ @@ -732,7 +740,7 @@ func TestAddBackendRefsToRulesTest(t *testing.T) { g := NewWithT(t) resolver := newReferenceGrantResolver(nil) - addBackendRefsToRules(test.route, resolver, services, test.policies, nil) + addBackendRefsToRules(test.route, resolver, services, test.policies) var actual []BackendRef if test.route.Spec.Rules != nil { @@ -824,11 +832,11 @@ func TestCreateBackend(t *testing.T) { } tests := []struct { - expectedCondition *conditions.Condition nginxProxySpec *EffectiveNginxProxy name string expectedServicePortReference string ref gatewayv1.HTTPBackendRef + expectedConditions []conditions.Condition expectedBackend BackendRef }{ { @@ -836,13 +844,14 @@ func TestCreateBackend(t *testing.T) { BackendRef: getNormalRef(), }, expectedBackend: BackendRef{ - SvcNsName: svc1NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 5, - Valid: true, + SvcNsName: svc1NamespacedName, + ServicePort: svc1.Spec.Ports[0], + Weight: 5, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "test_service1_80", - expectedCondition: nil, + expectedConditions: nil, name: "normal case", }, { @@ -853,13 +862,14 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: svc1NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 1, - Valid: true, + SvcNsName: svc1NamespacedName, + ServicePort: svc1.Spec.Ports[0], + Weight: 1, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "test_service1_80", - expectedCondition: nil, + expectedConditions: nil, name: "normal with nil weight", }, { @@ -870,17 +880,18 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: types.NamespacedName{}, - ServicePort: v1.ServicePort{}, - Weight: 0, - Valid: false, + SvcNsName: types.NamespacedName{}, + ServicePort: v1.ServicePort{}, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + Weight: 0, + Valid: false, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefUnsupportedValue( "test.weight: Invalid value: -1: must be in the range [0, 1000000]", ), - ), + }, name: "invalid weight", }, { @@ -891,17 +902,18 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: types.NamespacedName{}, - ServicePort: v1.ServicePort{}, - Weight: 5, - Valid: false, + SvcNsName: types.NamespacedName{}, + ServicePort: v1.ServicePort{}, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, + Weight: 5, + Valid: false, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefInvalidKind( `test.kind: Unsupported value: "NotService": supported values: "Service"`, ), - ), + }, name: "invalid kind", }, { @@ -918,31 +930,33 @@ func TestCreateBackend(t *testing.T) { Namespace: "test", Name: "not-exist", }, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefRefBackendNotFound(`test.name: Not found: "not-exist"`), - ), + 
}, name: "service doesn't exist", }, { ref: gatewayv1.HTTPBackendRef{ - BackendRef: getModifiedRef(func(backend gatewayv1.BackendRef) gatewayv1.BackendRef { - backend.Name = "service2" - return backend - }), + BackendRef: getNormalRef(), }, expectedBackend: BackendRef{ - SvcNsName: svc2NamespacedName, + SvcNsName: svc1NamespacedName, ServicePort: svc1.Spec.Ports[0], Weight: 5, - Valid: false, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: staticConds.NewRouteInvalidIPFamily( + `service configured with IPv4 family but NginxProxy is configured with IPv6`, + ), + }, }, - nginxProxySpec: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6)}, - expectedCondition: helpers.GetPointer( - staticConds.NewRouteInvalidIPFamily(`service configured with IPv4 family but NginxProxy is configured with IPv6`), - ), - name: "service IPFamily doesn't match NginxProxy IPFamily", + expectedServicePortReference: "test_service1_80", + nginxProxySpec: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6)}, + expectedConditions: nil, + name: "service IPFamily doesn't match NginxProxy IPFamily", }, { ref: gatewayv1.HTTPBackendRef{ @@ -952,14 +966,15 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: svc2NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 5, - Valid: true, - BackendTLSPolicy: &btp, + SvcNsName: svc2NamespacedName, + ServicePort: svc1.Spec.Ports[0], + Weight: 5, + Valid: true, + BackendTLSPolicy: &btp, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "test_service2_80", - expectedCondition: nil, + expectedConditions: nil, name: "normal case with policy", }, { @@ -970,17 +985,18 @@ func TestCreateBackend(t *testing.T) { }), }, expectedBackend: BackendRef{ - SvcNsName: svc3NamespacedName, - ServicePort: svc1.Spec.Ports[0], - Weight: 5, - Valid: false, + SvcNsName: svc3NamespacedName, + ServicePort: svc1.Spec.Ports[0], + Weight: 5, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, expectedServicePortReference: "", - expectedCondition: helpers.GetPointer( + expectedConditions: []conditions.Condition{ staticConds.NewRouteBackendRefUnsupportedValue( "the backend TLS policy is invalid: unsupported value", ), - ), + }, name: "invalid policy", }, } @@ -995,8 +1011,6 @@ func TestCreateBackend(t *testing.T) { client.ObjectKeyFromObject(btp2.Source): &btp2, } - sourceNamespace := "test" - refPath := field.NewPath("test") alwaysTrueRefGrantResolver := func(_ toResource) bool { return true } @@ -1011,18 +1025,36 @@ func TestCreateBackend(t *testing.T) { test.ref.BackendRef, []any{}, } - backend, cond := createBackendRef( + route := &L7Route{ + Source: &gatewayv1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + }, + }, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + EffectiveNginxProxy: test.nginxProxySpec, + }, + }, + }, + } + + backend, conds := createBackendRef( rbr, - sourceNamespace, + route, alwaysTrueRefGrantResolver, services, refPath, policies, - test.nginxProxySpec, ) g.Expect(helpers.Diff(test.expectedBackend, backend)).To(BeEmpty()) - g.Expect(cond).To(Equal(test.expectedCondition)) + g.Expect(conds).To(Equal(test.expectedConditions)) servicePortRef := backend.ServicePortReference() 
g.Expect(servicePortRef).To(Equal(test.expectedServicePortReference)) @@ -1037,14 +1069,31 @@ func TestCreateBackend(t *testing.T) { []any{}, } + route := &L7Route{ + Source: &gatewayv1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + }, + }, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + }, + }, + }, + } + backend, conds := createBackendRef( ref, - "test-ns", + route, alwaysTrueRefGrantResolver, services, refPath, policies, - nil, ) g.Expect(conds).To(BeNil()) diff --git a/internal/mode/static/state/graph/backend_tls_policy.go b/internal/mode/static/state/graph/backend_tls_policy.go index 4e00eecc56..67563eefe5 100644 --- a/internal/mode/static/state/graph/backend_tls_policy.go +++ b/internal/mode/static/state/graph/backend_tls_policy.go @@ -10,6 +10,7 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha3" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) @@ -18,8 +19,8 @@ type BackendTLSPolicy struct { Source *v1alpha3.BackendTLSPolicy // CaCertRef is the name of the ConfigMap that contains the CA certificate. CaCertRef types.NamespacedName - // Gateway is the name of the Gateway that is being checked for this BackendTLSPolicy. - Gateway types.NamespacedName + // Gateways are the names of the Gateways that are being checked for this BackendTLSPolicy. + Gateways []types.NamespacedName // Conditions include Conditions for the BackendTLSPolicy. Conditions []conditions.Condition // Valid shows whether the BackendTLSPolicy is valid. @@ -35,9 +36,9 @@ func processBackendTLSPolicies( configMapResolver *configMapResolver, secretResolver *secretResolver, ctlrName string, - gateway *Gateway, + gateways map[types.NamespacedName]*Gateway, ) map[types.NamespacedName]*BackendTLSPolicy { - if len(backendTLSPolicies) == 0 || gateway == nil { + if len(backendTLSPolicies) == 0 || len(gateways) == 0 { return nil } @@ -57,12 +58,8 @@ func processBackendTLSPolicies( Source: backendTLSPolicy, Valid: valid, Conditions: conds, - Gateway: types.NamespacedName{ - Namespace: gateway.Source.Namespace, - Name: gateway.Source.Name, - }, - CaCertRef: caCertRef, - Ignored: ignored, + CaCertRef: caCertRef, + Ignored: ignored, } } return processedBackendTLSPolicies @@ -134,7 +131,7 @@ func validateBackendTLSCACertRef( secretResolver *secretResolver, ) error { if len(btp.Spec.Validation.CACertificateRefs) != 1 { - path := field.NewPath("tls.cacertrefs") + path := field.NewPath("validation.caCertificateRefs") valErr := field.TooMany(path, len(btp.Spec.Validation.CACertificateRefs), 1) return valErr } @@ -143,13 +140,13 @@ func validateBackendTLSCACertRef( allowedCaCertKinds := []v1.Kind{"ConfigMap", "Secret"} if !slices.Contains(allowedCaCertKinds, selectedCertRef.Kind) { - path := field.NewPath("tls.cacertrefs[0].kind") + path := field.NewPath("validation.caCertificateRefs[0].kind") valErr := field.NotSupported(path, btp.Spec.Validation.CACertificateRefs[0].Kind, allowedCaCertKinds) return valErr } if selectedCertRef.Group != "" && selectedCertRef.Group != "core" { - path := field.NewPath("tls.cacertrefs[0].group") + path := field.NewPath("validation.caCertificateRefs[0].group") valErr := field.NotSupported(path, selectedCertRef.Group, []string{"", "core"}) return valErr } @@ -161,12 +158,12 @@ func 
validateBackendTLSCACertRef( switch selectedCertRef.Kind { case "ConfigMap": if err := configMapResolver.resolve(nsName); err != nil { - path := field.NewPath("tls.cacertrefs[0]") + path := field.NewPath("validation.caCertificateRefs[0]") return field.Invalid(path, selectedCertRef, err.Error()) } case "Secret": if err := secretResolver.resolve(nsName); err != nil { - path := field.NewPath("tls.cacertrefs[0]") + path := field.NewPath("validation.caCertificateRefs[0]") return field.Invalid(path, selectedCertRef, err.Error()) } default: @@ -186,3 +183,32 @@ func validateBackendTLSWellKnownCACerts(btp *v1alpha3.BackendTLSPolicy) error { } return nil } + +func addGatewaysForBackendTLSPolicies( + backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy, + services map[types.NamespacedName]*ReferencedService, +) { + for _, backendTLSPolicy := range backendTLSPolicies { + gateways := make(map[types.NamespacedName]struct{}) + + for _, refs := range backendTLSPolicy.Source.Spec.TargetRefs { + if refs.Kind != kinds.Service { + continue + } + + for svcNsName, referencedServices := range services { + if svcNsName.Name != string(refs.Name) { + continue + } + + for gateway := range referencedServices.GatewayNsNames { + gateways[gateway] = struct{}{} + } + } + } + + for gateway := range gateways { + backendTLSPolicy.Gateways = append(backendTLSPolicy.Gateways, gateway) + } + } +} diff --git a/internal/mode/static/state/graph/backend_tls_policy_test.go b/internal/mode/static/state/graph/backend_tls_policy_test.go index cea42d64e9..12f0d7264f 100644 --- a/internal/mode/static/state/graph/backend_tls_policy_test.go +++ b/internal/mode/static/state/graph/backend_tls_policy_test.go @@ -46,27 +46,29 @@ func TestProcessBackendTLSPoliciesEmpty(t *testing.T) { }, } - gateway := &Gateway{ - Source: &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "gateway", Namespace: "test"}}, + gateway := map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway"}: { + Source: &gatewayv1.Gateway{ObjectMeta: metav1.ObjectMeta{Name: "gateway", Namespace: "test"}}, + }, } tests := []struct { expected map[types.NamespacedName]*BackendTLSPolicy - gateway *Gateway + gateways map[types.NamespacedName]*Gateway backendTLSPolicies map[types.NamespacedName]*v1alpha3.BackendTLSPolicy name string }{ { name: "no policies", expected: nil, - gateway: gateway, + gateways: gateway, backendTLSPolicies: nil, }, { name: "nil gateway", expected: nil, backendTLSPolicies: backendTLSPolicies, - gateway: nil, + gateways: nil, }, } @@ -75,7 +77,7 @@ func TestProcessBackendTLSPoliciesEmpty(t *testing.T) { t.Parallel() g := NewWithT(t) - processed := processBackendTLSPolicies(test.backendTLSPolicies, nil, nil, "test", test.gateway) + processed := processBackendTLSPolicies(test.backendTLSPolicies, nil, nil, "test", test.gateways) g.Expect(processed).To(Equal(test.expected)) }) @@ -93,6 +95,15 @@ func TestValidateBackendTLSPolicy(t *testing.T) { }, } + targetRefInvalidKind := []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Invalid", + Name: "service1", + }, + }, + } + localObjectRefNormalCase := []gatewayv1.LocalObjectReference{ { Kind: "ConfigMap", @@ -119,7 +130,7 @@ func TestValidateBackendTLSPolicy(t *testing.T) { localObjectRefInvalidKind := []gatewayv1.LocalObjectReference{ { - Kind: "Secret", + Kind: "Invalid", Name: "secret", Group: "", }, @@ -299,7 +310,7 @@ func TestValidateBackendTLSPolicy(t *testing.T) { Namespace: "test", }, Spec: 
v1alpha3.BackendTLSPolicySpec{ - TargetRefs: targetRefNormalCase, + TargetRefs: targetRefInvalidKind, Validation: v1alpha3.BackendTLSPolicyValidation{ CACertificateRefs: localObjectRefInvalidKind, Hostname: "foo.test.com", @@ -475,3 +486,182 @@ func TestValidateBackendTLSPolicy(t *testing.T) { }) } } + +func TestAddGatewaysForBackendTLSPolicies(t *testing.T) { + t.Parallel() + + btp1 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp1", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service1", + }, + }, + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service2", + }, + }, + }, + }, + }, + } + btp1Expected := btp1 + + btp1Expected.Gateways = []types.NamespacedName{ + {Namespace: "test", Name: "gateway1"}, + {Namespace: "test", Name: "gateway2"}, + {Namespace: "test", Name: "gateway3"}, + } + + btp2 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp2", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service3", + }, + }, + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service4", + }, + }, + }, + }, + }, + } + + btp2Expected := btp2 + btp2Expected.Gateways = []types.NamespacedName{ + {Namespace: "test", Name: "gateway4"}, + } + + btp3 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp3", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Service", + Name: "service-does-not-exist", + }, + }, + }, + }, + }, + } + + btp4 := &BackendTLSPolicy{ + Source: &v1alpha3.BackendTLSPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "btp4", + Namespace: "test", + }, + Spec: v1alpha3.BackendTLSPolicySpec{ + TargetRefs: []v1alpha2.LocalPolicyTargetReferenceWithSectionName{ + { + LocalPolicyTargetReference: v1alpha2.LocalPolicyTargetReference{ + Kind: "Gateway", + Name: "gateway", + }, + }, + }, + }, + }, + } + + tests := []struct { + backendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy + services map[types.NamespacedName]*ReferencedService + expected map[types.NamespacedName]*BackendTLSPolicy + name string + }{ + { + name: "add multiple gateways to backend tls policies", + backendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp1"}: btp1, + {Namespace: "test", Name: "btp2"}: btp2, + }, + services: map[types.NamespacedName]*ReferencedService{ + {Namespace: "test", Name: "service1"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway1"}: {}, + }, + }, + {Namespace: "test", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway2"}: {}, + {Namespace: "test", Name: "gateway3"}: {}, + }, + }, + {Namespace: "test", Name: "service3"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway4"}: {}, + }, + }, + {Namespace: "test", Name: "service4"}: { + 
GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gateway4"}: {}, + }, + }, + }, + expected: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp1"}: btp1Expected, + {Namespace: "test", Name: "btp2"}: btp2Expected, + }, + }, + { + name: "backend tls policy with a service target ref that does not reference a gateway", + backendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp3"}: btp3, + }, + services: map[types.NamespacedName]*ReferencedService{ + {Namespace: "test", Name: "service1"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{}, + }, + }, + expected: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp3"}: btp3, + }, + }, + { + name: "backend tls policy that does not reference a service", + backendTLSPolicies: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp4"}: btp4, + }, + services: map[types.NamespacedName]*ReferencedService{}, + expected: map[types.NamespacedName]*BackendTLSPolicy{ + {Namespace: "test", Name: "btp4"}: btp4, + }, + }, + } + + for _, test := range tests { + g := NewWithT(t) + t.Run(test.name, func(t *testing.T) { + t.Parallel() + addGatewaysForBackendTLSPolicies(test.backendTLSPolicies, test.services) + g.Expect(helpers.Diff(test.backendTLSPolicies, test.expected)).To(BeEmpty()) + }) + } +} diff --git a/internal/mode/static/state/graph/gateway.go b/internal/mode/static/state/graph/gateway.go index d41364288d..64604ae4c6 100644 --- a/internal/mode/static/state/graph/gateway.go +++ b/internal/mode/static/state/graph/gateway.go @@ -1,30 +1,31 @@ package graph import ( - "sort" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" - ngfsort "github.com/nginx/nginx-gateway-fabric/internal/mode/static/sort" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) -// Gateway represents the winning Gateway resource. +// Gateway represents a Gateway resource. type Gateway struct { + // LatestReloadResult is the result of the last nginx reload attempt. + LatestReloadResult NginxReloadResult // Source is the corresponding Gateway resource. Source *v1.Gateway // NginxProxy is the NginxProxy referenced by this Gateway. NginxProxy *NginxProxy - /// EffectiveNginxProxy holds the result of merging the NginxProxySpec on this resource with the NginxProxySpec on + // EffectiveNginxProxy holds the result of merging the NginxProxySpec on this resource with the NginxProxySpec on // the GatewayClass resource. This is the effective set of config that should be applied to the Gateway. // If non-nil, then this config is valid. EffectiveNginxProxy *EffectiveNginxProxy + // DeploymentName is the name of the nginx Deployment associated with this Gateway. + DeploymentName types.NamespacedName // Listeners include the listeners of the Gateway. Listeners []*Listener // Conditions holds the conditions for the Gateway. @@ -35,124 +36,95 @@ type Gateway struct { Valid bool } -// processedGateways holds the resources that belong to NGF. 
-type processedGateways struct { - Winner *v1.Gateway - Ignored map[types.NamespacedName]*v1.Gateway -} - -// GetAllNsNames returns all the NamespacedNames of the Gateway resources that belong to NGF. -func (gws processedGateways) GetAllNsNames() []types.NamespacedName { - winnerCnt := 0 - if gws.Winner != nil { - winnerCnt = 1 - } - - length := winnerCnt + len(gws.Ignored) - if length == 0 { - return nil - } - - allNsNames := make([]types.NamespacedName, 0, length) - - if gws.Winner != nil { - allNsNames = append(allNsNames, client.ObjectKeyFromObject(gws.Winner)) - } - for nsName := range gws.Ignored { - allNsNames = append(allNsNames, nsName) - } - - return allNsNames -} - -// processGateways determines which Gateway resource belong to NGF (determined by the Gateway GatewayClassName field). +// processGateways determines which Gateway resources belong to NGF (determined by the Gateway GatewayClassName field). func processGateways( gws map[types.NamespacedName]*v1.Gateway, gcName string, -) processedGateways { - referencedGws := make([]*v1.Gateway, 0, len(gws)) +) map[types.NamespacedName]*v1.Gateway { + referencedGws := make(map[types.NamespacedName]*v1.Gateway) - for _, gw := range gws { + for gwNsName, gw := range gws { if string(gw.Spec.GatewayClassName) != gcName { continue } - referencedGws = append(referencedGws, gw) + referencedGws[gwNsName] = gw } if len(referencedGws) == 0 { - return processedGateways{} - } - - sort.Slice(referencedGws, func(i, j int) bool { - return ngfsort.LessClientObject(referencedGws[i], referencedGws[j]) - }) - - ignoredGws := make(map[types.NamespacedName]*v1.Gateway) - - for _, gw := range referencedGws[1:] { - ignoredGws[client.ObjectKeyFromObject(gw)] = gw + return nil } - return processedGateways{ - Winner: referencedGws[0], - Ignored: ignoredGws, - } + return referencedGws } -func buildGateway( - gw *v1.Gateway, +func buildGateways( + gws map[types.NamespacedName]*v1.Gateway, secretResolver *secretResolver, gc *GatewayClass, refGrantResolver *referenceGrantResolver, nps map[types.NamespacedName]*NginxProxy, -) *Gateway { - if gw == nil { +) map[types.NamespacedName]*Gateway { + if len(gws) == 0 { return nil } - var np *NginxProxy - if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { - npName := types.NamespacedName{Namespace: gw.Namespace, Name: gw.Spec.Infrastructure.ParametersRef.Name} - np = nps[npName] - } + builtGateways := make(map[types.NamespacedName]*Gateway, len(gws)) - var gcNp *NginxProxy - if gc != nil { - gcNp = gc.NginxProxy - } + for gwNsName, gw := range gws { + var np *NginxProxy + var npNsName types.NamespacedName + if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { + npNsName = types.NamespacedName{Namespace: gw.Namespace, Name: gw.Spec.Infrastructure.ParametersRef.Name} + np = nps[npNsName] + } + + var gcNp *NginxProxy + if gc != nil { + gcNp = gc.NginxProxy + } - effectiveNginxProxy := buildEffectiveNginxProxy(gcNp, np) + effectiveNginxProxy := buildEffectiveNginxProxy(gcNp, np) - conds, valid := validateGateway(gw, gc, np) + conds, valid := validateGateway(gw, gc, np) - if !valid { - return &Gateway{ - Source: gw, - Valid: false, - NginxProxy: np, - EffectiveNginxProxy: effectiveNginxProxy, - Conditions: conds, + protectedPorts := make(ProtectedPorts) + if port, enabled := MetricsEnabledForNginxProxy(effectiveNginxProxy); enabled { + metricsPort := config.DefaultNginxMetricsPort + if port != nil { + metricsPort = *port + } + protectedPorts[metricsPort] = 
"MetricsPort" } - } - protectedPorts := make(ProtectedPorts) - if port, enabled := MetricsEnabledForNginxProxy(effectiveNginxProxy); enabled { - metricsPort := config.DefaultNginxMetricsPort - if port != nil { - metricsPort = *port + deploymentName := types.NamespacedName{ + Namespace: gw.GetNamespace(), + Name: controller.CreateNginxResourceName(gw.GetName(), string(gw.Spec.GatewayClassName)), } - protectedPorts[metricsPort] = "MetricsPort" - } - return &Gateway{ - Source: gw, - Listeners: buildListeners(gw, secretResolver, refGrantResolver, protectedPorts), - NginxProxy: np, - EffectiveNginxProxy: effectiveNginxProxy, - Valid: true, - Conditions: conds, + if !valid { + builtGateways[gwNsName] = &Gateway{ + Source: gw, + Valid: false, + NginxProxy: np, + EffectiveNginxProxy: effectiveNginxProxy, + Conditions: conds, + DeploymentName: deploymentName, + } + } else { + builtGateways[gwNsName] = &Gateway{ + Source: gw, + Listeners: buildListeners(gw, secretResolver, refGrantResolver, protectedPorts), + NginxProxy: np, + EffectiveNginxProxy: effectiveNginxProxy, + Valid: true, + Conditions: conds, + DeploymentName: deploymentName, + } + } } + + return builtGateways } func validateGatewayParametersRef(npCfg *NginxProxy, ref v1.LocalParametersReference) []conditions.Condition { @@ -214,12 +186,9 @@ func validateGateway(gw *v1.Gateway, gc *GatewayClass, npCfg *NginxProxy) ([]con conds = append(conds, staticConds.NewGatewayUnsupportedValue(valErr.Error())...) } - valid := true // we evaluate validity before validating parametersRef because an invalid parametersRef/NginxProxy does not // invalidate the entire Gateway. - if len(conds) > 0 { - valid = false - } + valid := !(len(conds) > 0) if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { paramConds := validateGatewayParametersRef(npCfg, *gw.Spec.Infrastructure.ParametersRef) diff --git a/internal/mode/static/state/graph/gateway_listener.go b/internal/mode/static/state/graph/gateway_listener.go index aa5b7062e1..5bf46bd502 100644 --- a/internal/mode/static/state/graph/gateway_listener.go +++ b/internal/mode/static/state/graph/gateway_listener.go @@ -9,6 +9,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" @@ -21,6 +22,8 @@ import ( // For now, we only support HTTP and HTTPS listeners. type Listener struct { Name string + // GatewayName is the name of the Gateway resource this Listener belongs to. + GatewayName types.NamespacedName // Source holds the source of the Listener from the Gateway resource. Source v1.Listener // Routes holds the GRPC/HTTPRoutes attached to the Listener. 
@@ -57,7 +60,7 @@ func buildListeners( for _, gl := range gw.Spec.Listeners { configurator := listenerFactory.getConfiguratorForListener(gl) - listeners = append(listeners, configurator.configure(gl)) + listeners = append(listeners, configurator.configure(gl, client.ObjectKeyFromObject(gw))) } return listeners @@ -167,7 +170,7 @@ type listenerConfigurator struct { externalReferenceResolvers []listenerExternalReferenceResolver } -func (c *listenerConfigurator) configure(listener v1.Listener) *Listener { +func (c *listenerConfigurator) configure(listener v1.Listener, gwNSName types.NamespacedName) *Listener { var conds []conditions.Condition attachable := true @@ -197,6 +200,7 @@ func (c *listenerConfigurator) configure(listener v1.Listener) *Listener { l := &Listener{ Name: string(listener.Name), + GatewayName: gwNSName, Source: listener, Conditions: conds, AllowedRouteLabelSelector: allowedRouteSelector, diff --git a/internal/mode/static/state/graph/gateway_test.go b/internal/mode/static/state/graph/gateway_test.go index 02d4ff7f18..63bd96e753 100644 --- a/internal/mode/static/state/graph/gateway_test.go +++ b/internal/mode/static/state/graph/gateway_test.go @@ -15,66 +15,17 @@ import ( ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) -func TestProcessedGatewaysGetAllNsNames(t *testing.T) { - t.Parallel() - winner := &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "gateway-1", - }, - } - loser := &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "gateway-2", - }, - } - - tests := []struct { - gws processedGateways - name string - expected []types.NamespacedName - }{ - { - gws: processedGateways{}, - expected: nil, - name: "no gateways", - }, - { - gws: processedGateways{ - Winner: winner, - Ignored: map[types.NamespacedName]*v1.Gateway{ - client.ObjectKeyFromObject(loser): loser, - }, - }, - expected: []types.NamespacedName{ - client.ObjectKeyFromObject(winner), - client.ObjectKeyFromObject(loser), - }, - name: "winner and ignored", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - g := NewWithT(t) - result := test.gws.GetAllNsNames() - g.Expect(result).To(Equal(test.expected)) - }) - } -} - func TestProcessGateways(t *testing.T) { t.Parallel() const gcName = "test-gc" - winner := &v1.Gateway{ + gw1 := &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", Name: "gateway-1", @@ -83,7 +34,7 @@ func TestProcessGateways(t *testing.T) { GatewayClassName: gcName, }, } - loser := &v1.Gateway{ + gw2 := &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", Name: "gateway-2", @@ -95,12 +46,12 @@ func TestProcessGateways(t *testing.T) { tests := []struct { gws map[types.NamespacedName]*v1.Gateway - expected processedGateways + expected map[types.NamespacedName]*v1.Gateway name string }{ { gws: nil, - expected: processedGateways{}, + expected: nil, name: "no gateways", }, { @@ -109,29 +60,26 @@ func TestProcessGateways(t *testing.T) { Spec: v1.GatewaySpec{GatewayClassName: "some-class"}, }, }, - expected: processedGateways{}, + expected: nil, name: "unrelated gateway", }, { 
gws: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-1"}: winner, + {Namespace: "test", Name: "gateway-1"}: gw1, }, - expected: processedGateways{ - Winner: winner, - Ignored: map[types.NamespacedName]*v1.Gateway{}, + expected: map[types.NamespacedName]*v1.Gateway{ + {Namespace: "test", Name: "gateway-1"}: gw1, }, name: "one gateway", }, { gws: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-1"}: winner, - {Namespace: "test", Name: "gateway-2"}: loser, + {Namespace: "test", Name: "gateway-1"}: gw1, + {Namespace: "test", Name: "gateway-2"}: gw2, }, - expected: processedGateways{ - Winner: winner, - Ignored: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "gateway-2"}: loser, - }, + expected: map[types.NamespacedName]*v1.Gateway{ + {Namespace: "test", Name: "gateway-1"}: gw1, + {Namespace: "test", Name: "gateway-2"}: gw2, }, name: "multiple gateways", }, @@ -338,15 +286,18 @@ func TestBuildGateway(t *testing.T) { ) type gatewayCfg struct { + name string ref *v1.LocalParametersReference listeners []v1.Listener addresses []v1.GatewaySpecAddress } var lastCreatedGateway *v1.Gateway - createGateway := func(cfg gatewayCfg) *v1.Gateway { + createGateway := func(cfg gatewayCfg) map[types.NamespacedName]*v1.Gateway { + gatewayMap := make(map[types.NamespacedName]*v1.Gateway) lastCreatedGateway = &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ + Name: cfg.name, Namespace: "test", }, Spec: v1.GatewaySpec{ @@ -361,8 +312,14 @@ func TestBuildGateway(t *testing.T) { ParametersRef: cfg.ref, } } - return lastCreatedGateway + + gatewayMap[types.NamespacedName{ + Namespace: lastCreatedGateway.Namespace, + Name: lastCreatedGateway.Name, + }] = lastCreatedGateway + return gatewayMap } + getLastCreatedGateway := func() *v1.Gateway { return lastCreatedGateway } @@ -374,6 +331,10 @@ func TestBuildGateway(t *testing.T) { }, Spec: ngfAPIv1alpha2.NginxProxySpec{ Logging: &ngfAPIv1alpha2.NginxLogging{ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError)}, + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer(int32(90)), + }, }, } validGwNpRef := &v1.LocalParametersReference{ @@ -434,99 +395,122 @@ func TestBuildGateway(t *testing.T) { } tests := []struct { - gateway *v1.Gateway + gateway map[types.NamespacedName]*v1.Gateway gatewayClass *GatewayClass refGrants map[types.NamespacedName]*v1beta1.ReferenceGrant - expected *Gateway + expected map[types.NamespacedName]*Gateway name string }{ { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1, foo8080Listener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1, foo8080Listener}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8080", + GatewayName: 
client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8080Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "foo-8080", - Source: foo8080Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "valid http listeners", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443HTTPSListener1, foo8443HTTPSListener}}, + gatewayCfg{name: "gateway-https", listeners: []v1.Listener{foo443HTTPSListener1, foo8443HTTPSListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-443-https-1", - Source: foo443HTTPSListener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway-https"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-443-https-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPSListener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "foo-8443-https", - Source: foo8443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-https", gcName), }, + Valid: true, }, - Valid: true, }, name: "valid https listeners", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{listenerAllowedRoutes}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{listenerAllowedRoutes}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-with-allowed-routes", - Source: listenerAllowedRoutes, - Valid: true, - Attachable: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(labels.Set(labelSet)), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-with-allowed-routes", + 
GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: listenerAllowedRoutes, + Valid: true, + Attachable: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(labels.Set(labelSet)), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "valid http listener with allowed routes label selector", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{crossNamespaceSecretListener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{crossNamespaceSecretListener}}), gatewayClass: validGC, refGrants: map[types.NamespacedName]*v1beta1.ReferenceGrant{ {Name: "ref-grant", Namespace: "diff-ns"}: { @@ -552,181 +536,247 @@ func TestBuildGateway(t *testing.T) { }, }, }, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-cross-ns-secret", - Source: crossNamespaceSecretListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretDiffNamespace)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-cross-ns-secret", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: crossNamespaceSecretListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretDiffNamespace)), + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "valid https listener with cross-namespace secret; allowed by reference grant", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: validGwNpRef}), + gateway: createGateway(gatewayCfg{ + name: "gateway-valid-np", + listeners: []v1.Listener{foo80Listener1}, + ref: validGwNpRef, + }), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: validGwNp.Namespace, Name: "gateway-valid-np"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, - }, - Valid: true, - NginxProxy: &NginxProxy{ - Source: validGwNp, - Valid: true, - }, - EffectiveNginxProxy: &EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + DeploymentName: types.NamespacedName{ + 
Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-valid-np", gcName), + }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGwNp, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer(int32(90)), + }, }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, }, - Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, }, name: "valid http listener with valid NginxProxy; GatewayClass has no NginxProxy", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: validGwNpRef}), + gateway: createGateway(gatewayCfg{ + name: "gateway-valid-np", + listeners: []v1.Listener{foo80Listener1}, + ref: validGwNpRef, + }), gatewayClass: validGCWithNp, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: validGwNp.Namespace, Name: "gateway-valid-np"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, - }, - Valid: true, - NginxProxy: &NginxProxy{ - Source: validGwNp, - Valid: true, - }, - EffectiveNginxProxy: &EffectiveNginxProxy{ - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-valid-np", gcName), + }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: validGwNp, + Valid: true, }, - IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), + EffectiveNginxProxy: &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), + Metrics: &ngfAPIv1alpha2.Metrics{ + Disable: helpers.GetPointer(false), + Port: helpers.GetPointer(int32(90)), + }, + }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, }, - Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, }, name: "valid http listener with valid NginxProxy; GatewayClass has valid NginxProxy too", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1}}), gatewayClass: validGCWithNp, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + 
Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, - }, - Valid: true, - EffectiveNginxProxy: &EffectiveNginxProxy{ - IPFamily: helpers.GetPointer(ngfAPIv1alpha2.Dual), }, }, name: "valid http listener; GatewayClass has valid NginxProxy", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{crossNamespaceSecretListener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{crossNamespaceSecretListener}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-cross-ns-secret", - Source: crossNamespaceSecretListener, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerRefNotPermitted( - `Certificate ref to secret diff-ns/secret not permitted by any ReferenceGrant`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-cross-ns-secret", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: crossNamespaceSecretListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerRefNotPermitted( + `Certificate ref to secret diff-ns/secret not permitted by any ReferenceGrant`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "invalid attachable https listener with cross-namespace secret; no reference grant", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{listenerInvalidSelector}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{listenerInvalidSelector}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "listener-with-invalid-selector", - Source: listenerInvalidSelector, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerUnsupportedValue( - `invalid label selector: "invalid" is not a valid label selector operator`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "listener-with-invalid-selector", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: listenerInvalidSelector, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `invalid label selector: "invalid" is not a valid label selector operator`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.HTTPRoute, Group: 
helpers.GetPointer[v1.Group](v1.GroupName)}, + }, }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "attachable http listener with invalid label selector", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{invalidProtocolListener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{invalidProtocolListener}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-protocol", - Source: invalidProtocolListener, - Valid: false, - Attachable: false, - Conditions: staticConds.NewListenerUnsupportedProtocol( - `protocol: Unsupported value: "TCP": supported values: "HTTP", "HTTPS", "TLS"`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-protocol", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidProtocolListener, + Valid: false, + Attachable: false, + Conditions: staticConds.NewListenerUnsupportedProtocol( + `protocol: Unsupported value: "TCP": supported values: "HTTP", "HTTPS", "TLS"`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + }, }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, }, - Valid: true, }, name: "invalid listener protocol", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{ invalidPortListener, invalidHTTPSPortListener, @@ -735,107 +785,132 @@ func TestBuildGateway(t *testing.T) { }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-port", - Source: invalidPortListener, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerUnsupportedValue( - `port: Invalid value: 0: port must be between 1-65535`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "invalid-https-port", - Source: invalidHTTPSPortListener, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerUnsupportedValue( - `port: Invalid value: 65536: port must be between 1-65535`, - ), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-port", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidPortListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `port: Invalid value: 0: port must be between 1-65535`, + ), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "invalid-https-port", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidHTTPSPortListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `port: Invalid value: 65536: port must be between 1-65535`, + ), + Routes: map[RouteKey]*L7Route{}, + 
L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "invalid-protected-port", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidProtectedPortListener, + Valid: false, + Attachable: true, + Conditions: staticConds.NewListenerUnsupportedValue( + `port: Invalid value: 9113: port is already in use as MetricsPort`, + ), + SupportedKinds: supportedKindsForListeners, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + }, }, - { - Name: "invalid-protected-port", - Source: invalidProtectedPortListener, - Valid: false, - Attachable: true, - Conditions: staticConds.NewListenerUnsupportedValue( - `port: Invalid value: 9113: port is already in use as MetricsPort`, - ), - SupportedKinds: supportedKindsForListeners, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "invalid ports", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{invalidHostnameListener, invalidHTTPSHostnameListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{invalidHostnameListener, invalidHTTPSHostnameListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-hostname", - Source: invalidHostnameListener, - Valid: false, - Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-hostname", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidHostnameListener, + Valid: false, + Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "invalid-https-hostname", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidHTTPSHostnameListener, + Valid: false, + Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "invalid-https-hostname", - Source: invalidHTTPSHostnameListener, - Valid: false, - Conditions: staticConds.NewListenerUnsupportedValue(invalidHostnameMsg), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "invalid hostnames", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{invalidTLSConfigListener}}), + gateway: createGateway(gatewayCfg{name: "gateway1", listeners: []v1.Listener{invalidTLSConfigListener}}), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "invalid-tls-config", - Source: invalidTLSConfigListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: 
staticConds.NewListenerInvalidCertificateRef( - `tls.certificateRefs[0]: Invalid value: test/does-not-exist: secret does not exist`, - ), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "invalid-tls-config", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: invalidTLSConfigListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerInvalidCertificateRef( + `tls.certificateRefs[0]: Invalid value: test/does-not-exist: secret does not exist`, + ), + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "invalid https listener (secret does not exist)", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{ foo80Listener1, foo8080Listener, @@ -849,93 +924,108 @@ func TestBuildGateway(t *testing.T) { }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-8080", - Source: foo8080Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-8081", - Source: foo8081Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-443-https-1", - Source: foo443HTTPSListener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-8443-https", - Source: foo8443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "bar-80", - Source: bar80Listener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "bar-443-https", - Source: bar443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8080", + GatewayName: 
client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8080Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8081", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8081Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-443-https-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPSListener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-8443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo8443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-80", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar80Listener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-8443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar8443HTTPSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "bar-8443-https", - Source: bar8443HTTPSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "multiple valid http/https listeners", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{ foo80Listener1, bar80Listener, @@ -947,91 +1037,110 @@ func TestBuildGateway(t *testing.T) { }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "bar-80", - Source: bar80Listener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: 
staticConds.NewListenerProtocolConflict(conflict80PortMsg), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-443-http", - Source: foo443HTTPListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-80-https", - Source: foo80HTTPSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, - }, - { - Name: "foo-443-https-1", - Source: foo443HTTPSListener1, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-80", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar80Listener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-443-http", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-80-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80HTTPSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict80PortMsg), + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "foo-443-https-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPSListener1, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "bar-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar443HTTPSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: 
staticConds.NewListenerProtocolConflict(conflict443PortMsg), + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + SupportedKinds: supportedKindsForListeners, + }, }, - { - Name: "bar-443-https", - Source: bar443HTTPSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - SupportedKinds: supportedKindsForListeners, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, + Valid: true, }, - Valid: true, }, name: "port/protocol collisions", }, { gateway: createGateway( gatewayCfg{ + name: "gateway1", listeners: []v1.Listener{foo80Listener1, foo443HTTPSListener1}, addresses: []v1.GatewaySpecAddress{{}}, }, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: false, - Conditions: staticConds.NewGatewayUnsupportedValue("spec." + - "addresses: Forbidden: addresses are not supported", - ), + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: false, + Conditions: staticConds.NewGatewayUnsupportedValue("spec." + + "addresses: Forbidden: addresses are not supported", + ), + }, }, name: "gateway addresses are not supported", }, @@ -1042,58 +1151,78 @@ func TestBuildGateway(t *testing.T) { }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, ), gatewayClass: invalidGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: false, - Conditions: staticConds.NewGatewayInvalid("GatewayClass is invalid"), + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: false, + Conditions: staticConds.NewGatewayInvalid("GatewayClass is invalid"), + }, }, name: "invalid gatewayclass", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}}, ), gatewayClass: nil, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: false, - Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: false, + Conditions: staticConds.NewGatewayInvalid("GatewayClass doesn't exist"), + }, }, name: "nil gatewayclass", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443TLSListener, foo443HTTPListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo443TLSListener, foo443HTTPListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: true, - Listeners: []*Listener{ - { - Name: "foo-443-tls", - Source: 
foo443TLSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - }, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, - { - Name: "foo-443-http", - Source: foo443HTTPListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), - SupportedKinds: supportedKindsForListeners, + Valid: true, + Listeners: []*Listener{ + { + Name: "foo-443-tls", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443TLSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: "foo-443-http", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443HTTPListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerProtocolConflict(conflict443PortMsg), + SupportedKinds: supportedKindsForListeners, + }, }, }, }, @@ -1101,35 +1230,43 @@ func TestBuildGateway(t *testing.T) { }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443TLSListener, splat443HTTPSListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo443TLSListener, splat443HTTPSListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: true, - Listeners: []*Listener{ - { - Name: "foo-443-tls", - Source: foo443TLSListener, - Valid: false, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - }, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, - { - Name: "splat-443-https", - Source: splat443HTTPSListener, - Valid: false, - Attachable: true, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), - SupportedKinds: supportedKindsForListeners, + Valid: true, + Listeners: []*Listener{ + { + Name: "foo-443-tls", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443TLSListener, + Valid: false, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: 
helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: "splat-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: splat443HTTPSListener, + Valid: false, + Attachable: true, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + Conditions: staticConds.NewListenerHostnameConflict(conflict443HostnameMsg), + SupportedKinds: supportedKindsForListeners, + }, }, }, }, @@ -1137,145 +1274,201 @@ func TestBuildGateway(t *testing.T) { }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo443TLSListener, bar443HTTPSListener}}, + gatewayCfg{name: "gateway1", listeners: []v1.Listener{foo443TLSListener, bar443HTTPSListener}}, ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: true, - Listeners: []*Listener{ - { - Name: "foo-443-tls", - Source: foo443TLSListener, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: []v1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, - }, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, - { - Name: "bar-443-https", - Source: bar443HTTPSListener, - Valid: true, - Attachable: true, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + Valid: true, + Listeners: []*Listener{ + { + Name: "foo-443-tls", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo443TLSListener, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: []v1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[v1.Group](v1.GroupName)}, + }, + }, + { + Name: "bar-443-https", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: bar443HTTPSListener, + Valid: true, + Attachable: true, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secretSameNs)), + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, }, }, name: "https listener and tls listener with non overlapping hostnames", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: invalidKindRef}), + gateway: createGateway( + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1}, + ref: invalidKindRef, + }, + ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: 
supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, // invalid parametersRef does not invalidate Gateway. + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefInvalid( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + + "supported values: \"NginxProxy\"", + ), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + + "supported values: \"NginxProxy\"", + ), }, - }, - Valid: true, // invalid parametersRef does not invalidate Gateway. - Conditions: []conditions.Condition{ - staticConds.NewGatewayRefInvalid( - "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + - "supported values: \"NginxProxy\"", - ), - staticConds.NewGatewayInvalidParameters( - "spec.infrastructure.parametersRef.kind: Unsupported value: \"Invalid\": " + - "supported values: \"NginxProxy\"", - ), }, }, name: "invalid parameters ref kind", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: npDoesNotExistRef}), + gateway: createGateway( + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1}, + ref: npDoesNotExistRef, + }, + ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, + }, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, // invalid parametersRef does not invalidate Gateway. + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefNotFound(), + staticConds.NewGatewayInvalidParameters( + "spec.infrastructure.parametersRef.name: Not found: \"does-not-exist\"", + ), }, - }, - Valid: true, // invalid parametersRef does not invalidate Gateway. 
- Conditions: []conditions.Condition{ - staticConds.NewGatewayRefNotFound(), - staticConds.NewGatewayInvalidParameters( - "spec.infrastructure.parametersRef.name: Not found: \"does-not-exist\"", - ), }, }, name: "referenced NginxProxy doesn't exist", }, { - gateway: createGateway(gatewayCfg{listeners: []v1.Listener{foo80Listener1}, ref: invalidGwNpRef}), + gateway: createGateway( + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1}, + ref: invalidGwNpRef, + }, + ), gatewayClass: validGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Listeners: []*Listener{ - { - Name: "foo-80-1", - Source: foo80Listener1, - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{}, - L4Routes: map[L4RouteKey]*L4Route{}, - SupportedKinds: supportedKindsForListeners, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + Listeners: []*Listener{ + { + Name: "foo-80-1", + GatewayName: client.ObjectKeyFromObject(getLastCreatedGateway()), + Source: foo80Listener1, + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + SupportedKinds: supportedKindsForListeners, + }, }, - }, - Valid: true, // invalid NginxProxy does not invalidate Gateway. - NginxProxy: &NginxProxy{ - Source: invalidGwNp, - ErrMsgs: field.ErrorList{ - field.Required(field.NewPath("somePath"), "someField"), // fake error + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), + }, + Valid: true, // invalid NginxProxy does not invalidate Gateway. + NginxProxy: &NginxProxy{ + Source: invalidGwNp, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + Conditions: []conditions.Condition{ + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), }, - Valid: false, - }, - Conditions: []conditions.Condition{ - staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), - staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), }, }, name: "invalid NginxProxy", }, { gateway: createGateway( - gatewayCfg{listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}, ref: invalidGwNpRef}, + gatewayCfg{ + name: "gateway1", + listeners: []v1.Listener{foo80Listener1, invalidProtocolListener}, ref: invalidGwNpRef, + }, ), gatewayClass: invalidGC, - expected: &Gateway{ - Source: getLastCreatedGateway(), - Valid: false, - NginxProxy: &NginxProxy{ - Source: invalidGwNp, - ErrMsgs: field.ErrorList{ - field.Required(field.NewPath("somePath"), "someField"), // fake error + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway1"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway1", gcName), }, Valid: false, + NginxProxy: &NginxProxy{ + Source: invalidGwNp, + ErrMsgs: field.ErrorList{ + field.Required(field.NewPath("somePath"), "someField"), // fake error + }, + Valid: false, + }, + Conditions: append( + staticConds.NewGatewayInvalid("GatewayClass is invalid"), + staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), + staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), + ), }, - Conditions: append( - staticConds.NewGatewayInvalid("GatewayClass is 
invalid"), - staticConds.NewGatewayRefInvalid("somePath: Required value: someField"), - staticConds.NewGatewayInvalidParameters("somePath: Required value: someField"), - ), }, name: "invalid gatewayclass and invalid NginxProxy", }, @@ -1301,7 +1494,7 @@ func TestBuildGateway(t *testing.T) { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) resolver := newReferenceGrantResolver(test.refGrants) - result := buildGateway(test.gateway, secretResolver, test.gatewayClass, resolver, nginxProxies) + result := buildGateways(test.gateway, secretResolver, test.gatewayClass, resolver, nginxProxies) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) }) } diff --git a/internal/mode/static/state/graph/graph.go b/internal/mode/static/state/graph/graph.go index 88d6b4abe8..61d39fb44c 100644 --- a/internal/mode/static/state/graph/graph.go +++ b/internal/mode/static/state/graph/graph.go @@ -8,7 +8,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha2" "sigs.k8s.io/gateway-api/apis/v1alpha3" @@ -16,7+15,6 @@ import ( ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" - "github.com/nginx/nginx-gateway-fabric/internal/framework/controller" "github.com/nginx/nginx-gateway-fabric/internal/framework/controller/index" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" ngftypes "github.com/nginx/nginx-gateway-fabric/internal/framework/types" @@ -47,16 +45,12 @@ type ClusterState struct { type Graph struct { // GatewayClass holds the GatewayClass resource. GatewayClass *GatewayClass - // Gateway holds the winning Gateway resource. - Gateway *Gateway + // Gateways holds all the Gateway resources. + Gateways map[types.NamespacedName]*Gateway // IgnoredGatewayClasses holds the ignored GatewayClass resources, which reference NGINX Gateway Fabric in the // controllerName, but are not configured via the NGINX Gateway Fabric CLI argument. It doesn't hold the GatewayClass // resources that do not belong to the NGINX Gateway Fabric. IgnoredGatewayClasses map[types.NamespacedName]*gatewayv1.GatewayClass - // IgnoredGateways holds the ignored Gateway resources, which belong to the NGINX Gateway Fabric (based on the - // GatewayClassName field of the resource) but ignored. It doesn't hold the Gateway resources that do not belong to - // the NGINX Gateway Fabric. - IgnoredGateways map[types.NamespacedName]*gatewayv1.Gateway // Routes hold Route resources. Routes map[RouteKey]*L7Route // L4Routes hold L4Route resources. @@ -72,23 +66,16 @@ type Graph struct { ReferencedServices map[types.NamespacedName]*ReferencedService // ReferencedCaCertConfigMaps includes ConfigMaps that have been referenced by any BackendTLSPolicies. ReferencedCaCertConfigMaps map[types.NamespacedName]*CaCertConfigMap - // ReferencedNginxProxies includes NginxProxies that have been referenced by a GatewayClass or the winning Gateway. + // ReferencedNginxProxies includes NginxProxies that have been referenced by a GatewayClass or a Gateway. ReferencedNginxProxies map[types.NamespacedName]*NginxProxy // BackendTLSPolicies holds BackendTLSPolicy resources. BackendTLSPolicies map[types.NamespacedName]*BackendTLSPolicy // NGFPolicies holds all NGF Policies. 
NGFPolicies map[PolicyKey]*Policy - // GlobalSettings contains global settings from the current state of the graph that may be - // needed for policy validation or generation if certain policies rely on those global settings. - GlobalSettings *policies.GlobalSettings // SnippetsFilters holds all the SnippetsFilters. SnippetsFilters map[types.NamespacedName]*SnippetsFilter // PlusSecrets holds the secrets related to NGINX Plus licensing. PlusSecrets map[types.NamespacedName][]PlusSecretFile - // LatestReloadResult is the latest result of applying config to nginx for this Gateway. - LatestReloadResult NginxReloadResult - // DeploymentName is the name of the nginx Deployment for this Gateway. - DeploymentName types.NamespacedName } // NginxReloadResult describes the result of an NGINX reload. @@ -125,7 +112,7 @@ func (g *Graph) IsReferenced(resourceType ngftypes.ObjectType, nsname types.Name // `exists` does not cover the case highlighted above by `existed` and vice versa so both are needed. _, existed := g.ReferencedNamespaces[nsname] - exists := isNamespaceReferenced(obj, g.Gateway) + exists := isNamespaceReferenced(obj, g.Gateways) return existed || exists // Service reference exists if at least one HTTPRoute references it. case *v1.Service: @@ -138,7 +125,7 @@ func (g *Graph) IsReferenced(resourceType ngftypes.ObjectType, nsname types.Name // Service Namespace should be the same Namespace as the EndpointSlice _, exists := g.ReferencedServices[types.NamespacedName{Namespace: nsname.Namespace, Name: svcName}] return exists - // NginxProxy reference exists if the GatewayClass or winning Gateway references it. + // NginxProxy reference exists if the GatewayClass or Gateway references it. case *ngfAPIv1alpha2.NginxProxy: _, exists := g.ReferencedNginxProxies[nsname] return exists @@ -190,11 +177,11 @@ func (g *Graph) gatewayAPIResourceExist(ref v1alpha2.LocalPolicyTargetReference, switch kind := ref.Kind; kind { case kinds.Gateway: - if g.Gateway == nil { + if len(g.Gateways) == 0 { return false } - return gatewayExists(refNsName, g.Gateway.Source, g.IgnoredGateways) + return gatewayExists(refNsName, g.Gateways) case kinds.HTTPRoute, kinds.GRPCRoute: _, exists := g.Routes[routeKeyForKind(kind, refNsName)] return exists @@ -223,7 +210,7 @@ func BuildGraph( state.NginxProxies, validators.GenericValidator, processedGwClasses.Winner, - processedGws.Winner, + processedGws, ) gc := buildGatewayClass( @@ -237,8 +224,8 @@ func BuildGraph( refGrantResolver := newReferenceGrantResolver(state.ReferenceGrants) - gw := buildGateway( - processedGws.Winner, + gws := buildGateways( + processedGws, secretResolver, gc, refGrantResolver, @@ -250,80 +237,57 @@ func BuildGraph( configMapResolver, secretResolver, controllerName, - gw, + gws, ) processedSnippetsFilters := processSnippetsFilters(state.SnippetsFilters) - var effectiveNginxProxy *EffectiveNginxProxy - if gw != nil { - effectiveNginxProxy = gw.EffectiveNginxProxy - } routes := buildRoutesForGateways( validators.HTTPFieldsValidator, state.HTTPRoutes, state.GRPCRoutes, - processedGws.GetAllNsNames(), - effectiveNginxProxy, + gws, processedSnippetsFilters, ) l4routes := buildL4RoutesForGateways( state.TLSRoutes, - processedGws.GetAllNsNames(), state.Services, - effectiveNginxProxy, + gws, refGrantResolver, ) - bindRoutesToListeners(routes, l4routes, gw, state.Namespaces) addBackendRefsToRouteRules( routes, refGrantResolver, state.Services, processedBackendTLSPolicies, - effectiveNginxProxy, ) + bindRoutesToListeners(routes, l4routes, gws, state.Namespaces) 
- referencedNamespaces := buildReferencedNamespaces(state.Namespaces, gw) + referencedNamespaces := buildReferencedNamespaces(state.Namespaces, gws) - referencedServices := buildReferencedServices(routes, l4routes, gw) + referencedServices := buildReferencedServices(routes, l4routes, gws) + + addGatewaysForBackendTLSPolicies(processedBackendTLSPolicies, referencedServices) - var globalSettings *policies.GlobalSettings - if gw != nil && gw.EffectiveNginxProxy != nil { - globalSettings = &policies.GlobalSettings{ - NginxProxyValid: true, // for effective nginx proxy to be set, the config must be valid - TelemetryEnabled: telemetryEnabledForNginxProxy(gw.EffectiveNginxProxy), - } - } // policies must be processed last because they rely on the state of the other resources in the graph processedPolicies := processPolicies( state.NGFPolicies, validators.PolicyValidator, - processedGws, routes, referencedServices, - globalSettings, + gws, ) setPlusSecretContent(state.Secrets, plusSecrets) - var deploymentName types.NamespacedName - if gw != nil { - deploymentName = types.NamespacedName{ - Namespace: gw.Source.Namespace, - Name: controller.CreateNginxResourceName(gw.Source.Name, gcName), - } - } - g := &Graph{ GatewayClass: gc, - Gateway: gw, - DeploymentName: deploymentName, + Gateways: gws, Routes: routes, L4Routes: l4routes, IgnoredGatewayClasses: processedGwClasses.Ignored, - IgnoredGateways: processedGws.Ignored, ReferencedSecrets: secretResolver.getResolvedSecrets(), ReferencedNamespaces: referencedNamespaces, ReferencedServices: referencedServices, @@ -331,31 +295,21 @@ func BuildGraph( ReferencedNginxProxies: processedNginxProxies, BackendTLSPolicies: processedBackendTLSPolicies, NGFPolicies: processedPolicies, - GlobalSettings: globalSettings, SnippetsFilters: processedSnippetsFilters, PlusSecrets: plusSecrets, } - g.attachPolicies(controllerName) + g.attachPolicies(validators.PolicyValidator, controllerName) return g } -func gatewayExists( - gwNsName types.NamespacedName, - winner *gatewayv1.Gateway, - ignored map[types.NamespacedName]*gatewayv1.Gateway, -) bool { - if winner == nil { +func gatewayExists(gwNsName types.NamespacedName, gateways map[types.NamespacedName]*Gateway) bool { + if len(gateways) == 0 { return false } - if client.ObjectKeyFromObject(winner) == gwNsName { - return true - } - - _, exists := ignored[gwNsName] - + _, exists := gateways[gwNsName] return exists } diff --git a/internal/mode/static/state/graph/graph_test.go b/internal/mode/static/state/graph/graph_test.go index 3b2fcb5a6f..0fdeecaeb3 100644 --- a/internal/mode/static/state/graph/graph_test.go +++ b/internal/mode/static/state/graph/graph_test.go @@ -85,7 +85,7 @@ func TestBuildGraph(t *testing.T) { }, Valid: true, IsReferenced: true, - Gateway: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Gateways: []types.NamespacedName{{Namespace: testNs, Name: "gateway-1"}}, Conditions: btpAcceptedConds, CaCertRef: types.NamespacedName{Namespace: "service", Name: "configmap"}, } @@ -165,11 +165,12 @@ func TestBuildGraph(t *testing.T) { createValidRuleWithBackendRefs := func(matches []gatewayv1.HTTPRouteMatch) RouteRule { refs := []BackendRef{ { - SvcNsName: types.NamespacedName{Namespace: "service", Name: "foo"}, - ServicePort: v1.ServicePort{Port: 80}, - Valid: true, - Weight: 1, - BackendTLSPolicy: &btp, + SvcNsName: types.NamespacedName{Namespace: "service", Name: "foo"}, + ServicePort: v1.ServicePort{Port: 80}, + Valid: true, + Weight: 1, + BackendTLSPolicy: &btp, + InvalidForGateways: 
map[types.NamespacedName]conditions.Condition{}, }, } rbrs := []RouteBackendRef{ @@ -370,73 +371,75 @@ func TestBuildGraph(t *testing.T) { }, } - createGateway := func(name, nginxProxyName string) *gatewayv1.Gateway { - return &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNs, - Name: name, - }, - Spec: gatewayv1.GatewaySpec{ - GatewayClassName: gcName, - Infrastructure: &gatewayv1.GatewayInfrastructure{ - ParametersRef: &gatewayv1.LocalParametersReference{ - Group: ngfAPIv1alpha2.GroupName, - Kind: kinds.NginxProxy, - Name: nginxProxyName, - }, + createGateway := func(name, nginxProxyName string) *Gateway { + return &Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNs, + Name: name, }, - Listeners: []gatewayv1.Listener{ - { - Name: "listener-80-1", - Hostname: nil, - Port: 80, - Protocol: gatewayv1.HTTPProtocolType, - AllowedRoutes: &gatewayv1.AllowedRoutes{ - Namespaces: &gatewayv1.RouteNamespaces{ - From: helpers.GetPointer(gatewayv1.NamespacesFromSelector), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "allowed", + Spec: gatewayv1.GatewaySpec{ + GatewayClassName: gcName, + Infrastructure: &gatewayv1.GatewayInfrastructure{ + ParametersRef: &gatewayv1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: nginxProxyName, + }, + }, + Listeners: []gatewayv1.Listener{ + { + Name: "listener-80-1", + Hostname: nil, + Port: 80, + Protocol: gatewayv1.HTTPProtocolType, + AllowedRoutes: &gatewayv1.AllowedRoutes{ + Namespaces: &gatewayv1.RouteNamespaces{ + From: helpers.GetPointer(gatewayv1.NamespacesFromSelector), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "allowed", + }, }, }, }, }, - }, - { - Name: "listener-443-1", - Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.com")), - Port: 443, - TLS: &gatewayv1.GatewayTLSConfig{ - Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), - CertificateRefs: []gatewayv1.SecretObjectReference{ - { - Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), - Name: gatewayv1.ObjectName(secret.Name), - Namespace: helpers.GetPointer(gatewayv1.Namespace(secret.Namespace)), + { + Name: "listener-443-1", + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.com")), + Port: 443, + TLS: &gatewayv1.GatewayTLSConfig{ + Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), + CertificateRefs: []gatewayv1.SecretObjectReference{ + { + Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), + Name: gatewayv1.ObjectName(secret.Name), + Namespace: helpers.GetPointer(gatewayv1.Namespace(secret.Namespace)), + }, }, }, + Protocol: gatewayv1.HTTPSProtocolType, }, - Protocol: gatewayv1.HTTPSProtocolType, - }, - { - Name: "listener-443-2", - Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), - Port: 443, - Protocol: gatewayv1.TLSProtocolType, - TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, - AllowedRoutes: &gatewayv1.AllowedRoutes{ - Kinds: []gatewayv1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + { + Name: "listener-443-2", + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), + Port: 443, + Protocol: gatewayv1.TLSProtocolType, + TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, + AllowedRoutes: &gatewayv1.AllowedRoutes{ + Kinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: 
helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, }, }, - }, - { - Name: "listener-8443", - Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), - Port: 8443, - Protocol: gatewayv1.TLSProtocolType, - TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, + { + Name: "listener-8443", + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")), + Port: 8443, + Protocol: gatewayv1.TLSProtocolType, + TLS: &gatewayv1.GatewayTLSConfig{Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough)}, + }, }, }, }, @@ -447,8 +450,6 @@ func TestBuildGraph(t *testing.T) { gw2 := createGateway("gateway-2", "np-2") // np1 is referenced by gw1 and sets the nginx error log to error. - // Since gw1 is the winning gateway, we expect this nginx proxy to be configured and merged with the gateway class - // nginx proxy configuration. np1 := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ Name: "np-1", @@ -462,7 +463,6 @@ func TestBuildGraph(t *testing.T) { } // np2 is referenced by gw2 and sets the IPFamily to IPv6. - // Since gw2 is not the winning gateway, we do not expect this nginx proxy to be configured. np2 := &ngfAPIv1alpha2.NginxProxy{ ObjectMeta: metav1.ObjectMeta{ Name: "np-2", @@ -584,6 +584,25 @@ func TestBuildGraph(t *testing.T) { }, } + // np1Effective is the combined NginxProxy of npGlobal and np1 + np1Effective := &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, + }, + }, + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + }, + } + // NGF Policies // // We have to use real policies here instead of a mocks because the Diff function we use in the test fails when @@ -620,7 +639,8 @@ func TestBuildGraph(t *testing.T) { Nsname: types.NamespacedName{Namespace: testNs, Name: "hr-1"}, }, }, - Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, } gwPolicyKey := PolicyKey{GVK: polGVK, NsName: types.NamespacedName{Namespace: testNs, Name: "gwPolicy"}} @@ -653,7 +673,8 @@ func TestBuildGraph(t *testing.T) { Nsname: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, }, }, - Valid: true, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, } createStateWithGatewayClass := func(gc *gatewayv1.GatewayClass) ClusterState { @@ -662,8 +683,8 @@ func TestBuildGraph(t *testing.T) { client.ObjectKeyFromObject(gc): gc, }, Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ - client.ObjectKeyFromObject(gw1): gw1, - client.ObjectKeyFromObject(gw2): gw2, + client.ObjectKeyFromObject(gw1.Source): gw1.Source, + client.ObjectKeyFromObject(gw2.Source): gw2.Source, }, HTTPRoutes: map[types.NamespacedName]*gatewayv1.HTTPRoute{ client.ObjectKeyFromObject(hr1): hr1, @@ -722,13 +743,21 @@ func TestBuildGraph(t *testing.T) { Source: hr1, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, SectionName: hr1.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ 
- Attached: true, - AcceptedHostnames: map[string][]string{"listener-80-1": {"foo.example.com"}}, - ListenerPort: 80, + Attached: true, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-80-1", + ): {"foo.example.com"}, + }, + ListenerPort: 80, }, }, }, @@ -745,13 +774,22 @@ func TestBuildGraph(t *testing.T) { Source: tr, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443-2": {"fizz.example.org"}, - "listener-8443": {"fizz.example.org"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-443-2", + ): {"fizz.example.org"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-8443", + ): {"fizz.example.org"}, }, }, }, @@ -766,7 +804,8 @@ func TestBuildGraph(t *testing.T) { ServicePort: v1.ServicePort{ Port: 80, }, - Valid: true, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, } @@ -777,12 +816,15 @@ func TestBuildGraph(t *testing.T) { Source: tr2, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, Attachment: &ParentRefAttachmentStatus{ Attached: false, AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteHostnameConflict(), + FailedConditions: []conditions.Condition{staticConds.NewRouteHostnameConflict()}, }, }, }, @@ -796,7 +838,8 @@ func TestBuildGraph(t *testing.T) { ServicePort: v1.ServicePort{ Port: 80, }, - Valid: true, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, } @@ -808,13 +851,21 @@ func TestBuildGraph(t *testing.T) { Source: gr, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, SectionName: gr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ - Attached: true, - AcceptedHostnames: map[string][]string{"listener-80-1": {"bar.example.com"}}, - ListenerPort: 80, + Attached: true, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-80-1", + ): {"bar.example.com"}, + }, + ListenerPort: 80, }, }, }, @@ -833,13 +884,21 @@ func TestBuildGraph(t *testing.T) { Source: hr3, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gw1), + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw1.Source), + EffectiveNginxProxy: np1Effective, + }, SectionName: hr3.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ - Attached: true, - AcceptedHostnames: map[string][]string{"listener-443-1": {"foo.example.com"}}, - ListenerPort: 443, + Attached: true, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw1.Source), + "listener-443-1", + ): {"foo.example.com"}, + }, + ListenerPort: 443, }, }, }, @@ -865,82 +924,165 @@ func TestBuildGraph(t *testing.T) { Valid: true, }, }, - Gateway: &Gateway{ - Source: gw1, - Listeners: []*Listener{ 
- { - Name: "listener-80-1", - Source: gw1.Spec.Listeners[0], - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{ - CreateRouteKey(hr1): routeHR1, - CreateRouteKey(gr): routeGR, + Gateways: map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gateway-1"}: { + Source: gw1.Source, + Listeners: []*Listener{ + { + Name: "listener-80-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{ + CreateRouteKey(hr1): routeHR1, + CreateRouteKey(gr): routeGR, + }, + SupportedKinds: supportedKindsForListeners, + L4Routes: map[L4RouteKey]*L4Route{}, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"app": "allowed"}), + }, + { + Name: "listener-443-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{CreateRouteKey(hr3): routeHR3}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secret)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "listener-443-2", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[2], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + }, + { + Name: "listener-8443", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-1"}, + Source: gw1.Source.Spec.Listeners[3], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, }, - SupportedKinds: supportedKindsForListeners, - L4Routes: map[L4RouteKey]*L4Route{}, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"app": "allowed"}), }, - { - Name: "listener-443-1", - Source: gw1.Spec.Listeners[1], - Valid: true, - Attachable: true, - Routes: map[RouteKey]*L7Route{CreateRouteKey(hr3): routeHR3}, - L4Routes: map[L4RouteKey]*L4Route{}, - ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secret)), - SupportedKinds: supportedKindsForListeners, + Valid: true, + Policies: []*Policy{processedGwPolicy}, + NginxProxy: &NginxProxy{ + Source: np1, + Valid: true, }, - { - Name: "listener-443-2", - Source: gw1.Spec.Listeners[2], - Valid: true, - Attachable: true, - L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, - Routes: map[RouteKey]*L7Route{}, - SupportedKinds: []gatewayv1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, + }, }, - }, - { - Name: "listener-8443", - Source: gw1.Spec.Listeners[3], - 
Valid: true, - Attachable: true, - L4Routes: map[L4RouteKey]*L4Route{CreateRouteKeyL4(tr): routeTR}, - Routes: map[RouteKey]*L7Route{}, - SupportedKinds: []gatewayv1.RouteGroupKind{ - {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), }, }, + Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-1-my-class", + }, }, - Valid: true, - Policies: []*Policy{processedGwPolicy}, - NginxProxy: &NginxProxy{ - Source: np1, - Valid: true, - }, - EffectiveNginxProxy: &EffectiveNginxProxy{ - Telemetry: &ngfAPIv1alpha2.Telemetry{ - Exporter: &ngfAPIv1alpha2.TelemetryExporter{ - Endpoint: helpers.GetPointer("1.2.3.4:123"), - Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), - BatchSize: helpers.GetPointer(int32(512)), - BatchCount: helpers.GetPointer(int32(4)), + {Namespace: testNs, Name: "gateway-2"}: { + Source: gw2.Source, + Listeners: []*Listener{ + { + Name: "listener-80-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[0], + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: supportedKindsForListeners, + L4Routes: map[L4RouteKey]*L4Route{}, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"app": "allowed"}), + }, + { + Name: "listener-443-1", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[1], + Valid: true, + Attachable: true, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + ResolvedSecret: helpers.GetPointer(client.ObjectKeyFromObject(secret)), + SupportedKinds: supportedKindsForListeners, + }, + { + Name: "listener-443-2", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[2], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + }, + { + Name: "listener-8443", + GatewayName: types.NamespacedName{Namespace: testNs, Name: "gateway-2"}, + Source: gw2.Source.Spec.Listeners[3], + Valid: true, + Attachable: true, + L4Routes: map[L4RouteKey]*L4Route{}, + Routes: map[RouteKey]*L7Route{}, + SupportedKinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, }, - ServiceName: helpers.GetPointer("my-svc"), - SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ - {Key: "key", Value: "value"}, + }, + Valid: true, + NginxProxy: &NginxProxy{ + Source: np2, + Valid: true, + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("1.2.3.4:123"), + Interval: helpers.GetPointer(ngfAPIv1alpha1.Duration("5s")), + BatchSize: helpers.GetPointer(int32(512)), + BatchCount: helpers.GetPointer(int32(4)), + }, + ServiceName: helpers.GetPointer("my-svc"), + SpanAttributes: []ngfAPIv1alpha1.SpanAttribute{ + {Key: "key", Value: "value"}, + }, }, + IPFamily: helpers.GetPointer(ngfAPIv1alpha2.IPv6), }, - Logging: &ngfAPIv1alpha2.NginxLogging{ - ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelError), + Conditions: 
[]conditions.Condition{staticConds.NewGatewayResolvedRefs()}, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway-2-my-class", }, }, - Conditions: []conditions.Condition{staticConds.NewGatewayResolvedRefs()}, - }, - IgnoredGateways: map[types.NamespacedName]*gatewayv1.Gateway{ - {Namespace: testNs, Name: "gateway-2"}: gw2, }, Routes: map[RouteKey]*L7Route{ CreateRouteKey(hr1): routeHR1, @@ -964,8 +1106,12 @@ func TestBuildGraph(t *testing.T) { client.ObjectKeyFromObject(ns): ns, }, ReferencedServices: map[types.NamespacedName]*ReferencedService{ - client.ObjectKeyFromObject(svc): {}, - client.ObjectKeyFromObject(svc1): {}, + client.ObjectKeyFromObject(svc): { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: testNs, Name: "gateway-1"}: {}}, + }, + client.ObjectKeyFromObject(svc1): { + GatewayNsNames: map[types.NamespacedName]struct{}{{Namespace: testNs, Name: "gateway-1"}: {}}, + }, }, ReferencedCaCertConfigMaps: map[types.NamespacedName]*CaCertConfigMap{ client.ObjectKeyFromObject(cm): { @@ -987,15 +1133,15 @@ func TestBuildGraph(t *testing.T) { Source: np1, Valid: true, }, + client.ObjectKeyFromObject(np2): { + Source: np2, + Valid: true, + }, }, NGFPolicies: map[PolicyKey]*Policy{ hrPolicyKey: processedRoutePolicy, gwPolicyKey: processedGwPolicy, }, - GlobalSettings: &policies.GlobalSettings{ - NginxProxyValid: true, - TelemetryEnabled: true, - }, SnippetsFilters: map[types.NamespacedName]*SnippetsFilter{ client.ObjectKeyFromObject(unreferencedSnippetsFilter): processedUnrefSnippetsFilter, client.ObjectKeyFromObject(referencedSnippetsFilter): processedRefSnippetsFilter, @@ -1009,10 +1155,6 @@ func TestBuildGraph(t *testing.T) { }, }, }, - DeploymentName: types.NamespacedName{ - Namespace: "test", - Name: "gateway-1-my-class", - }, } } @@ -1165,15 +1307,17 @@ func TestIsReferenced(t *testing.T) { endpointSliceNotInGraph := createEndpointSlice("endpointSliceNotInGraph", "serviceNotInGraph") emptyEndpointSlice := &discoveryV1.EndpointSlice{} - gw := &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + gw := map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, }, + Valid: true, }, - Valid: true, } nsNotInGraphButInGateway := &v1.Namespace{ @@ -1217,7 +1361,7 @@ func TestIsReferenced(t *testing.T) { } graph := &Graph{ - Gateway: gw, + Gateways: gw, ReferencedSecrets: map[types.NamespacedName]*Secret{ client.ObjectKeyFromObject(baseSecret): { Source: baseSecret, @@ -1413,17 +1557,16 @@ func TestIsNGFPolicyRelevant(t *testing.T) { getGraph := func() *Graph { return &Graph{ - Gateway: &Gateway{ - Source: &gatewayv1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: "test", + Gateways: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gw"}: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: "test", + }, }, }, }, - IgnoredGateways: map[types.NamespacedName]*gatewayv1.Gateway{ - {Namespace: "test", Name: "ignored"}: {}, - }, Routes: map[RouteKey]*L7Route{ hrKey: {}, grKey: {}, @@ -1482,13 +1625,6 @@ func TestIsNGFPolicyRelevant(t *testing.T) { nsname: types.NamespacedName{Namespace: "test", Name: "ref-gw"}, expRelevant: true, }, - { - name: "relevant; policy references an ignored gateway", - graph: getGraph(), - 
policy: getPolicy(createTestRef(kinds.Gateway, gatewayv1.GroupName, "ignored")), - nsname: types.NamespacedName{Namespace: "test", Name: "ref-ignored"}, - expRelevant: true, - }, { name: "relevant; policy references an httproute in the graph", graph: getGraph(), @@ -1527,7 +1663,7 @@ func TestIsNGFPolicyRelevant(t *testing.T) { { name: "irrelevant; policy references a Gateway, but the graph's Gateway is nil", graph: getModifiedGraph(func(g *Graph) *Graph { - g.Gateway = nil + g.Gateways = nil return g }), policy: getPolicy(createTestRef(kinds.Gateway, gatewayv1.GroupName, "diff")), @@ -1537,7 +1673,8 @@ func TestIsNGFPolicyRelevant(t *testing.T) { { name: "irrelevant; policy references a Gateway, but the graph's Gateway.Source is nil", graph: getModifiedGraph(func(g *Graph) *Graph { - g.Gateway.Source = nil + gw := g.Gateways[types.NamespacedName{Namespace: "test", Name: "gw"}] + gw.Source = nil return g }), policy: getPolicy(createTestRef(kinds.Gateway, gatewayv1.GroupName, "diff")), @@ -1610,3 +1747,38 @@ func TestIsNGFPolicyRelevantPanics(t *testing.T) { g.Expect(isRelevant).To(Panic()) } + +func TestGatewayExists(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + tests := []struct { + gateways map[types.NamespacedName]*Gateway + gwNsName types.NamespacedName + name string + expectedResult bool + }{ + { + name: "gateway exists", + gwNsName: types.NamespacedName{Namespace: "test", Name: "gw"}, + gateways: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gw"}: {}, + {Namespace: "test", Name: "gw2"}: {}, + }, + expectedResult: true, + }, + { + name: "gateway does not exist", + gwNsName: types.NamespacedName{Namespace: "test", Name: "gw"}, + gateways: nil, + expectedResult: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g.Expect(gatewayExists(test.gwNsName, test.gateways)).To(Equal(test.expectedResult)) + }) + } +} diff --git a/internal/mode/static/state/graph/grpcroute.go b/internal/mode/static/state/graph/grpcroute.go index aaacdb0fff..f114a130ee 100644 --- a/internal/mode/static/state/graph/grpcroute.go +++ b/internal/mode/static/state/graph/grpcroute.go @@ -19,8 +19,7 @@ import ( func buildGRPCRoute( validator validation.HTTPFieldsValidator, ghr *v1.GRPCRoute, - gatewayNsNames []types.NamespacedName, - http2disabled bool, + gws map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) *L7Route { r := &L7Route{ @@ -28,7 +27,7 @@ func buildGRPCRoute( RouteType: RouteTypeGRPC, } - sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gatewayNsNames) + sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gws) if err != nil { r.Valid = false @@ -40,14 +39,6 @@ func buildGRPCRoute( } r.ParentRefs = sectionNameRefs - if http2disabled { - r.Valid = false - msg := "HTTP2 is disabled - cannot configure GRPCRoutes" - r.Conditions = append(r.Conditions, staticConds.NewRouteUnsupportedConfiguration(msg)) - - return r - } - if err := validateHostnames( ghr.Spec.Hostnames, field.NewPath("spec").Child("hostnames"), @@ -78,9 +69,8 @@ func buildGRPCMirrorRoutes( routes map[RouteKey]*L7Route, l7route *L7Route, route *v1.GRPCRoute, - gatewayNsNames []types.NamespacedName, + gateways map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, - http2disabled bool, ) { for idx, rule := range l7route.Spec.Rules { if rule.Filters.Valid { @@ -110,8 +100,7 @@ func buildGRPCMirrorRoutes( mirrorRoute := 
buildGRPCRoute( validation.SkipValidator{}, tmpMirrorRoute, - gatewayNsNames, - http2disabled, + gateways, snippetsFilters, ) diff --git a/internal/mode/static/state/graph/grpcroute_test.go b/internal/mode/static/state/graph/grpcroute_test.go index e256e4632e..255a201186 100644 --- a/internal/mode/static/state/graph/grpcroute_test.go +++ b/internal/mode/static/state/graph/grpcroute_test.go @@ -84,6 +84,21 @@ func TestBuildGRPCRoutes(t *testing.T) { t.Parallel() gwNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gateways := map[types.NamespacedName]*Gateway{ + gwNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + }, + } + snippetsFilterRef := v1.GRPCRouteFilter{ Type: v1.GRPCRouteFilterExtensionRef, ExtensionRef: &v1.LocalObjectReference{ @@ -127,12 +142,12 @@ func TestBuildGRPCRoutes(t *testing.T) { } tests := []struct { - expected map[RouteKey]*L7Route - name string - gwNsNames []types.NamespacedName + expected map[RouteKey]*L7Route + gateways map[types.NamespacedName]*Gateway + name string }{ { - gwNsNames: []types.NamespacedName{gwNsName}, + gateways: gateways, expected: map[RouteKey]*L7Route{ CreateRouteKey(gr): { RouteType: RouteTypeGRPC, @@ -140,7 +155,7 @@ func TestBuildGRPCRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: CreateParentRefGateway(gateways[gwNsName]), SectionName: gr.Spec.ParentRefs[0].SectionName, }, }, @@ -187,18 +202,14 @@ func TestBuildGRPCRoutes(t *testing.T) { name: "normal case", }, { - gwNsNames: []types.NamespacedName{}, - expected: nil, - name: "no gateways", + gateways: nil, + expected: nil, + name: "no gateways", }, } validator := &validationfakes.FakeHTTPFieldsValidator{} - npCfg := &EffectiveNginxProxy{ - DisableHTTP2: helpers.GetPointer(false), - } - for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() @@ -218,8 +229,7 @@ func TestBuildGRPCRoutes(t *testing.T) { validator, map[types.NamespacedName]*v1.HTTPRoute{}, grRoutes, - test.gwNsNames, - npCfg, + test.gateways, snippetsFilters, ) g.Expect(helpers.Diff(test.expected, routes)).To(BeEmpty()) @@ -229,7 +239,20 @@ func TestBuildGRPCRoutes(t *testing.T) { func TestBuildGRPCRoute(t *testing.T) { t.Parallel() - gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + + gw := &Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + } + gatewayNsName := client.ObjectKeyFromObject(gw.Source) methodMatchRule := createGRPCMethodMatch("myService", "myMethod", "Exact") headersMatchRule := createGRPCHeadersMatch("Exact", "MyHeader", "SomeValue") @@ -488,11 +511,10 @@ func TestBuildGRPCRoute(t *testing.T) { } tests := []struct { - validator *validationfakes.FakeHTTPFieldsValidator - gr *v1.GRPCRoute - expected *L7Route - name string - http2disabled bool + validator *validationfakes.FakeHTTPFieldsValidator + gr *v1.GRPCRoute + expected *L7Route + name string }{ { validator: createAllValidValidator(), @@ -503,7 +525,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grBoth.Spec.ParentRefs[0].SectionName, }, }, @@ -544,7 +566,7 @@ func TestBuildGRPCRoute(t *testing.T) { 
ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grEmptyMatch.Spec.ParentRefs[0].SectionName, }, }, @@ -576,7 +598,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grValidFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -619,7 +641,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidMatchesEmptyMethodFields.Spec.ParentRefs[0].SectionName, }, }, @@ -663,7 +685,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidMatchesInvalidMethodFields.Spec.ParentRefs[0].SectionName, }, }, @@ -700,28 +722,6 @@ func TestBuildGRPCRoute(t *testing.T) { }, name: "invalid route with duplicate sectionName", }, - { - validator: createAllValidValidator(), - gr: grBoth, - expected: &L7Route{ - RouteType: RouteTypeGRPC, - Source: grBoth, - ParentRefs: []ParentRef{ - { - Idx: 0, - Gateway: gatewayNsName, - SectionName: grBoth.Spec.ParentRefs[0].SectionName, - }, - }, - Conditions: []conditions.Condition{ - staticConds.NewRouteUnsupportedConfiguration( - `HTTP2 is disabled - cannot configure GRPCRoutes`, - ), - }, - }, - http2disabled: true, - name: "invalid route with disabled http2", - }, { validator: createAllValidValidator(), gr: grOneInvalid, @@ -733,7 +733,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grOneInvalid.Spec.ParentRefs[0].SectionName, }, }, @@ -779,7 +779,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidHeadersInvalidType.Spec.ParentRefs[0].SectionName, }, }, @@ -817,7 +817,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidHeadersEmptyType.Spec.ParentRefs[0].SectionName, }, }, @@ -855,7 +855,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidMatchesNilMethodType.Spec.ParentRefs[0].SectionName, }, }, @@ -892,7 +892,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -937,7 +937,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidHostname.Spec.ParentRefs[0].SectionName, }, }, @@ -960,7 +960,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grInvalidSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -998,7 +998,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: grUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -1037,7 +1037,7 @@ func TestBuildGRPCRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: 
CreateParentRefGateway(gw), SectionName: grInvalidAndUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -1071,7 +1071,9 @@ func TestBuildGRPCRoute(t *testing.T) { }, } - gatewayNsNames := []types.NamespacedName{gatewayNsName} + gws := map[types.NamespacedName]*Gateway{ + gatewayNsName: gw, + } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -1081,7 +1083,7 @@ func TestBuildGRPCRoute(t *testing.T) { snippetsFilters := map[types.NamespacedName]*SnippetsFilter{ {Namespace: "test", Name: "sf"}: {Valid: true}, } - route := buildGRPCRoute(test.validator, test.gr, gatewayNsNames, test.http2disabled, snippetsFilters) + route := buildGRPCRoute(test.validator, test.gr, gws, snippetsFilters) g.Expect(helpers.Diff(test.expected, route)).To(BeEmpty()) }) } @@ -1089,8 +1091,24 @@ func TestBuildGRPCRoute(t *testing.T) { func TestBuildGRPCRouteWithMirrorRoutes(t *testing.T) { t.Parallel() + gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gateways := map[types.NamespacedName]*Gateway{ + gatewayNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + }, + } + // Create a route with a request mirror filter and another random filter mirrorFilter := v1.GRPCRouteFilter{ Type: v1.GRPCRouteFilterRequestMirror, @@ -1168,7 +1186,7 @@ func TestBuildGRPCRouteWithMirrorRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gateways[gatewayNsName]), SectionName: gr.Spec.ParentRefs[0].SectionName, }, }, @@ -1213,16 +1231,15 @@ func TestBuildGRPCRouteWithMirrorRoutes(t *testing.T) { } validator := &validationfakes.FakeHTTPFieldsValidator{} - gatewayNsNames := []types.NamespacedName{gatewayNsName} snippetsFilters := map[types.NamespacedName]*SnippetsFilter{} g := NewWithT(t) routes := map[RouteKey]*L7Route{} - l7route := buildGRPCRoute(validator, gr, gatewayNsNames, false, snippetsFilters) + l7route := buildGRPCRoute(validator, gr, gateways, snippetsFilters) g.Expect(l7route).NotTo(BeNil()) - buildGRPCMirrorRoutes(routes, l7route, gr, gatewayNsNames, snippetsFilters, false) + buildGRPCMirrorRoutes(routes, l7route, gr, gateways, snippetsFilters) obj, ok := expectedMirrorRoute.Source.(*v1.GRPCRoute) g.Expect(ok).To(BeTrue()) diff --git a/internal/mode/static/state/graph/httproute.go b/internal/mode/static/state/graph/httproute.go index f10df6965b..408d742dd5 100644 --- a/internal/mode/static/state/graph/httproute.go +++ b/internal/mode/static/state/graph/httproute.go @@ -25,7 +25,7 @@ var ( func buildHTTPRoute( validator validation.HTTPFieldsValidator, ghr *v1.HTTPRoute, - gatewayNsNames []types.NamespacedName, + gws map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) *L7Route { r := &L7Route{ @@ -33,7 +33,7 @@ func buildHTTPRoute( RouteType: RouteTypeHTTP, } - sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gatewayNsNames) + sectionNameRefs, err := buildSectionNameRefs(ghr.Spec.ParentRefs, ghr.Namespace, gws) if err != nil { r.Valid = false @@ -75,7 +75,7 @@ func buildHTTPMirrorRoutes( routes map[RouteKey]*L7Route, l7route *L7Route, route *v1.HTTPRoute, - gatewayNsNames []types.NamespacedName, + gateways map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) { for idx, rule := range l7route.Spec.Rules { @@ -106,7 
+106,7 @@ func buildHTTPMirrorRoutes( mirrorRoute := buildHTTPRoute( validation.SkipValidator{}, tmpMirrorRoute, - gatewayNsNames, + gateways, snippetsFilters, ) diff --git a/internal/mode/static/state/graph/httproute_test.go b/internal/mode/static/state/graph/httproute_test.go index ce0f794bc6..ee0d2dac27 100644 --- a/internal/mode/static/state/graph/httproute_test.go +++ b/internal/mode/static/state/graph/httproute_test.go @@ -91,8 +91,21 @@ func addFilterToPath(hr *gatewayv1.HTTPRoute, path string, filter gatewayv1.HTTP func TestBuildHTTPRoutes(t *testing.T) { t.Parallel() + gwNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gateways := map[types.NamespacedName]*Gateway{ + gwNsName: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + }, + } + hr := createHTTPRoute("hr-1", gwNsName.Name, "example.com", "/") snippetsFilterRef := gatewayv1.HTTPRouteFilter{ Type: gatewayv1.HTTPRouteFilterExtensionRef, @@ -133,12 +146,12 @@ func TestBuildHTTPRoutes(t *testing.T) { } tests := []struct { - expected map[RouteKey]*L7Route - name string - gwNsNames []types.NamespacedName + expected map[RouteKey]*L7Route + gateways map[types.NamespacedName]*Gateway + name string }{ { - gwNsNames: []types.NamespacedName{gwNsName}, + gateways: gateways, expected: map[RouteKey]*L7Route{ CreateRouteKey(hr): { Source: hr, @@ -146,7 +159,7 @@ func TestBuildHTTPRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: CreateParentRefGateway(gateways[gwNsName]), SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -193,9 +206,9 @@ func TestBuildHTTPRoutes(t *testing.T) { name: "normal case", }, { - gwNsNames: []types.NamespacedName{}, - expected: nil, - name: "no gateways", + gateways: map[types.NamespacedName]*Gateway{}, + expected: nil, + name: "no gateways", }, } @@ -220,8 +233,7 @@ func TestBuildHTTPRoutes(t *testing.T) { validator, hrRoutes, map[types.NamespacedName]*gatewayv1.GRPCRoute{}, - test.gwNsNames, - nil, + test.gateways, snippetsFilters, ) g.Expect(helpers.Diff(test.expected, routes)).To(BeEmpty()) @@ -236,7 +248,16 @@ func TestBuildHTTPRoute(t *testing.T) { invalidRedirectHostname = "invalid.example.com" ) - gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gw := &Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + } + gatewayNsName := client.ObjectKeyFromObject(gw.Source) // route with valid filter validFilter := gatewayv1.HTTPRouteFilter{ @@ -358,7 +379,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -401,7 +422,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidMatchesEmptyPathType.Spec.ParentRefs[0].SectionName, }, }, @@ -447,7 +468,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidMatchesEmptyPathValue.Spec.ParentRefs[0].SectionName, }, }, @@ -490,7 +511,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidHostname.Spec.ParentRefs[0].SectionName, }, }, @@ -513,7 
+534,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidMatches.Spec.ParentRefs[0].SectionName, }, }, @@ -550,7 +571,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidFilters.Spec.ParentRefs[0].SectionName, }, }, @@ -588,7 +609,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrDroppedInvalidMatches.Spec.ParentRefs[0].SectionName, }, }, @@ -635,7 +656,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrDroppedInvalidMatchesAndInvalidFilters.Spec.ParentRefs[0].SectionName, }, }, @@ -694,7 +715,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrDroppedInvalidFilters.Spec.ParentRefs[0].SectionName, }, }, @@ -741,7 +762,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrValidSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -783,7 +804,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -821,7 +842,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -860,7 +881,7 @@ func TestBuildHTTPRoute(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: CreateParentRefGateway(gw), SectionName: hrInvalidAndUnresolvableSnippetsFilter.Spec.ParentRefs[0].SectionName, }, }, @@ -896,7 +917,9 @@ func TestBuildHTTPRoute(t *testing.T) { }, } - gatewayNsNames := []types.NamespacedName{gatewayNsName} + gws := map[types.NamespacedName]*Gateway{ + gatewayNsName: gw, + } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -907,7 +930,7 @@ func TestBuildHTTPRoute(t *testing.T) { {Namespace: "test", Name: "sf"}: {Valid: true}, } - route := buildHTTPRoute(test.validator, test.hr, gatewayNsNames, snippetsFilters) + route := buildHTTPRoute(test.validator, test.hr, gws, snippetsFilters) g.Expect(helpers.Diff(test.expected, route)).To(BeEmpty()) }) } @@ -915,8 +938,24 @@ func TestBuildHTTPRoute(t *testing.T) { func TestBuildHTTPRouteWithMirrorRoutes(t *testing.T) { t.Parallel() + gatewayNsName := types.NamespacedName{Namespace: "test", Name: "gateway"} + gateways := map[types.NamespacedName]*Gateway{ + gatewayNsName: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(false), + }, + }, + } + // Create a route with a request mirror filter and another random filter mirrorFilter := gatewayv1.HTTPRouteFilter{ Type: gatewayv1.HTTPRouteFilterRequestMirror, @@ -974,7 +1013,7 @@ func TestBuildHTTPRouteWithMirrorRoutes(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: gatewayNsName, + Gateway: 
CreateParentRefGateway(gateways[gatewayNsName]), SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -1018,16 +1057,15 @@ func TestBuildHTTPRouteWithMirrorRoutes(t *testing.T) { } validator := &validationfakes.FakeHTTPFieldsValidator{} - gatewayNsNames := []types.NamespacedName{gatewayNsName} snippetsFilters := map[types.NamespacedName]*SnippetsFilter{} g := NewWithT(t) routes := map[RouteKey]*L7Route{} - l7route := buildHTTPRoute(validator, hr, gatewayNsNames, snippetsFilters) + l7route := buildHTTPRoute(validator, hr, gateways, snippetsFilters) g.Expect(l7route).NotTo(BeNil()) - buildHTTPMirrorRoutes(routes, l7route, hr, gatewayNsNames, snippetsFilters) + buildHTTPMirrorRoutes(routes, l7route, hr, gateways, snippetsFilters) obj, ok := expectedMirrorRoute.Source.(*gatewayv1.HTTPRoute) g.Expect(ok).To(BeTrue()) diff --git a/internal/mode/static/state/graph/multiple_gateways_test.go b/internal/mode/static/state/graph/multiple_gateways_test.go new file mode 100644 index 0000000000..497fbd9680 --- /dev/null +++ b/internal/mode/static/state/graph/multiple_gateways_test.go @@ -0,0 +1,895 @@ +package graph + +import ( + "testing" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + "sigs.k8s.io/gateway-api/apis/v1beta1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" + "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" + staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation" + "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/validation/validationfakes" +) + +const ( + controllerName = "nginx" + gcName = "my-gateway-class" +) + +var ( + plusSecret = &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ngf", + Name: "plus-secret", + }, + Data: map[string][]byte{ + "license.jwt": []byte("license"), + }, + } + convertedPlusSecret = map[types.NamespacedName][]PlusSecretFile{ + client.ObjectKeyFromObject(plusSecret): { + { + Type: PlusReportJWTToken, + Content: []byte("license"), + FieldName: "license.jwt", + }, + }, + } + + supportedHTTPGRPC = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + {Kind: gatewayv1.Kind(kinds.GRPCRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + supportedTLS = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.TLSRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + + allowedRoutesHTTPGRPC = &gatewayv1.AllowedRoutes{ + Kinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.HTTPRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + {Kind: kinds.GRPCRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + } + allowedRoutesTLS = &gatewayv1.AllowedRoutes{ + Kinds: []gatewayv1.RouteGroupKind{ + {Kind: kinds.TLSRoute, Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + }, + } +) + +func createGateway(name, namespace, nginxProxyName string, listeners []gatewayv1.Listener) *gatewayv1.Gateway { + gateway := 
&gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: gatewayv1.GatewaySpec{ + GatewayClassName: gcName, + Listeners: listeners, + }, + } + + if nginxProxyName != "" { + gateway.Spec.Infrastructure = &gatewayv1.GatewayInfrastructure{ + ParametersRef: &gatewayv1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: nginxProxyName, + }, + } + } + + return gateway +} + +func createGatewayClass(name, controllerName, npName, npNamespace string) *gatewayv1.GatewayClass { + if npName == "" { + return &gatewayv1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: gatewayv1.GatewayClassSpec{ + ControllerName: gatewayv1.GatewayController(controllerName), + }, + } + } + return &gatewayv1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: gatewayv1.GatewayClassSpec{ + ControllerName: gatewayv1.GatewayController(controllerName), + ParametersRef: &gatewayv1.ParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: npName, + Namespace: helpers.GetPointer(gatewayv1.Namespace(npNamespace)), + }, + }, + } +} + +func convertedGatewayClass( + gc *gatewayv1.GatewayClass, + nginxProxy ngfAPIv1alpha2.NginxProxy, + cond ...conditions.Condition, +) *GatewayClass { + return &GatewayClass{ + Source: gc, + NginxProxy: &NginxProxy{ + Source: &nginxProxy, + Valid: true, + }, + Valid: true, + Conditions: cond, + } +} + +func createNginxProxy(name, namespace string, spec ngfAPIv1alpha2.NginxProxySpec) *ngfAPIv1alpha2.NginxProxy { + return &ngfAPIv1alpha2.NginxProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: spec, + } +} + +func convertedGateway( + gw *gatewayv1.Gateway, + nginxProxy *NginxProxy, + effectiveNp *EffectiveNginxProxy, + listeners []*Listener, + conds []conditions.Condition, +) *Gateway { + return &Gateway{ + Source: gw, + Valid: true, + NginxProxy: nginxProxy, + EffectiveNginxProxy: effectiveNp, + Listeners: listeners, + Conditions: conds, + DeploymentName: types.NamespacedName{ + Name: gw.Name + "-" + gcName, + Namespace: gw.Namespace, + }, + } +} + +func createListener( + name, hostname string, + port int32, + protocol gatewayv1.ProtocolType, + tlsConfig *gatewayv1.GatewayTLSConfig, + allowedRoutes *gatewayv1.AllowedRoutes, +) gatewayv1.Listener { + listener := gatewayv1.Listener{ + Name: gatewayv1.SectionName(name), + Hostname: (*gatewayv1.Hostname)(helpers.GetPointer(hostname)), + Port: gatewayv1.PortNumber(port), + Protocol: protocol, + AllowedRoutes: allowedRoutes, + } + + if tlsConfig != nil { + listener.TLS = tlsConfig + } + + return listener +} + +func convertListener( + listener gatewayv1.Listener, + gatewayNSName types.NamespacedName, + secret *v1.Secret, + supportedKinds []gatewayv1.RouteGroupKind, + l7Route map[RouteKey]*L7Route, + l4Route map[L4RouteKey]*L4Route, +) *Listener { + l := &Listener{ + Name: string(listener.Name), + GatewayName: gatewayNSName, + Source: listener, + L4Routes: l4Route, + Routes: l7Route, + Valid: true, + SupportedKinds: supportedKinds, + Attachable: true, + } + + if secret != nil { + l.ResolvedSecret = helpers.GetPointer(client.ObjectKeyFromObject(secret)) + } + return l +} + +// Test_MultipleGateways_WithNginxProxy tests how nginx proxy config is inherited or overwritten +// when multiple gateways are present in the cluster. 
+func Test_MultipleGateways_WithNginxProxy(t *testing.T) { + nginxProxyGlobal := createNginxProxy("nginx-proxy", testNs, ngfAPIv1alpha2.NginxProxySpec{ + DisableHTTP2: helpers.GetPointer(true), + }) + + nginxProxyGateway1 := createNginxProxy("nginx-proxy-gateway-1", testNs, ngfAPIv1alpha2.NginxProxySpec{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + }) + + nginxProxyGateway3 := createNginxProxy("nginx-proxy-gateway-3", "test2", ngfAPIv1alpha2.NginxProxySpec{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(3)), + }, + }, + DisableHTTP2: helpers.GetPointer(false), + }) + + gatewayClass := createGatewayClass(gcName, controllerName, "nginx-proxy", testNs) + gateway1 := createGateway("gateway-1", testNs, "", []gatewayv1.Listener{}) + gateway2 := createGateway("gateway-2", testNs, "", []gatewayv1.Listener{}) + gateway3 := createGateway("gateway-3", "test2", "", []gatewayv1.Listener{}) + + gateway1withNP := createGateway("gateway-1", testNs, "nginx-proxy-gateway-1", []gatewayv1.Listener{}) + gateway3withNP := createGateway("gateway-3", "test2", "nginx-proxy-gateway-3", []gatewayv1.Listener{}) + + gcConditions := []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()} + + tests := []struct { + clusterState ClusterState + expGraph *Graph + name string + }{ + { + name: "gateway class with nginx proxy, multiple gateways inheriting settings from global nginx proxy", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gateway1): gateway1, + client.ObjectKeyFromObject(gateway2): gateway2, + client.ObjectKeyFromObject(gateway3): gateway3, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, gcConditions...), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gateway1): convertedGateway( + gateway1, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + client.ObjectKeyFromObject(gateway2): convertedGateway( + gateway2, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + client.ObjectKeyFromObject(gateway3): convertedGateway( + gateway3, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + }, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): { + Source: nginxProxyGlobal, + Valid: true, + }, + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + }, + }, + { + name: "gateway class with nginx proxy, multiple gateways with their own referenced nginx proxy", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + 
client.ObjectKeyFromObject(gateway1withNP): gateway1withNP, + client.ObjectKeyFromObject(gateway2): gateway2, + client.ObjectKeyFromObject(gateway3withNP): gateway3withNP, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + client.ObjectKeyFromObject(nginxProxyGateway1): nginxProxyGateway1, + client.ObjectKeyFromObject(nginxProxyGateway3): nginxProxyGateway3, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, gcConditions...), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gateway1withNP): convertedGateway( + gateway1withNP, + &NginxProxy{Source: nginxProxyGateway1, Valid: true}, + &EffectiveNginxProxy{ + Logging: &ngfAPIv1alpha2.NginxLogging{ + ErrorLevel: helpers.GetPointer(ngfAPIv1alpha2.NginxLogLevelDebug), + AgentLevel: helpers.GetPointer(ngfAPIv1alpha2.AgentLogLevelDebug), + }, + DisableHTTP2: helpers.GetPointer(true), + }, + []*Listener{}, + gcConditions, + ), + client.ObjectKeyFromObject(gateway2): convertedGateway( + gateway2, + nil, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{}, + nil, + ), + client.ObjectKeyFromObject(gateway3withNP): convertedGateway( + gateway3withNP, + &NginxProxy{Source: nginxProxyGateway3, Valid: true}, + &EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Deployment: &ngfAPIv1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(3)), + }, + }, + DisableHTTP2: helpers.GetPointer(false), + }, + []*Listener{}, + gcConditions, + ), + }, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + client.ObjectKeyFromObject(nginxProxyGateway1): {Source: nginxProxyGateway1, Valid: true}, + client.ObjectKeyFromObject(nginxProxyGateway3): {Source: nginxProxyGateway3, Valid: true}, + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + format.MaxLength = 10000000 + + fakePolicyValidator := &validationfakes.FakePolicyValidator{} + + result := BuildGraph( + test.clusterState, + controllerName, + gcName, + map[types.NamespacedName][]PlusSecretFile{ + client.ObjectKeyFromObject(plusSecret): { + { + Type: PlusReportJWTToken, + FieldName: "license.jwt", + }, + }, + }, + validation.Validators{ + HTTPFieldsValidator: &validationfakes.FakeHTTPFieldsValidator{}, + GenericValidator: &validationfakes.FakeGenericValidator{}, + PolicyValidator: fakePolicyValidator, + }, + ) + + g.Expect(helpers.Diff(test.expGraph, result)).To(BeEmpty()) + }) + } +} + +// Test_MultipleGateways_WithListeners tests how listeners attach and interact with multiple gateways. 
+func Test_MultipleGateways_WithListeners(t *testing.T) { + nginxProxyGlobal := createNginxProxy("nginx-proxy", testNs, ngfAPIv1alpha2.NginxProxySpec{ + DisableHTTP2: helpers.GetPointer(true), + }) + gatewayClass := createGatewayClass(gcName, controllerName, "nginx-proxy", testNs) + + secretDiffNs := &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "secret-ns", + Name: "secret", + }, + Data: map[string][]byte{ + v1.TLSCertKey: cert, + v1.TLSPrivateKeyKey: key, + }, + Type: v1.SecretTypeTLS, + } + + rgSecretsToGateway := &v1beta1.ReferenceGrant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rg-secret-to-gateway", + Namespace: "secret-ns", + }, + Spec: v1beta1.ReferenceGrantSpec{ + From: []v1beta1.ReferenceGrantFrom{ + { + Group: gatewayv1.GroupName, + Kind: kinds.Gateway, + Namespace: gatewayv1.Namespace(testNs), + }, + }, + To: []v1beta1.ReferenceGrantTo{ + { + Group: "core", + Kind: "Secret", + Name: helpers.GetPointer[gatewayv1.ObjectName]("secret"), + }, + }, + }, + } + + tlsConfigDiffNsSecret := &gatewayv1.GatewayTLSConfig{ + Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), + CertificateRefs: []gatewayv1.SecretObjectReference{ + { + Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), + Name: gatewayv1.ObjectName(secretDiffNs.Name), + Namespace: helpers.GetPointer(gatewayv1.Namespace(secretDiffNs.Namespace)), + }, + }, + } + + gateway1 := createGateway("gateway-1", testNs, "nginx-proxy", []gatewayv1.Listener{ + createListener( + "listener-tls-mode-terminate", + "*.example.com", + 443, + gatewayv1.HTTPSProtocolType, + tlsConfigDiffNsSecret, + allowedRoutesHTTPGRPC, + ), + }) + gateway2 := createGateway("gateway-2", testNs, "nginx-proxy", []gatewayv1.Listener{ + createListener( + "listener-tls-mode-terminate", + "*.example.com", + 443, + gatewayv1.HTTPSProtocolType, + tlsConfigDiffNsSecret, + allowedRoutesHTTPGRPC, + ), + }) + + tlsConfigPassthrough := &gatewayv1.GatewayTLSConfig{ + Mode: helpers.GetPointer(gatewayv1.TLSModePassthrough), + } + + secretSameNs := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "secret", + }, + Data: map[string][]byte{ + v1.TLSCertKey: cert, + v1.TLSPrivateKeyKey: key, + }, + Type: v1.SecretTypeTLS, + } + + gatewayTLSConfigSameNs := &gatewayv1.GatewayTLSConfig{ + Mode: helpers.GetPointer(gatewayv1.TLSModeTerminate), + CertificateRefs: []gatewayv1.SecretObjectReference{ + { + Kind: helpers.GetPointer[gatewayv1.Kind]("Secret"), + Name: gatewayv1.ObjectName(secretSameNs.Name), + Namespace: (*gatewayv1.Namespace)(&secretSameNs.Namespace), + }, + }, + } + + // valid http, https and tls listeners + listeners := []gatewayv1.Listener{ + createListener( + "foo-listener-http", + "foo.example.com", + 80, + gatewayv1.HTTPProtocolType, + nil, + allowedRoutesHTTPGRPC, + ), + createListener( + "foo-listener-https", + "tea.example.com", + 443, + gatewayv1.HTTPSProtocolType, + gatewayTLSConfigSameNs, + allowedRoutesHTTPGRPC, + ), + createListener( + "listener-tls-mode-passthrough", + "cafe.example.com", + 8443, + gatewayv1.TLSProtocolType, + tlsConfigPassthrough, + allowedRoutesTLS, + ), + } + gatewayMultipleListeners1 := createGateway("gateway-multiple-listeners-1", testNs, "nginx-proxy", listeners) + gatewayMultipleListeners2 := createGateway("gateway-multiple-listeners-2", testNs, "nginx-proxy", listeners) + gatewayMultipleListeners3 := createGateway("gateway-multiple-listeners-3", testNs, "nginx-proxy", listeners) + + // valid TLS and https listener same port and hostname + 
gatewayTLSSamePortHostname := createGateway( + "gateway-tls-foo", + testNs, + "nginx-proxy", + []gatewayv1.Listener{ + createListener( + "foo-listener-tls", + "foo.example.com", + 443, + gatewayv1.TLSProtocolType, + tlsConfigPassthrough, + allowedRoutesTLS, + ), + }, + ) + + gatewayHTTPSSamePortHostname := createGateway( + "gateway-http-foo", + testNs, + "nginx-proxy", + []gatewayv1.Listener{ + createListener( + "foo-listener-tls", + "foo.example.com", + 443, + gatewayv1.HTTPSProtocolType, + gatewayTLSConfigSameNs, + allowedRoutesHTTPGRPC, + ), + }, + ) + + tests := []struct { + clusterState ClusterState + expGraph *Graph + name string + }{ + { + name: "multiple gateways with tls listeners, have reference grants to access the secret", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + client.ObjectKeyFromObject(secretDiffNs): secretDiffNs, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gateway1): gateway1, + client.ObjectKeyFromObject(gateway2): gateway2, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + }, + ReferenceGrants: map[types.NamespacedName]*v1beta1.ReferenceGrant{ + client.ObjectKeyFromObject(rgSecretsToGateway): rgSecretsToGateway, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, staticConds.NewGatewayClassResolvedRefs()), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gateway1): convertedGateway( + gateway1, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gateway1.Spec.Listeners[0], + client.ObjectKeyFromObject(gateway1), + secretDiffNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gateway2): convertedGateway( + gateway2, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gateway2.Spec.Listeners[0], + client.ObjectKeyFromObject(gateway2), + secretDiffNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + }, + ReferencedSecrets: map[types.NamespacedName]*Secret{ + client.ObjectKeyFromObject(secretDiffNs): { + Source: secretDiffNs, + CertBundle: NewCertificateBundle(client.ObjectKeyFromObject(secretDiffNs), "Secret", &Certificate{ + TLSCert: cert, + TLSPrivateKey: key, + }), + }, + }, + }, + }, + { + name: "valid http, https and tls listeners across multiple gateways with same port references," + + "leads to no port conflict", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: 
map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gatewayMultipleListeners1): gatewayMultipleListeners1, + client.ObjectKeyFromObject(gatewayMultipleListeners2): gatewayMultipleListeners2, + client.ObjectKeyFromObject(gatewayMultipleListeners3): gatewayMultipleListeners3, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + client.ObjectKeyFromObject(secretSameNs): secretSameNs, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, staticConds.NewGatewayClassResolvedRefs()), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gatewayMultipleListeners1): convertedGateway( + gatewayMultipleListeners1, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayMultipleListeners1.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayMultipleListeners1), + nil, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners1.Spec.Listeners[1], + client.ObjectKeyFromObject(gatewayMultipleListeners1), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners1.Spec.Listeners[2], + client.ObjectKeyFromObject(gatewayMultipleListeners1), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gatewayMultipleListeners2): convertedGateway( + gatewayMultipleListeners2, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayMultipleListeners2.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayMultipleListeners2), + nil, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners2.Spec.Listeners[1], + client.ObjectKeyFromObject(gatewayMultipleListeners2), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners2.Spec.Listeners[2], + client.ObjectKeyFromObject(gatewayMultipleListeners2), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gatewayMultipleListeners3): convertedGateway( + gatewayMultipleListeners3, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayMultipleListeners3.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayMultipleListeners3), + nil, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners3.Spec.Listeners[1], + client.ObjectKeyFromObject(gatewayMultipleListeners3), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + convertListener( + gatewayMultipleListeners3.Spec.Listeners[2], + client.ObjectKeyFromObject(gatewayMultipleListeners3), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + 
map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + }, + ReferencedSecrets: map[types.NamespacedName]*Secret{ + client.ObjectKeyFromObject(secretSameNs): { + Source: secretSameNs, + CertBundle: NewCertificateBundle(client.ObjectKeyFromObject(secretSameNs), "Secret", &Certificate{ + TLSCert: cert, + TLSPrivateKey: key, + }), + }, + }, + }, + }, + { + name: "valid tls and https listeners across multiple gateways with same port and hostname causes no conflict", + clusterState: ClusterState{ + GatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ + client.ObjectKeyFromObject(gatewayClass): gatewayClass, + }, + Gateways: map[types.NamespacedName]*gatewayv1.Gateway{ + client.ObjectKeyFromObject(gatewayTLSSamePortHostname): gatewayTLSSamePortHostname, + client.ObjectKeyFromObject(gatewayHTTPSSamePortHostname): gatewayHTTPSSamePortHostname, + }, + NginxProxies: map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): nginxProxyGlobal, + }, + Secrets: map[types.NamespacedName]*v1.Secret{ + client.ObjectKeyFromObject(plusSecret): plusSecret, + client.ObjectKeyFromObject(secretSameNs): secretSameNs, + }, + }, + expGraph: &Graph{ + GatewayClass: convertedGatewayClass(gatewayClass, *nginxProxyGlobal, staticConds.NewGatewayClassResolvedRefs()), + Gateways: map[types.NamespacedName]*Gateway{ + client.ObjectKeyFromObject(gatewayTLSSamePortHostname): convertedGateway( + gatewayTLSSamePortHostname, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayTLSSamePortHostname.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayTLSSamePortHostname), + nil, + supportedTLS, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + client.ObjectKeyFromObject(gatewayHTTPSSamePortHostname): convertedGateway( + gatewayHTTPSSamePortHostname, + &NginxProxy{Source: nginxProxyGlobal, Valid: true}, + &EffectiveNginxProxy{DisableHTTP2: helpers.GetPointer(true)}, + []*Listener{ + convertListener( + gatewayHTTPSSamePortHostname.Spec.Listeners[0], + client.ObjectKeyFromObject(gatewayHTTPSSamePortHostname), + secretSameNs, + supportedHTTPGRPC, + map[RouteKey]*L7Route{}, + map[L4RouteKey]*L4Route{}, + ), + }, + []conditions.Condition{staticConds.NewGatewayClassResolvedRefs()}, + ), + }, + Routes: map[RouteKey]*L7Route{}, + L4Routes: map[L4RouteKey]*L4Route{}, + PlusSecrets: convertedPlusSecret, + ReferencedNginxProxies: map[types.NamespacedName]*NginxProxy{ + client.ObjectKeyFromObject(nginxProxyGlobal): {Source: nginxProxyGlobal, Valid: true}, + }, + ReferencedSecrets: map[types.NamespacedName]*Secret{ + client.ObjectKeyFromObject(secretSameNs): { + Source: secretSameNs, + CertBundle: NewCertificateBundle(client.ObjectKeyFromObject(secretSameNs), "Secret", &Certificate{ + TLSCert: cert, + TLSPrivateKey: key, + }), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + format.MaxLength = 10000000 + + fakePolicyValidator := &validationfakes.FakePolicyValidator{} + + result := BuildGraph( + 
test.clusterState, + controllerName, + gcName, + map[types.NamespacedName][]PlusSecretFile{ + client.ObjectKeyFromObject(plusSecret): { + { + Type: PlusReportJWTToken, + FieldName: "license.jwt", + }, + }, + }, + validation.Validators{ + HTTPFieldsValidator: &validationfakes.FakeHTTPFieldsValidator{}, + GenericValidator: &validationfakes.FakeGenericValidator{}, + PolicyValidator: fakePolicyValidator, + }, + ) + + g.Expect(helpers.Diff(test.expGraph, result)).To(BeEmpty()) + }) + } +} diff --git a/internal/mode/static/state/graph/namespace.go b/internal/mode/static/state/graph/namespace.go index 481e4d749b..8cbda90f7e 100644 --- a/internal/mode/static/state/graph/namespace.go +++ b/internal/mode/static/state/graph/namespace.go @@ -10,12 +10,12 @@ import ( // a label that matches any of the Gateway Listener's label selector. func buildReferencedNamespaces( clusterNamespaces map[types.NamespacedName]*v1.Namespace, - gw *Gateway, + gateways map[types.NamespacedName]*Gateway, ) map[types.NamespacedName]*v1.Namespace { referencedNamespaces := make(map[types.NamespacedName]*v1.Namespace) for name, ns := range clusterNamespaces { - if isNamespaceReferenced(ns, gw) { + if isNamespaceReferenced(ns, gateways) { referencedNamespaces[name] = ns } } @@ -28,19 +28,21 @@ func buildReferencedNamespaces( // isNamespaceReferenced returns true if a given Namespace resource has a label // that matches any of the Gateway Listener's label selector. -func isNamespaceReferenced(ns *v1.Namespace, gw *Gateway) bool { - if gw == nil || ns == nil { +func isNamespaceReferenced(ns *v1.Namespace, gws map[types.NamespacedName]*Gateway) bool { + if ns == nil || len(gws) == 0 { return false } nsLabels := labels.Set(ns.GetLabels()) - for _, listener := range gw.Listeners { - if listener.AllowedRouteLabelSelector == nil { - // Can have listeners with AllowedRouteLabelSelector not set. - continue - } - if listener.AllowedRouteLabelSelector.Matches(nsLabels) { - return true + for _, gw := range gws { + for _, listener := range gw.Listeners { + if listener.AllowedRouteLabelSelector == nil { + // Can have listeners with AllowedRouteLabelSelector not set. + continue + } + if listener.AllowedRouteLabelSelector.Matches(nsLabels) { + return true + } } } diff --git a/internal/mode/static/state/graph/namespace_test.go b/internal/mode/static/state/graph/namespace_test.go index 372fd3d12d..af2e2ecc2b 100644 --- a/internal/mode/static/state/graph/namespace_test.go +++ b/internal/mode/static/state/graph/namespace_test.go @@ -4,9 +4,8 @@ import ( "testing" . 
"github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" ) @@ -44,20 +43,22 @@ func TestBuildReferencedNamespaces(t *testing.T) { } tests := []struct { - gw *Gateway + gws map[types.NamespacedName]*Gateway expectedRefNS map[types.NamespacedName]*v1.Namespace name string }{ { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-2", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-2", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -65,20 +66,22 @@ func TestBuildReferencedNamespaces(t *testing.T) { name: "gateway matches labels with one namespace", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), - }, - { - Name: "listener-2", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"peaches": "bananas"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, + { + Name: "listener-2", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"peaches": "bananas"}), + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -87,60 +90,67 @@ func TestBuildReferencedNamespaces(t *testing.T) { name: "gateway matches labels with two namespaces", }, { - gw: &Gateway{ - Listeners: []*Listener{}, - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{}, + Valid: true, + }, }, expectedRefNS: nil, name: "gateway has no Listeners", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - }, - { - Name: "listener-2", - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + }, + { + Name: "listener-2", + Valid: true, + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: nil, name: "gateway has multiple listeners with no AllowedRouteLabelSelector set", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + }, }, + Valid: true, }, - Valid: true, }, - expectedRefNS: nil, name: "gateway doesn't match labels with any namespace", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), - }, - { - Name: "listener-2", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: 
"listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, + { + Name: "listener-2", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"not": "matching"}), + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -148,19 +158,21 @@ func TestBuildReferencedNamespaces(t *testing.T) { name: "gateway has two listeners and only matches labels with one namespace", }, { - gw: &Gateway{ - Listeners: []*Listener{ - { - Name: "listener-1", - Valid: true, - AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), - }, - { - Name: "listener-2", - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {}: { + Listeners: []*Listener{ + { + Name: "listener-1", + Valid: true, + AllowedRouteLabelSelector: labels.SelectorFromSet(map[string]string{"apples": "oranges"}), + }, + { + Name: "listener-2", + Valid: true, + }, }, + Valid: true, }, - Valid: true, }, expectedRefNS: map[types.NamespacedName]*v1.Namespace{ {Name: "ns2"}: ns2, @@ -173,7 +185,7 @@ func TestBuildReferencedNamespaces(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildReferencedNamespaces(clusterNamespaces, test.gw)).To(Equal(test.expectedRefNS)) + g.Expect(buildReferencedNamespaces(clusterNamespaces, test.gws)).To(Equal(test.expectedRefNS)) }) } } @@ -182,13 +194,13 @@ func TestIsNamespaceReferenced(t *testing.T) { t.Parallel() tests := []struct { ns *v1.Namespace - gw *Gateway + gws map[types.NamespacedName]*Gateway name string exp bool }{ { ns: nil, - gw: nil, + gws: nil, exp: false, name: "namespace and gateway are nil", }, @@ -198,15 +210,17 @@ func TestIsNamespaceReferenced(t *testing.T) { Name: "ns1", }, }, - gw: nil, + gws: nil, exp: false, name: "namespace is valid but gateway is nil", }, { ns: nil, - gw: &Gateway{ - Listeners: []*Listener{}, - Valid: true, + gws: map[types.NamespacedName]*Gateway{ + {Name: "ns1"}: { + Listeners: []*Listener{}, + Valid: true, + }, }, exp: false, name: "gateway is valid but namespace is nil", @@ -218,7 +232,7 @@ func TestIsNamespaceReferenced(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(isNamespaceReferenced(test.ns, test.gw)).To(Equal(test.exp)) + g.Expect(isNamespaceReferenced(test.ns, test.gws)).To(Equal(test.exp)) }) } } diff --git a/internal/mode/static/state/graph/nginxproxy.go b/internal/mode/static/state/graph/nginxproxy.go index e9993ab73f..3b72161233 100644 --- a/internal/mode/static/state/graph/nginxproxy.go +++ b/internal/mode/static/state/graph/nginxproxy.go @@ -127,7 +127,7 @@ func processNginxProxies( nps map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy, validator validation.GenericValidator, gc *v1.GatewayClass, - winningGateway *v1.Gateway, + gws map[types.NamespacedName]*v1.Gateway, ) map[types.NamespacedName]*NginxProxy { referencedNginxProxies := make(map[types.NamespacedName]*NginxProxy) @@ -146,14 +146,17 @@ func processNginxProxies( } } - if gwReferencesAnyNginxProxy(winningGateway) { - refNp := types.NamespacedName{ - Name: winningGateway.Spec.Infrastructure.ParametersRef.Name, - Namespace: winningGateway.Namespace, - } - - if np, ok := nps[refNp]; ok { - referencedNginxProxies[refNp] = buildNginxProxy(np, validator) + for _, gw := range gws { + if gwReferencesAnyNginxProxy(gw) { + refNp := types.NamespacedName{ + Name: gw.Spec.Infrastructure.ParametersRef.Name, + Namespace: 
gw.Namespace, + } + if np, ok := nps[refNp]; ok { + referencedNginxProxies[refNp] = buildNginxProxy(np, validator) + } else { + referencedNginxProxies[refNp] = nil + } } } diff --git a/internal/mode/static/state/graph/nginxproxy_test.go b/internal/mode/static/state/graph/nginxproxy_test.go index bf074ab562..4e21c7283d 100644 --- a/internal/mode/static/state/graph/nginxproxy_test.go +++ b/internal/mode/static/state/graph/nginxproxy_test.go @@ -481,16 +481,18 @@ func TestProcessNginxProxies(t *testing.T) { } } - gateway := &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "gw-ns", - }, - Spec: v1.GatewaySpec{ - Infrastructure: &v1.GatewayInfrastructure{ - ParametersRef: &v1.LocalParametersReference{ - Group: ngfAPIv1alpha2.GroupName, - Kind: kinds.NginxProxy, - Name: gatewayNpName.Name, + gateway := map[types.NamespacedName]*v1.Gateway{ + gatewayNpName: { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "gw-ns", + }, + Spec: v1.GatewaySpec{ + Infrastructure: &v1.GatewayInfrastructure{ + ParametersRef: &v1.LocalParametersReference{ + Group: ngfAPIv1alpha2.GroupName, + Kind: kinds.NginxProxy, + Name: gatewayNpName.Name, + }, }, }, }, @@ -551,7 +553,7 @@ func TestProcessNginxProxies(t *testing.T) { validator validation.GenericValidator nps map[types.NamespacedName]*ngfAPIv1alpha2.NginxProxy gc *v1.GatewayClass - gw *v1.Gateway + gws map[types.NamespacedName]*v1.Gateway expResult map[types.NamespacedName]*NginxProxy name string }{ @@ -559,9 +561,9 @@ func TestProcessNginxProxies(t *testing.T) { name: "no nginx proxies", nps: nil, gc: gatewayClass, - gw: gateway, + gws: gateway, validator: createValidValidator(), - expResult: nil, + expResult: map[types.NamespacedName]*NginxProxy{gatewayNpName: nil}, }, { name: "gateway class param ref is missing namespace", @@ -570,7 +572,7 @@ func TestProcessNginxProxies(t *testing.T) { gatewayNpName: getTestNp(gatewayNpName), }, gc: gatewayClassRefMissingNs, - gw: gateway, + gws: gateway, validator: createValidValidator(), expResult: map[types.NamespacedName]*NginxProxy{ gatewayNpName: { @@ -583,7 +585,7 @@ func TestProcessNginxProxies(t *testing.T) { name: "normal case; both nginx proxies are valid", nps: getNpMap(), gc: gatewayClass, - gw: gateway, + gws: gateway, validator: createValidValidator(), expResult: getExpResult(true), }, @@ -591,7 +593,7 @@ func TestProcessNginxProxies(t *testing.T) { name: "normal case; both nginx proxies are invalid", nps: getNpMap(), gc: gatewayClass, - gw: gateway, + gws: gateway, validator: createInvalidValidator(), expResult: getExpResult(false), }, @@ -606,7 +608,7 @@ func TestProcessNginxProxies(t *testing.T) { test.nps, test.validator, test.gc, - test.gw, + test.gws, ) g.Expect(helpers.Diff(test.expResult, result)).To(BeEmpty()) diff --git a/internal/mode/static/state/graph/policies.go b/internal/mode/static/state/graph/policies.go index 04fb6a0767..50dd3c3601 100644 --- a/internal/mode/static/state/graph/policies.go +++ b/internal/mode/static/state/graph/policies.go @@ -21,6 +21,9 @@ import ( type Policy struct { // Source is the corresponding Policy resource. Source policies.Policy + // InvalidForGateways is a map of Gateways for which this Policy is invalid for. Certain NginxProxy + // configurations may result in a policy not being valid for some Gateways, but not others. + InvalidForGateways map[types.NamespacedName]struct{} // Ancestors is a list of ancestor objects of the Policy. Used in status. Ancestors []PolicyAncestor // TargetRefs are the resources that the Policy targets. 
@@ -67,8 +70,8 @@ const ( ) // attachPolicies attaches the graph's processed policies to the resources they target. It modifies the graph in place. -func (g *Graph) attachPolicies(ctlrName string) { - if g.Gateway == nil { +func (g *Graph) attachPolicies(validator validation.PolicyValidator, ctlrName string) { + if len(g.Gateways) == 0 { return } @@ -76,21 +79,21 @@ func (g *Graph) attachPolicies(ctlrName string) { for _, ref := range policy.TargetRefs { switch ref.Kind { case kinds.Gateway: - attachPolicyToGateway(policy, ref, g.Gateway, g.IgnoredGateways, ctlrName) + attachPolicyToGateway(policy, ref, g.Gateways, ctlrName) case kinds.HTTPRoute, kinds.GRPCRoute: route, exists := g.Routes[routeKeyForKind(ref.Kind, ref.Nsname)] if !exists { continue } - attachPolicyToRoute(policy, route, ctlrName) + attachPolicyToRoute(policy, route, validator, ctlrName) case kinds.Service: svc, exists := g.ReferencedServices[ref.Nsname] if !exists { continue } - attachPolicyToService(policy, svc, g.Gateway, ctlrName) + attachPolicyToService(policy, svc, g.Gateways, ctlrName) } } } @@ -99,35 +102,51 @@ func (g *Graph) attachPolicies(ctlrName string) { func attachPolicyToService( policy *Policy, svc *ReferencedService, - gw *Gateway, + gws map[types.NamespacedName]*Gateway, ctlrName string, ) { if ngfPolicyAncestorsFull(policy, ctlrName) { return } - ancestor := PolicyAncestor{ - Ancestor: createParentReference(v1.GroupName, kinds.Gateway, client.ObjectKeyFromObject(gw.Source)), - } + var validForAGateway bool + for gwNsName, gw := range gws { + if _, belongsToGw := svc.GatewayNsNames[gwNsName]; !belongsToGw { + continue + } - if !gw.Valid { - ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("Parent Gateway is invalid")} - if ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { - return + ancestor := PolicyAncestor{ + Ancestor: createParentReference(v1.GroupName, kinds.Gateway, client.ObjectKeyFromObject(gw.Source)), } - policy.Ancestors = append(policy.Ancestors, ancestor) - return - } + if !gw.Valid { + policy.InvalidForGateways[gwNsName] = struct{}{} + ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("Parent Gateway is invalid")} + if ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { + continue + } - if !ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { - policy.Ancestors = append(policy.Ancestors, ancestor) + policy.Ancestors = append(policy.Ancestors, ancestor) + continue + } + + if !ancestorsContainsAncestorRef(policy.Ancestors, ancestor.Ancestor) { + policy.Ancestors = append(policy.Ancestors, ancestor) + } + validForAGateway = true } - svc.Policies = append(svc.Policies, policy) + if validForAGateway { + svc.Policies = append(svc.Policies, policy) + } } -func attachPolicyToRoute(policy *Policy, route *L7Route, ctlrName string) { +func attachPolicyToRoute(policy *Policy, route *L7Route, validator validation.PolicyValidator, ctlrName string) { + if ngfPolicyAncestorsFull(policy, ctlrName) { + // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 + return + } + kind := v1.Kind(kinds.HTTPRoute) if route.RouteType == RouteTypeGRPC { kind = kinds.GRPCRoute @@ -139,31 +158,43 @@ func attachPolicyToRoute(policy *Policy, route *L7Route, ctlrName string) { Ancestor: createParentReference(v1.GroupName, kind, routeNsName), } - if ngfPolicyAncestorsFull(policy, ctlrName) { - // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 - 
return - } - if !route.Valid || !route.Attachable || len(route.ParentRefs) == 0 { ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is invalid")} policy.Ancestors = append(policy.Ancestors, ancestor) return } + // as of now, ObservabilityPolicy is the only policy that needs this check, and it only attaches to Routes + for _, parentRef := range route.ParentRefs { + if parentRef.Gateway != nil && parentRef.Gateway.EffectiveNginxProxy != nil { + gw := parentRef.Gateway + globalSettings := &policies.GlobalSettings{ + TelemetryEnabled: telemetryEnabledForNginxProxy(gw.EffectiveNginxProxy), + } + + if conds := validator.ValidateGlobalSettings(policy.Source, globalSettings); len(conds) > 0 { + policy.InvalidForGateways[gw.NamespacedName] = struct{}{} + ancestor.Conditions = append(ancestor.Conditions, conds...) + } + } + } + policy.Ancestors = append(policy.Ancestors, ancestor) + if len(policy.InvalidForGateways) == len(route.ParentRefs) { + return + } + route.Policies = append(route.Policies, policy) } func attachPolicyToGateway( policy *Policy, ref PolicyTargetRef, - gw *Gateway, - ignoredGateways map[types.NamespacedName]*v1.Gateway, + gateways map[types.NamespacedName]*Gateway, ctlrName string, ) { - _, ignored := ignoredGateways[ref.Nsname] - - if !ignored && ref.Nsname != client.ObjectKeyFromObject(gw.Source) { + if ngfPolicyAncestorsFull(policy, ctlrName) { + // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 return } @@ -171,18 +202,17 @@ func attachPolicyToGateway( Ancestor: createParentReference(v1.GroupName, kinds.Gateway, ref.Nsname), } - if ngfPolicyAncestorsFull(policy, ctlrName) { - // FIXME (kate-osborn): https://github.com/nginx/nginx-gateway-fabric/issues/1987 - return - } + gw, exists := gateways[ref.Nsname] - if ignored { - ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is ignored")} + if !exists || (gw != nil && gw.Source == nil) { + policy.InvalidForGateways[ref.Nsname] = struct{}{} + ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is not found")} policy.Ancestors = append(policy.Ancestors, ancestor) return } if !gw.Valid { + policy.InvalidForGateways[ref.Nsname] = struct{}{} ancestor.Conditions = []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is invalid")} policy.Ancestors = append(policy.Ancestors, ancestor) return @@ -195,12 +225,11 @@ func attachPolicyToGateway( func processPolicies( pols map[PolicyKey]policies.Policy, validator validation.PolicyValidator, - gateways processedGateways, routes map[RouteKey]*L7Route, services map[types.NamespacedName]*ReferencedService, - globalSettings *policies.GlobalSettings, + gws map[types.NamespacedName]*Gateway, ) map[PolicyKey]*Policy { - if len(pols) == 0 || gateways.Winner == nil { + if len(pols) == 0 || len(gws) == 0 { return nil } @@ -217,7 +246,7 @@ func processPolicies( switch refGroupKind(ref.Group, ref.Kind) { case gatewayGroupKind: - if !gatewayExists(refNsName, gateways.Winner, gateways.Ignored) { + if !gatewayExists(refNsName, gws) { continue } case hrGroupKind, grpcGroupKind: @@ -249,14 +278,15 @@ func processPolicies( overlapConds := checkTargetRoutesForOverlap(targetedRoutes, routes) conds = append(conds, overlapConds...) - conds = append(conds, validator.Validate(policy, globalSettings)...) + conds = append(conds, validator.Validate(policy)...) 
processedPolicies[key] = &Policy{ - Source: policy, - Valid: len(conds) == 0, - Conditions: conds, - TargetRefs: targetRefs, - Ancestors: make([]PolicyAncestor, 0, len(targetRefs)), + Source: policy, + Valid: len(conds) == 0, + Conditions: conds, + TargetRefs: targetRefs, + Ancestors: make([]PolicyAncestor, 0, len(targetRefs)), + InvalidForGateways: make(map[types.NamespacedName]struct{}), } } diff --git a/internal/mode/static/state/graph/policies_test.go b/internal/mode/static/state/graph/policies_test.go index a7a8b71dc2..4adda19357 100644 --- a/internal/mode/static/state/graph/policies_test.go +++ b/internal/mode/static/state/graph/policies_test.go @@ -11,6 +11,7 @@ import ( v1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha2" + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" @@ -75,8 +76,10 @@ func TestAttachPolicies(t *testing.T) { } expectNoGatewayPolicyAttachment := func(g *WithT, graph *Graph) { - if graph.Gateway != nil { - g.Expect(graph.Gateway.Policies).To(BeNil()) + for _, gw := range graph.Gateways { + if gw != nil { + g.Expect(gw.Policies).To(BeNil()) + } } } @@ -93,8 +96,10 @@ func TestAttachPolicies(t *testing.T) { } expectGatewayPolicyAttachment := func(g *WithT, graph *Graph) { - if graph.Gateway != nil { - g.Expect(graph.Gateway.Policies).To(HaveLen(1)) + for _, gw := range graph.Gateways { + if gw != nil { + g.Expect(gw.Policies).To(HaveLen(1)) + } } } @@ -144,26 +149,43 @@ func TestAttachPolicies(t *testing.T) { ) } - getGateway := func() *Gateway { - return &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: testNs, + getGateways := func() map[types.NamespacedName]*Gateway { + return map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gateway"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: testNs, + }, }, + Valid: true, + }, + {Namespace: testNs, Name: "gateway1"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway1", + Namespace: testNs, + }, + }, + Valid: true, }, - Valid: true, } } getServices := func() map[types.NamespacedName]*ReferencedService { return map[types.NamespacedName]*ReferencedService{ - {Namespace: testNs, Name: "svc-1"}: {}, + {Namespace: testNs, Name: "svc-1"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: testNs, Name: "gateway"}: {}, + {Namespace: testNs, Name: "gateway1"}: {}, + }, + Policies: nil, + }, } } tests := []struct { - gateway *Gateway + gateway map[types.NamespacedName]*Gateway routes map[RouteKey]*L7Route svcs map[types.NamespacedName]*ReferencedService ngfPolicies map[PolicyKey]*Policy @@ -178,7 +200,7 @@ func TestAttachPolicies(t *testing.T) { }, { name: "nil Routes; gateway and service policies attach", - gateway: getGateway(), + gateway: getGateways(), svcs: getServices(), ngfPolicies: getPolicies(), expects: []func(g *WithT, graph *Graph){ @@ -191,7 +213,7 @@ func TestAttachPolicies(t *testing.T) { name: "nil ReferencedServices; gateway and route policies attach", routes: getRoutes(), ngfPolicies: getPolicies(), - gateway: getGateway(), + gateway: getGateways(), expects: []func(g *WithT, graph *Graph){ expectGatewayPolicyAttachment, expectRoutePolicyAttachment, @@ -203,7 +225,7 @@ func TestAttachPolicies(t *testing.T) 
{ routes: getRoutes(), svcs: getServices(), ngfPolicies: getPolicies(), - gateway: getGateway(), + gateway: getGateways(), expects: expectAllAttachmentList, }, } @@ -214,13 +236,13 @@ func TestAttachPolicies(t *testing.T) { g := NewWithT(t) graph := &Graph{ - Gateway: test.gateway, + Gateways: test.gateway, Routes: test.routes, ReferencedServices: test.svcs, NGFPolicies: test.ngfPolicies, } - graph.attachPolicies("nginx-gateway") + graph.attachPolicies(nil, "nginx-gateway") for _, expect := range test.expects { expect(g, graph) } @@ -275,34 +297,49 @@ func TestAttachPolicyToRoute(t *testing.T) { } } + validatorError := &policiesfakes.FakeValidator{ + ValidateGlobalSettingsStub: func(_ policies.Policy, gs *policies.GlobalSettings) []conditions.Condition { + if !gs.TelemetryEnabled { + return []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + } + } + return nil + }, + } + tests := []struct { route *L7Route policy *Policy + validator policies.Validator name string expAncestors []PolicyAncestor expAttached bool }{ { - name: "policy attaches to http route", - route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "policy attaches to http route", + route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ {Ancestor: createExpAncestor(kinds.HTTPRoute)}, }, expAttached: true, }, { - name: "policy attaches to grpc route", - route: createGRPCRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "policy attaches to grpc route", + route: createGRPCRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ {Ancestor: createExpAncestor(kinds.GRPCRoute)}, }, expAttached: true, }, { - name: "attachment with existing ancestor", - route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + name: "attachment with existing ancestor", + route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, policy: &Policy{ Source: &policiesfakes.FakePolicy{}, Ancestors: []PolicyAncestor{ @@ -316,9 +353,10 @@ func TestAttachPolicyToRoute(t *testing.T) { expAttached: true, }, { - name: "no attachment; unattachable route", - route: createHTTPRoute(true /*valid*/, false /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "no attachment; unattachable route", + route: createHTTPRoute(true /*valid*/, false /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ { Ancestor: createExpAncestor(kinds.HTTPRoute), @@ -328,9 +366,10 @@ func TestAttachPolicyToRoute(t *testing.T) { expAttached: false, }, { - name: "no attachment; missing parentRefs", - route: createHTTPRoute(true /*valid*/, true /*attachable*/, false /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "no attachment; missing parentRefs", + route: createHTTPRoute(true /*valid*/, true /*attachable*/, false /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, 
+ policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ { Ancestor: createExpAncestor(kinds.HTTPRoute), @@ -340,9 +379,10 @@ func TestAttachPolicyToRoute(t *testing.T) { expAttached: false, }, { - name: "no attachment; invalid route", - route: createHTTPRoute(false /*valid*/, true /*attachable*/, true /*parentRefs*/), - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, + name: "no attachment; invalid route", + route: createHTTPRoute(false /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, + policy: &Policy{Source: &policiesfakes.FakePolicy{}}, expAncestors: []PolicyAncestor{ { Ancestor: createExpAncestor(kinds.HTTPRoute), @@ -354,10 +394,104 @@ func TestAttachPolicyToRoute(t *testing.T) { { name: "no attachment; max ancestors", route: createHTTPRoute(true /*valid*/, true /*attachable*/, true /*parentRefs*/), + validator: &policiesfakes.FakeValidator{}, policy: &Policy{Source: createTestPolicyWithAncestors(16)}, expAncestors: nil, expAttached: false, }, + { + name: "invalid for some ParentRefs", + route: &L7Route{ + Source: &v1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: routeNsName.Name, + Namespace: routeNsName.Namespace, + }, + }, + Valid: true, + Attachable: true, + RouteType: RouteTypeHTTP, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{Name: "gateway1", Namespace: "test"}, + EffectiveNginxProxy: &EffectiveNginxProxy{ + Telemetry: &ngfAPIv1alpha2.Telemetry{ + Exporter: &ngfAPIv1alpha2.TelemetryExporter{ + Endpoint: helpers.GetPointer("test-endpoint"), + }, + }, + }, + }, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + }, + }, + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{Name: "gateway2", Namespace: "test"}, + EffectiveNginxProxy: &EffectiveNginxProxy{}, + }, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + }, + }, + }, + }, + validator: validatorError, + policy: &Policy{ + Source: &policiesfakes.FakePolicy{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + expAncestors: []PolicyAncestor{ + { + Ancestor: createExpAncestor(kinds.HTTPRoute), + Conditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + }, + }, + }, + expAttached: true, + }, + { + name: "invalid for all ParentRefs", + route: &L7Route{ + Source: &v1.HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: routeNsName.Name, + Namespace: routeNsName.Namespace, + }, + }, + Valid: true, + Attachable: true, + RouteType: RouteTypeHTTP, + ParentRefs: []ParentRef{ + { + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{Name: "gateway1", Namespace: "test"}, + EffectiveNginxProxy: &EffectiveNginxProxy{}, + }, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + }, + }, + }, + }, + validator: validatorError, + policy: &Policy{ + Source: &policiesfakes.FakePolicy{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + expAncestors: []PolicyAncestor{ + { + Ancestor: createExpAncestor(kinds.HTTPRoute), + Conditions: []conditions.Condition{ + staticConds.NewPolicyNotAcceptedNginxProxyNotSet(staticConds.PolicyMessageTelemetryNotEnabled), + }, + }, + }, + expAttached: false, + }, } for _, test := range tests { @@ -365,7 +499,7 @@ func TestAttachPolicyToRoute(t *testing.T) { t.Parallel() g := NewWithT(t) - attachPolicyToRoute(test.policy, test.route, "nginx-gateway") + attachPolicyToRoute(test.policy, test.route, 
test.validator, "nginx-gateway") if test.expAttached { g.Expect(test.route.Policies).To(HaveLen(1)) @@ -382,23 +516,26 @@ func TestAttachPolicyToGateway(t *testing.T) { t.Parallel() gatewayNsName := types.NamespacedName{Namespace: testNs, Name: "gateway"} gateway2NsName := types.NamespacedName{Namespace: testNs, Name: "gateway2"} - ignoredGatewayNsName := types.NamespacedName{Namespace: testNs, Name: "ignored"} - newGateway := func(valid bool, nsname types.NamespacedName) *Gateway { - return &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: nsname.Namespace, - Name: nsname.Name, + newGatewayMap := func(valid bool, nsname []types.NamespacedName) map[types.NamespacedName]*Gateway { + gws := make(map[types.NamespacedName]*Gateway) + for _, name := range nsname { + gws[name] = &Gateway{ + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: name.Name, + Namespace: name.Namespace, + }, }, - }, - Valid: valid, + Valid: valid, + } } + return gws } tests := []struct { policy *Policy - gw *Gateway + gws map[types.NamespacedName]*Gateway name string expAncestors []PolicyAncestor expAttached bool @@ -413,8 +550,9 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(true, gatewayNsName), + gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ {Ancestor: getGatewayParentRef(gatewayNsName)}, }, @@ -430,11 +568,12 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, Ancestors: []PolicyAncestor{ {Ancestor: getGatewayParentRef(gatewayNsName)}, }, }, - gw: newGateway(true, gatewayNsName), + gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ {Ancestor: getGatewayParentRef(gatewayNsName)}, {Ancestor: getGatewayParentRef(gatewayNsName)}, @@ -442,21 +581,22 @@ func TestAttachPolicyToGateway(t *testing.T) { expAttached: true, }, { - name: "not attached; gateway ignored", + name: "not attached; gateway is not found", policy: &Policy{ Source: &policiesfakes.FakePolicy{}, TargetRefs: []PolicyTargetRef{ { - Nsname: ignoredGatewayNsName, + Nsname: gateway2NsName, Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(true, gatewayNsName), + gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ { - Ancestor: getGatewayParentRef(ignoredGatewayNsName), - Conditions: []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is ignored")}, + Ancestor: getGatewayParentRef(gateway2NsName), + Conditions: []conditions.Condition{staticConds.NewPolicyTargetNotFound("TargetRef is not found")}, }, }, expAttached: false, @@ -471,8 +611,9 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(false, gatewayNsName), + gws: newGatewayMap(false, []types.NamespacedName{gatewayNsName}), expAncestors: []PolicyAncestor{ { Ancestor: getGatewayParentRef(gatewayNsName), @@ -481,21 +622,6 @@ func TestAttachPolicyToGateway(t *testing.T) { }, expAttached: false, }, - { - name: "not attached; non-NGF gateway", - policy: &Policy{ - Source: &policiesfakes.FakePolicy{}, - TargetRefs: []PolicyTargetRef{ - { - Nsname: gateway2NsName, - Kind: "Gateway", - }, - }, - }, - gw: newGateway(true, gatewayNsName), - expAncestors: nil, - expAttached: false, - }, { 
name: "not attached; max ancestors", policy: &Policy{ @@ -506,28 +632,29 @@ func TestAttachPolicyToGateway(t *testing.T) { Kind: "Gateway", }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - gw: newGateway(true, gatewayNsName), + gws: newGatewayMap(true, []types.NamespacedName{gatewayNsName}), expAncestors: nil, expAttached: false, }, } for _, test := range tests { - ignoredGateways := map[types.NamespacedName]*v1.Gateway{ - ignoredGatewayNsName: nil, - } - t.Run(test.name, func(t *testing.T) { t.Parallel() g := NewWithT(t) - attachPolicyToGateway(test.policy, test.policy.TargetRefs[0], test.gw, ignoredGateways, "nginx-gateway") + attachPolicyToGateway(test.policy, test.policy.TargetRefs[0], test.gws, "nginx-gateway") if test.expAttached { - g.Expect(test.gw.Policies).To(HaveLen(1)) + for _, gw := range test.gws { + g.Expect(gw.Policies).To(HaveLen(1)) + } } else { - g.Expect(test.gw.Policies).To(BeEmpty()) + for _, gw := range test.gws { + g.Expect(gw.Policies).To(BeEmpty()) + } } g.Expect(test.policy.Ancestors).To(BeEquivalentTo(test.expAncestors)) @@ -541,31 +668,37 @@ func TestAttachPolicyToService(t *testing.T) { gwNsname := types.NamespacedName{Namespace: testNs, Name: "gateway"} gw2Nsname := types.NamespacedName{Namespace: testNs, Name: "gateway2"} - getGateway := func(valid bool) *Gateway { - return &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: gwNsname.Name, - Namespace: gwNsname.Namespace, + getGateway := func(valid bool) map[types.NamespacedName]*Gateway { + return map[types.NamespacedName]*Gateway{ + gwNsname: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsname.Name, + Namespace: gwNsname.Namespace, + }, }, + Valid: valid, }, - Valid: valid, } } tests := []struct { policy *Policy svc *ReferencedService - gw *Gateway + gws map[types.NamespacedName]*Gateway name string expAncestors []PolicyAncestor expAttached bool }{ { - name: "attachment", - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + name: "attachment", + policy: &Policy{Source: &policiesfakes.FakePolicy{}, InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(true /*valid*/), expAttached: true, expAncestors: []PolicyAncestor{ { @@ -582,9 +715,14 @@ func TestAttachPolicyToService(t *testing.T) { Ancestor: getGatewayParentRef(gwNsname), }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, }, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(true /*valid*/), expAttached: true, expAncestors: []PolicyAncestor{ { @@ -601,9 +739,15 @@ func TestAttachPolicyToService(t *testing.T) { Ancestor: getGatewayParentRef(gw2Nsname), }, }, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + }, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gw2Nsname: {}, + gwNsname: {}, + }, }, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + gws: getGateway(true /*valid*/), expAttached: true, expAncestors: []PolicyAncestor{ { @@ -615,10 +759,14 @@ func TestAttachPolicyToService(t *testing.T) { }, }, { - name: "no attachment; gateway is invalid", - policy: &Policy{Source: &policiesfakes.FakePolicy{}}, - svc: &ReferencedService{}, - gw: getGateway(false /*invalid*/), + name: "no 
attachment; gateway is invalid", + policy: &Policy{Source: &policiesfakes.FakePolicy{}, InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(false /*invalid*/), expAttached: false, expAncestors: []PolicyAncestor{ { @@ -628,13 +776,55 @@ func TestAttachPolicyToService(t *testing.T) { }, }, { - name: "no attachment; max ancestor", - policy: &Policy{Source: createTestPolicyWithAncestors(16)}, - svc: &ReferencedService{}, - gw: getGateway(true /*valid*/), + name: "no attachment; max ancestor", + policy: &Policy{Source: createTestPolicyWithAncestors(16), InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(true /*valid*/), + expAttached: false, + expAncestors: nil, + }, + { + name: "no attachment; does not belong to gateway", + policy: &Policy{Source: &policiesfakes.FakePolicy{}, InvalidForGateways: map[types.NamespacedName]struct{}{}}, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gw2Nsname: {}, + }, + }, + gws: getGateway(true /*valid*/), expAttached: false, expAncestors: nil, }, + { + name: "no attachment; gateway is invalid", + policy: &Policy{ + Source: &policiesfakes.FakePolicy{}, + InvalidForGateways: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + Ancestors: []PolicyAncestor{ + { + Ancestor: getGatewayParentRef(gwNsname), + }, + }, + }, + svc: &ReferencedService{ + GatewayNsNames: map[types.NamespacedName]struct{}{ + gwNsname: {}, + }, + }, + gws: getGateway(false), + expAttached: false, + expAncestors: []PolicyAncestor{ + { + Ancestor: getGatewayParentRef(gwNsname), + }, + }, + }, } for _, test := range tests { @@ -642,7 +832,7 @@ func TestAttachPolicyToService(t *testing.T) { t.Parallel() g := NewWithT(t) - attachPolicyToService(test.policy, test.svc, test.gw, "ctlr") + attachPolicyToService(test.policy, test.svc, test.gws, "ctlr") if test.expAttached { g.Expect(test.svc.Policies).To(HaveLen(1)) } else { @@ -663,7 +853,7 @@ func TestProcessPolicies(t *testing.T) { hrRef := createTestRef(kinds.HTTPRoute, v1.GroupName, "hr") grpcRef := createTestRef(kinds.GRPCRoute, v1.GroupName, "grpc") gatewayRef := createTestRef(kinds.Gateway, v1.GroupName, "gw") - ignoredGatewayRef := createTestRef(kinds.Gateway, v1.GroupName, "ignored") + gatewayRef2 := createTestRef(kinds.Gateway, v1.GroupName, "gw2") svcRef := createTestRef(kinds.Service, "core", "svc") // These refs reference objects that do not belong to NGF. 
@@ -677,7 +867,7 @@ func TestProcessPolicies(t *testing.T) { pol1, pol1Key := createTestPolicyAndKey(policyGVK, "pol1", hrRef) pol2, pol2Key := createTestPolicyAndKey(policyGVK, "pol2", grpcRef) pol3, pol3Key := createTestPolicyAndKey(policyGVK, "pol3", gatewayRef) - pol4, pol4Key := createTestPolicyAndKey(policyGVK, "pol4", ignoredGatewayRef) + pol4, pol4Key := createTestPolicyAndKey(policyGVK, "pol4", gatewayRef2) pol5, pol5Key := createTestPolicyAndKey(policyGVK, "pol5", hrDoesNotExistRef) pol6, pol6Key := createTestPolicyAndKey(policyGVK, "pol6", hrWrongGroup) pol7, pol7Key := createTestPolicyAndKey(policyGVK, "pol7", gatewayWrongGroupRef) @@ -724,8 +914,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol2Key: { Source: pol2, @@ -736,8 +927,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol3Key: { Source: pol3, @@ -748,20 +940,22 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol4Key: { Source: pol4, TargetRefs: []PolicyTargetRef{ { - Nsname: types.NamespacedName{Namespace: testNs, Name: "ignored"}, + Nsname: types.NamespacedName{Namespace: testNs, Name: "gw2"}, Kind: kinds.Gateway, Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol10Key: { Source: pol10, @@ -772,18 +966,16 @@ func TestProcessPolicies(t *testing.T) { Group: "core", }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, }, }, { name: "invalid and valid policies", validator: &policiesfakes.FakeValidator{ - ValidateStub: func( - policy policies.Policy, - _ *policies.GlobalSettings, - ) []conditions.Condition { + ValidateStub: func(policy policies.Policy) []conditions.Condition { if policy.GetName() == "pol1" { return []conditions.Condition{staticConds.NewPolicyInvalid("invalid error")} } @@ -808,8 +1000,9 @@ func TestProcessPolicies(t *testing.T) { Conditions: []conditions.Condition{ staticConds.NewPolicyInvalid("invalid error"), }, - Ancestors: []PolicyAncestor{}, - Valid: false, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: false, }, pol2Key: { Source: pol2, @@ -820,8 +1013,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, }, }, @@ -846,8 +1040,9 @@ func TestProcessPolicies(t *testing.T) { Group: v1.GroupName, }, }, - Ancestors: []PolicyAncestor{}, - Valid: true, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: true, }, pol1ConflictKey: { Source: pol1Conflict, @@ -861,27 +1056,32 @@ func TestProcessPolicies(t *testing.T) { Conditions: []conditions.Condition{ staticConds.NewPolicyConflicted("Conflicts with another MyPolicy"), }, - Ancestors: 
[]PolicyAncestor{}, - Valid: false, + Ancestors: []PolicyAncestor{}, + InvalidForGateways: map[types.NamespacedName]struct{}{}, + Valid: false, }, }, }, } - gateways := processedGateways{ - Winner: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: testNs, + gateways := map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gw"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: testNs, + }, }, + Valid: true, }, - Ignored: map[types.NamespacedName]*v1.Gateway{ - {Namespace: testNs, Name: "ignored"}: { + {Namespace: testNs, Name: "gw2"}: { + Source: &v1.Gateway{ ObjectMeta: metav1.ObjectMeta{ - Name: "gw", + Name: "gw2", Namespace: testNs, }, }, + Valid: true, }, } @@ -913,7 +1113,7 @@ func TestProcessPolicies(t *testing.T) { t.Parallel() g := NewWithT(t) - processed := processPolicies(test.policies, test.validator, gateways, routes, services, nil) + processed := processPolicies(test.policies, test.validator, routes, services, gateways) g.Expect(processed).To(BeEquivalentTo(test.expProcessedPolicies)) }) } @@ -1039,12 +1239,15 @@ func TestProcessPolicies_RouteOverlap(t *testing.T) { }, } - gateways := processedGateways{ - Winner: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gw", - Namespace: testNs, + gateways := map[types.NamespacedName]*Gateway{ + {Namespace: testNs, Name: "gw"}: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gw", + Namespace: testNs, + }, }, + Valid: true, }, } @@ -1053,7 +1256,7 @@ func TestProcessPolicies_RouteOverlap(t *testing.T) { t.Parallel() g := NewWithT(t) - processed := processPolicies(test.policies, test.validator, gateways, test.routes, nil, nil) + processed := processPolicies(test.policies, test.validator, test.routes, nil, gateways) g.Expect(processed).To(HaveLen(1)) for _, pol := range processed { diff --git a/internal/mode/static/state/graph/route_common.go b/internal/mode/static/state/graph/route_common.go index f40a647c1f..e97c552f52 100644 --- a/internal/mode/static/state/graph/route_common.go +++ b/internal/mode/static/state/graph/route_common.go @@ -31,8 +31,8 @@ type ParentRef struct { SectionName *v1.SectionName // Port is the network port this Route targets. Port *v1.PortNumber - // Gateway is the NamespacedName of the referenced Gateway - Gateway types.NamespacedName + // Gateway is the metadata about the parent Gateway. + Gateway *ParentRefGateway // Idx is the index of the corresponding ParentReference in the Route. Idx int } @@ -40,17 +40,32 @@ type ParentRef struct { // ParentRefAttachmentStatus describes the attachment status of a ParentRef. type ParentRefAttachmentStatus struct { // AcceptedHostnames is an intersection between the hostnames supported by an attached Listener - // and the hostnames from this Route. Key is listener name, value is list of hostnames. + // and the hostnames from this Route. Key is the Gateway NamespacedName combined with the Listener name (see CreateGatewayListenerKey), value is list of hostnames. AcceptedHostnames map[string][]string - // FailedCondition is the condition that describes why the ParentRef is not attached to the Gateway. It is set - // when Attached is false. - FailedCondition conditions.Condition + // FailedConditions are the conditions that describe why the ParentRef is not attached to the Gateway, or other + // failures that may lead to partial attachments. For example, a backendRef could be invalid, but the route can + // still attach. The backendRef condition would be displayed here.
+ FailedConditions []conditions.Condition // ListenerPort is the port on the Listener that the Route is attached to. ListenerPort v1.PortNumber // Attached indicates if the ParentRef is attached to the Gateway. Attached bool } +// ParentRefGateway contains the NamespacedName and EffectiveNginxProxy of the parent Gateway. +type ParentRefGateway struct { + EffectiveNginxProxy *EffectiveNginxProxy + NamespacedName types.NamespacedName +} + +// CreateParentRefGateway creates a new ParentRefGateway object using a graph.Gateway object. +func CreateParentRefGateway(gateway *Gateway) *ParentRefGateway { + return &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gateway.Source), + EffectiveNginxProxy: gateway.EffectiveNginxProxy, + } +} + type RouteType string const ( @@ -174,6 +189,11 @@ func CreateRouteKeyL4(obj client.Object) L4RouteKey { } } +// CreateGatewayListenerKey creates a key using the Gateway NamespacedName and Listener name. +func CreateGatewayListenerKey(gwNSName types.NamespacedName, listenerName string) string { + return fmt.Sprintf("%s/%s/%s", gwNSName.Namespace, gwNSName.Name, listenerName) +} + type routeRuleErrors struct { invalid field.ErrorList resolve field.ErrorList @@ -188,12 +208,11 @@ func (e routeRuleErrors) append(newErrors routeRuleErrors) routeRuleErrors { func buildL4RoutesForGateways( tlsRoutes map[types.NamespacedName]*v1alpha.TLSRoute, - gatewayNsNames []types.NamespacedName, services map[types.NamespacedName]*apiv1.Service, - npCfg *EffectiveNginxProxy, + gws map[types.NamespacedName]*Gateway, resolver *referenceGrantResolver, ) map[L4RouteKey]*L4Route { - if len(gatewayNsNames) == 0 { + if len(gws) == 0 { return nil } @@ -201,15 +220,15 @@ func buildL4RoutesForGateways( for _, route := range tlsRoutes { r := buildTLSRoute( route, - gatewayNsNames, + gws, services, - npCfg, resolver.refAllowedFrom(fromTLSRoute(route.Namespace)), ) if r != nil { routes[CreateRouteKeyL4(route)] = r } } + return routes } @@ -218,20 +237,17 @@ func buildRoutesForGateways( validator validation.HTTPFieldsValidator, httpRoutes map[types.NamespacedName]*v1.HTTPRoute, grpcRoutes map[types.NamespacedName]*v1.GRPCRoute, - gatewayNsNames []types.NamespacedName, - effectiveNginxProxy *EffectiveNginxProxy, + gateways map[types.NamespacedName]*Gateway, snippetsFilters map[types.NamespacedName]*SnippetsFilter, ) map[RouteKey]*L7Route { - if len(gatewayNsNames) == 0 { + if len(gateways) == 0 { return nil } routes := make(map[RouteKey]*L7Route) - http2disabled := isHTTP2Disabled(effectiveNginxProxy) - for _, route := range httpRoutes { - r := buildHTTPRoute(validator, route, gatewayNsNames, snippetsFilters) + r := buildHTTPRoute(validator, route, gateways, snippetsFilters) if r == nil { continue } @@ -239,11 +255,11 @@ func buildRoutesForGateways( routes[CreateRouteKey(route)] = r // if this route has a RequestMirror filter, build a duplicate route for the mirror - buildHTTPMirrorRoutes(routes, r, route, gatewayNsNames, snippetsFilters) + buildHTTPMirrorRoutes(routes, r, route, gateways, snippetsFilters) } for _, route := range grpcRoutes { - r := buildGRPCRoute(validator, route, gatewayNsNames, http2disabled, snippetsFilters) + r := buildGRPCRoute(validator, route, gateways, snippetsFilters) if r == nil { continue } @@ -251,28 +267,16 @@ func buildRoutesForGateways( routes[CreateRouteKey(route)] = r // if this route has a RequestMirror filter, build a duplicate route for the mirror - buildGRPCMirrorRoutes(routes, r, route, gatewayNsNames, snippetsFilters, http2disabled) + 
buildGRPCMirrorRoutes(routes, r, route, gateways, snippetsFilters) } return routes } -func isHTTP2Disabled(npCfg *EffectiveNginxProxy) bool { - if npCfg == nil { - return false - } - - if npCfg.DisableHTTP2 == nil { - return false - } - - return *npCfg.DisableHTTP2 -} - func buildSectionNameRefs( parentRefs []v1.ParentReference, routeNamespace string, - gatewayNsNames []types.NamespacedName, + gws map[types.NamespacedName]*Gateway, ) ([]ParentRef, error) { sectionNameRefs := make([]ParentRef, 0, len(parentRefs)) @@ -283,8 +287,8 @@ func buildSectionNameRefs( uniqueSectionsPerGateway := make(map[key]struct{}) for i, p := range parentRefs { - gw, found := findGatewayForParentRef(p, routeNamespace, gatewayNsNames) - if !found { + gw := findGatewayForParentRef(p, routeNamespace, gws) + if gw == nil { continue } @@ -293,19 +297,20 @@ func buildSectionNameRefs( sectionName = string(*p.SectionName) } + gwNsName := client.ObjectKeyFromObject(gw.Source) k := key{ - gwNsName: gw, + gwNsName: gwNsName, sectionName: sectionName, } if _, exist := uniqueSectionsPerGateway[k]; exist { - return nil, fmt.Errorf("duplicate section name %q for Gateway %s", sectionName, gw.String()) + return nil, fmt.Errorf("duplicate section name %q for Gateway %s", sectionName, gwNsName.String()) } uniqueSectionsPerGateway[k] = struct{}{} sectionNameRefs = append(sectionNameRefs, ParentRef{ Idx: i, - Gateway: gw, + Gateway: CreateParentRefGateway(gw), SectionName: p.SectionName, Port: p.Port, }) @@ -317,85 +322,98 @@ func buildSectionNameRefs( func findGatewayForParentRef( ref v1.ParentReference, routeNamespace string, - gatewayNsNames []types.NamespacedName, -) (gwNsName types.NamespacedName, found bool) { + gws map[types.NamespacedName]*Gateway, +) *Gateway { if ref.Kind != nil && *ref.Kind != kinds.Gateway { - return types.NamespacedName{}, false + return nil } if ref.Group != nil && *ref.Group != v1.GroupName { - return types.NamespacedName{}, false + return nil } - // if the namespace is missing, assume the namespace of the HTTPRoute + // if the namespace is missing, assume the namespace of the Route ns := routeNamespace if ref.Namespace != nil { ns = string(*ref.Namespace) } - for _, gw := range gatewayNsNames { - if gw.Namespace == ns && gw.Name == string(ref.Name) { - return gw, true - } + key := types.NamespacedName{ + Namespace: ns, + Name: string(ref.Name), } - return types.NamespacedName{}, false + if gw, exists := gws[key]; exists { + return gw + } + + return nil } func bindRoutesToListeners( l7Routes map[RouteKey]*L7Route, l4Routes map[L4RouteKey]*L4Route, - gw *Gateway, + gws map[types.NamespacedName]*Gateway, namespaces map[types.NamespacedName]*apiv1.Namespace, ) { - if gw == nil { + if len(gws) == 0 { return } - for _, r := range l7Routes { - bindL7RouteToListeners(r, gw, namespaces) - } + for _, gw := range gws { + for _, r := range l7Routes { + bindL7RouteToListeners(r, gw, namespaces) + } - routes := make([]*L7Route, 0, len(l7Routes)) - for _, r := range l7Routes { - routes = append(routes, r) - } + routes := make([]*L7Route, 0, len(l7Routes)) + for _, r := range l7Routes { + routes = append(routes, r) + } - listenerMap := getListenerHostPortMap(gw.Listeners) - isolateL7RouteListeners(routes, listenerMap) + listenerMap := getListenerHostPortMap(gw.Listeners, gw) + isolateL7RouteListeners(routes, listenerMap) - l4RouteSlice := make([]*L4Route, 0, len(l4Routes)) - for _, r := range l4Routes { - l4RouteSlice = append(l4RouteSlice, r) - } + l4RouteSlice := make([]*L4Route, 0, len(l4Routes)) + for _, r := 
range l4Routes { + l4RouteSlice = append(l4RouteSlice, r) + } - // Sort the slice by timestamp and name so that we process the routes in the priority order - sort.Slice(l4RouteSlice, func(i, j int) bool { - return ngfSort.LessClientObject(l4RouteSlice[i].Source, l4RouteSlice[j].Source) - }) + // Sort the slice by timestamp and name so that we process the routes in the priority order + sort.Slice(l4RouteSlice, func(i, j int) bool { + return ngfSort.LessClientObject(l4RouteSlice[i].Source, l4RouteSlice[j].Source) + }) - // portHostnamesMap exists to detect duplicate hostnames on the same port - portHostnamesMap := make(map[string]struct{}) + // portHostnamesMap exists to detect duplicate hostnames on the same port + portHostnamesMap := make(map[string]struct{}) - for _, r := range l4RouteSlice { - bindL4RouteToListeners(r, gw, namespaces, portHostnamesMap) - } + for _, r := range l4RouteSlice { + bindL4RouteToListeners(r, gw, namespaces, portHostnamesMap) + } - isolateL4RouteListeners(l4RouteSlice, listenerMap) + isolateL4RouteListeners(l4RouteSlice, listenerMap) + } } type hostPort struct { + gwNsName types.NamespacedName hostname string port v1.PortNumber } -func getListenerHostPortMap(listeners []*Listener) map[string]hostPort { +func getListenerHostPortMap(listeners []*Listener, gw *Gateway) map[string]hostPort { listenerHostPortMap := make(map[string]hostPort, len(listeners)) + gwNsName := types.NamespacedName{ + Name: gw.Source.Name, + Namespace: gw.Source.Namespace, + } for _, l := range listeners { - listenerHostPortMap[l.Name] = hostPort{ + key := CreateGatewayListenerKey(client.ObjectKeyFromObject(gw.Source), l.Name) + listenerHostPortMap[key] = hostPort{ hostname: getHostname(l.Source.Hostname), port: l.Source.Port, + gwNsName: gwNsName, } } + return listenerHostPortMap } @@ -425,22 +443,31 @@ func isolateHostnamesForParentRefs(parentRef []ParentRef, listenerHostnameMap ma continue } + if ref.Attachment == nil { + continue + } + acceptedHostnames := ref.Attachment.AcceptedHostnames hostnamesToRemoves := make(map[string]struct{}) - for listenerName, hostnames := range acceptedHostnames { + for key, hostnames := range acceptedHostnames { if len(hostnames) == 0 { continue } for _, h := range hostnames { for lName, lHostPort := range listenerHostnameMap { + // skip comparison if not part of the same gateway + if lHostPort.gwNsName != ref.Gateway.NamespacedName { + continue + } + // skip comparison if it is a catch all listener block if lHostPort.hostname == "" { continue } - // for L7Routes, we compare the hostname, port and listener name combination + // for L7Routes, we compare the hostname, port and listenerName combination // to identify if hostname needs to be isolated. - if h == lHostPort.hostname && listenerName != lName { + if h == lHostPort.hostname && key != lName { // for L4Routes, we only compare the hostname and listener name combination // because we do not allow l4Routes to attach to the same listener // if they share the same port and hostname. @@ -452,7 +479,7 @@ func isolateHostnamesForParentRefs(parentRef []ParentRef, listenerHostnameMap ma } isolatedHostnames := removeHostnames(hostnames, hostnamesToRemoves) - ref.Attachment.AcceptedHostnames[listenerName] = isolatedHostnames + ref.Attachment.AcceptedHostnames[key] = isolatedHostnames } } } @@ -488,7 +515,7 @@ func validateParentRef( // Case 1: Attachment is not possible because the specified SectionName does not match any Listeners in the // Gateway. 
if !listenerExists { - attachment.FailedCondition = staticConds.NewRouteNoMatchingParent() + attachment.FailedConditions = append(attachment.FailedConditions, staticConds.NewRouteNoMatchingParent()) return attachment, nil } @@ -496,25 +523,19 @@ func validateParentRef( if ref.Port != nil { valErr := field.Forbidden(path.Child("port"), "cannot be set") - attachment.FailedCondition = staticConds.NewRouteUnsupportedValue(valErr.Error()) - return attachment, attachableListeners - } - - // Case 3: the parentRef references an ignored Gateway resource. - - referencesWinningGw := ref.Gateway.Namespace == gw.Source.Namespace && ref.Gateway.Name == gw.Source.Name - - if !referencesWinningGw { - attachment.FailedCondition = staticConds.NewRouteNotAcceptedGatewayIgnored() + attachment.FailedConditions = append( + attachment.FailedConditions, staticConds.NewRouteUnsupportedValue(valErr.Error()), + ) return attachment, attachableListeners } - // Case 4: Attachment is not possible because Gateway is invalid + // Case 3: Attachment is not possible because Gateway is invalid if !gw.Valid { - attachment.FailedCondition = staticConds.NewRouteInvalidGateway() + attachment.FailedConditions = append(attachment.FailedConditions, staticConds.NewRouteInvalidGateway()) return attachment, attachableListeners } + return attachment, attachableListeners } @@ -531,13 +552,25 @@ func bindL4RouteToListeners( for i := range route.ParentRefs { ref := &(route.ParentRefs)[i] + gwNsName := types.NamespacedName{ + Name: gw.Source.Name, + Namespace: gw.Source.Namespace, + } + + if ref.Gateway.NamespacedName != gwNsName { + continue + } + attachment, attachableListeners := validateParentRef(ref, gw) - if attachment.FailedCondition != (conditions.Condition{}) { + if len(attachment.FailedConditions) > 0 { continue } - // Winning Gateway + if cond, ok := route.Spec.BackendRef.InvalidForGateways[gwNsName]; ok { + attachment.FailedConditions = append(attachment.FailedConditions, cond) + } + // Try to attach Route to all matching listeners cond, attached := tryToAttachL4RouteToListeners( @@ -549,7 +582,7 @@ func bindL4RouteToListeners( portHostnamesMap, ) if !attached { - attachment.FailedCondition = cond + attachment.FailedConditions = append(attachment.FailedConditions, cond) continue } if cond != (conditions.Condition{}) { @@ -666,7 +699,7 @@ func bindToListenerL4( return true, false, true } - refStatus.AcceptedHostnames[string(l.Source.Name)] = hostnames + refStatus.AcceptedHostnames[CreateGatewayListenerKey(l.GatewayName, l.Name)] = hostnames l.L4Routes[CreateRouteKeyL4(route.Source)] = route return true, true, true @@ -684,13 +717,36 @@ func bindL7RouteToListeners( for i := range route.ParentRefs { ref := &(route.ParentRefs)[i] + gwNsName := types.NamespacedName{ + Name: gw.Source.Name, + Namespace: gw.Source.Namespace, + } + + if ref.Gateway.NamespacedName != gwNsName { + continue + } + attachment, attachableListeners := validateParentRef(ref, gw) - if attachment.FailedCondition != (conditions.Condition{}) { + if route.RouteType == RouteTypeGRPC && isHTTP2Disabled(gw.EffectiveNginxProxy) { + msg := "HTTP2 is disabled - cannot configure GRPCRoutes" + attachment.FailedConditions = append( + attachment.FailedConditions, staticConds.NewRouteUnsupportedConfiguration(msg), + ) + } + + if len(attachment.FailedConditions) > 0 { continue } - // Winning Gateway + for _, rule := range route.Spec.Rules { + for _, backendRef := range rule.BackendRefs { + if cond, ok := backendRef.InvalidForGateways[gwNsName]; ok { + 
attachment.FailedConditions = append(attachment.FailedConditions, cond) + } + } + } + // Try to attach Route to all matching listeners cond, attached := tryToAttachL7RouteToListeners( @@ -701,7 +757,7 @@ func bindL7RouteToListeners( namespaces, ) if !attached { - attachment.FailedCondition = cond + attachment.FailedConditions = append(attachment.FailedConditions, cond) continue } if cond != (conditions.Condition{}) { @@ -712,6 +768,18 @@ func bindL7RouteToListeners( } } +func isHTTP2Disabled(npCfg *EffectiveNginxProxy) bool { + if npCfg == nil { + return false + } + + if npCfg.DisableHTTP2 == nil { + return false + } + + return *npCfg.DisableHTTP2 +} + // tryToAttachRouteToListeners tries to attach the route to the listeners that match the parentRef and the hostnames. // There are two cases: // (1) If it succeeds in attaching at least one listener it will return true. The returned condition will be empty if @@ -744,7 +812,7 @@ func tryToAttachL7RouteToListeners( return true, false } - refStatus.AcceptedHostnames[string(l.Source.Name)] = hostnames + refStatus.AcceptedHostnames[CreateGatewayListenerKey(l.GatewayName, l.Name)] = hostnames refStatus.ListenerPort = l.Source.Port l.Routes[rk] = route diff --git a/internal/mode/static/state/graph/route_common_test.go b/internal/mode/static/state/graph/route_common_test.go index 8268af036e..dc127bb3b8 100644 --- a/internal/mode/static/state/graph/route_common_test.go +++ b/internal/mode/static/state/graph/route_common_test.go @@ -54,27 +54,44 @@ func TestBuildSectionNameRefs(t *testing.T) { }, } - gwNsNames := []types.NamespacedName{gwNsName1, gwNsName2} + gws := map[types.NamespacedName]*Gateway{ + gwNsName1: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName1.Name, + Namespace: gwNsName1.Namespace, + }, + }, + }, + gwNsName2: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName2.Name, + Namespace: gwNsName2.Namespace, + }, + }, + }, + } expected := []ParentRef{ { Idx: 0, - Gateway: gwNsName1, + Gateway: CreateParentRefGateway(gws[gwNsName1]), SectionName: parentRefs[0].SectionName, }, { Idx: 2, - Gateway: gwNsName2, + Gateway: CreateParentRefGateway(gws[gwNsName2]), SectionName: parentRefs[2].SectionName, }, { Idx: 3, - Gateway: gwNsName1, + Gateway: CreateParentRefGateway(gws[gwNsName1]), SectionName: parentRefs[3].SectionName, }, { Idx: 4, - Gateway: gwNsName2, + Gateway: CreateParentRefGateway(gws[gwNsName2]), SectionName: parentRefs[4].SectionName, }, } @@ -126,7 +143,7 @@ func TestBuildSectionNameRefs(t *testing.T) { t.Parallel() g := NewWithT(t) - result, err := buildSectionNameRefs(test.parentRefs, routeNamespace, gwNsNames) + result, err := buildSectionNameRefs(test.parentRefs, routeNamespace, gws) g.Expect(result).To(Equal(test.expectedRefs)) if test.expectedError != nil { g.Expect(err).To(Equal(test.expectedError)) @@ -181,35 +198,46 @@ func TestFindGatewayForParentRef(t *testing.T) { Kind: helpers.GetPointer[gatewayv1.Kind]("NotGateway"), Name: gatewayv1.ObjectName(gwNsName2.Name), }, - expectedFound: false, - expectedGwNsName: types.NamespacedName{}, - name: "wrong kind", + expectedFound: false, + name: "wrong kind", }, { ref: gatewayv1.ParentReference{ Group: helpers.GetPointer[gatewayv1.Group]("wrong-group"), Name: gatewayv1.ObjectName(gwNsName2.Name), }, - expectedFound: false, - expectedGwNsName: types.NamespacedName{}, - name: "wrong group", + expectedFound: false, + name: "wrong group", }, { ref: gatewayv1.ParentReference{ Namespace: 
helpers.GetPointer(gatewayv1.Namespace(gwNsName1.Namespace)), Name: "some-gateway", }, - expectedFound: false, - expectedGwNsName: types.NamespacedName{}, - name: "not found", + expectedFound: false, + name: "not found", }, } routeNamespace := "test-2" - gwNsNames := []types.NamespacedName{ - gwNsName1, - gwNsName2, + gws := map[types.NamespacedName]*Gateway{ + gwNsName1: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName1.Name, + Namespace: gwNsName1.Namespace, + }, + }, + }, + gwNsName2: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: gwNsName2.Name, + Namespace: gwNsName2.Namespace, + }, + }, + }, } for _, test := range tests { @@ -217,9 +245,13 @@ func TestFindGatewayForParentRef(t *testing.T) { t.Parallel() g := NewWithT(t) - gw, found := findGatewayForParentRef(test.ref, routeNamespace, gwNsNames) - g.Expect(found).To(Equal(test.expectedFound)) - g.Expect(gw).To(Equal(test.expectedGwNsName)) + gw := findGatewayForParentRef(test.ref, routeNamespace, gws) + if test.expectedFound { + g.Expect(gw).ToNot(BeNil()) + g.Expect(client.ObjectKeyFromObject(gw.Source)).To(Equal(test.expectedGwNsName)) + } else { + g.Expect(gw).To(BeNil()) + } }) } } @@ -229,6 +261,10 @@ func TestBindRouteToListeners(t *testing.T) { createListener := func(name string) *Listener { return &Listener{ Name: name, + GatewayName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Source: gatewayv1.Listener{ Name: gatewayv1.SectionName(name), Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("foo.example.com")), @@ -316,7 +352,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gateway), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gateway)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -336,7 +372,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -349,7 +385,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -363,7 +399,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNilSectionName.Spec.ParentRefs[0].SectionName, }, }, @@ -376,7 +412,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithEmptySectionName.Spec.ParentRefs[0].SectionName, }, }, @@ -389,7 +425,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNonExistingListener.Spec.ParentRefs[0].SectionName, }, }, @@ -402,26 +438,12 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: 
hrWithPort.Spec.ParentRefs[0].SectionName, Port: hrWithPort.Spec.ParentRefs[0].Port, }, }, } - ignoredGwNsName := types.NamespacedName{Namespace: "test", Name: "ignored-gateway"} - routeWithIgnoredGateway := &L7Route{ - RouteType: RouteTypeHTTP, - Source: hr, - Valid: true, - Attachable: true, - ParentRefs: []ParentRef{ - { - Idx: 0, - Gateway: ignoredGwNsName, - SectionName: hr.Spec.ParentRefs[0].SectionName, - }, - }, - } invalidRoute := &L7Route{ RouteType: RouteTypeHTTP, Valid: false, @@ -429,7 +451,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, }, }, @@ -443,6 +465,19 @@ func TestBindRouteToListeners(t *testing.T) { l.Source.Hostname = helpers.GetPointer[gatewayv1.Hostname]("bar.example.com") }) + routeWithInvalidBackendRefs := createNormalHTTPRoute(gw) + routeWithInvalidBackendRefs.Spec.Rules = []RouteRule{ + { + BackendRefs: []BackendRef{ + { + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + client.ObjectKeyFromObject(gw): {Message: "invalid backend"}, + }, + }, + }, + }, + } + createGRPCRouteWithSectionNameAndPort := func( sectionName *gatewayv1.SectionName, port *gatewayv1.PortNumber, @@ -487,7 +522,7 @@ func TestBindRouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gateway), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gateway)}, SectionName: gr.Spec.ParentRefs[0].SectionName, }, }, @@ -519,12 +554,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -550,12 +588,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNilSectionName.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -582,13 +623,19 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithEmptySectionName.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80": {"foo.example.com"}, - "listener-8080": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80", + ): {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-8080", + ): {"foo.example.com"}, }, }, }, @@ -619,11 +666,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: 
client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithEmptySectionName.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteInvalidListener(), + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidListener()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -645,13 +692,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithPort.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteUnsupportedValue( - `spec.parentRefs[0].port: Forbidden: cannot be set`, - ), + FailedConditions: []conditions.Condition{ + staticConds.NewRouteUnsupportedValue( + `spec.parentRefs[0].port: Forbidden: cannot be set`, + ), + }, AcceptedHostnames: map[string][]string{}, }, Port: hrWithPort.Spec.ParentRefs[0].Port, @@ -674,11 +723,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hrWithNonExistingListener.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -700,11 +749,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteInvalidListener(), + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidListener()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -726,11 +775,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNoMatchingListenerHostname(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingListenerHostname()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -740,32 +789,6 @@ func TestBindRouteToListeners(t *testing.T) { }, name: "no matching listener hostname", }, - { - route: routeWithIgnoredGateway, - gateway: &Gateway{ - Source: gw, - Valid: true, - Listeners: []*Listener{ - createListener("listener-80-1"), - }, - }, - expectedSectionNameRefs: []ParentRef{ - { - Idx: 0, - Gateway: ignoredGwNsName, - SectionName: hr.Spec.ParentRefs[0].SectionName, - Attachment: &ParentRefAttachmentStatus{ - Attached: false, - FailedCondition: staticConds.NewRouteNotAcceptedGatewayIgnored(), - AcceptedHostnames: map[string][]string{}, - }, - }, - }, - expectedGatewayListeners: []*Listener{ - createListener("listener-80-1"), - }, - name: "gateway is ignored", - }, { route: invalidRoute, gateway: &Gateway{ @@ -778,7 +801,7 @@ func 
TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: nil, SectionName: hr.Spec.ParentRefs[0].SectionName, }, @@ -800,11 +823,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteInvalidGateway(), + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidGateway()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -828,12 +851,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -861,12 +887,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -894,12 +923,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -935,11 +967,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -977,12 +1009,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1021,11 +1056,11 @@ func TestBindRouteToListeners(t 
*testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gwDiffNamespace), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gwDiffNamespace)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -1059,12 +1094,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1101,12 +1139,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gwDiffNamespace), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gwDiffNamespace)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1144,11 +1185,11 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: gr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: false, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, AcceptedHostnames: map[string][]string{}, }, }, @@ -1165,6 +1206,55 @@ func TestBindRouteToListeners(t *testing.T) { }, name: "grpc route not allowed when listener kind is HTTPRoute", }, + { + route: createNormalGRPCRoute(gw), + gateway: &Gateway{ + Source: gw, + Valid: true, + Listeners: []*Listener{ + createModifiedListener("listener-80-1", func(l *Listener) { + l.SupportedKinds = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.HTTPRoute), Group: helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + l.Routes = map[RouteKey]*L7Route{ + CreateRouteKey(gr): getLastNormalGRPCRoute(), + } + }), + }, + EffectiveNginxProxy: &EffectiveNginxProxy{ + DisableHTTP2: helpers.GetPointer(true), + }, + }, + expectedSectionNameRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{ + NamespacedName: client.ObjectKeyFromObject(gw), + }, + SectionName: gr.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + Attached: false, + FailedConditions: []conditions.Condition{ + staticConds.NewRouteUnsupportedConfiguration( + `HTTP2 is disabled - cannot configure GRPCRoutes`, + ), + }, + AcceptedHostnames: map[string][]string{}, + }, + }, + }, + expectedGatewayListeners: []*Listener{ + createModifiedListener("listener-80-1", func(l *Listener) { + l.SupportedKinds = []gatewayv1.RouteGroupKind{ + {Kind: gatewayv1.Kind(kinds.HTTPRoute), Group: 
helpers.GetPointer[gatewayv1.Group](gatewayv1.GroupName)}, + } + l.Routes = map[RouteKey]*L7Route{ + CreateRouteKey(gr): getLastNormalGRPCRoute(), + } + }), + }, + name: "grpc route not allowed when HTTP2 is disabled", + }, { route: createNormalHTTPRoute(gw), gateway: &Gateway{ @@ -1186,12 +1276,15 @@ func TestBindRouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-80-1": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, }, }, }, @@ -1210,6 +1303,43 @@ func TestBindRouteToListeners(t *testing.T) { }, name: "http route allowed when listener kind is HTTPRoute", }, + { + route: routeWithInvalidBackendRefs, + gateway: &Gateway{ + Source: gw, + Valid: true, + Listeners: []*Listener{ + createListener("listener-80-1"), + }, + }, + expectedSectionNameRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hr.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + FailedConditions: []conditions.Condition{ + {Message: "invalid backend"}, + }, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-80-1", + ): {"foo.example.com"}, + }, + }, + }, + }, + expectedGatewayListeners: []*Listener{ + createModifiedListener("listener-80-1", func(l *Listener) { + l.Routes = map[RouteKey]*L7Route{ + CreateRouteKey(hr): routeWithInvalidBackendRefs, + } + }), + }, + name: "route still allowed if backendRef failure conditions exist", + }, } namespaces := map[types.NamespacedName]*v1.Namespace{ @@ -1220,6 +1350,7 @@ func TestBindRouteToListeners(t *testing.T) { }, }, } + for _, test := range tests { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) @@ -1490,6 +1621,10 @@ func TestBindL4RouteToListeners(t *testing.T) { createListener := func(name string) *Listener { return &Listener{ Name: name, + GatewayName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Source: gatewayv1.Listener{ Name: gatewayv1.SectionName(name), Hostname: (*gatewayv1.Hostname)(helpers.GetPointer("foo.example.com")), @@ -1568,7 +1703,7 @@ func TestBindL4RouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gateway), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gateway)}, SectionName: tr.Spec.ParentRefs[0].SectionName, }, }, @@ -1587,7 +1722,7 @@ func TestBindL4RouteToListeners(t *testing.T) { noMatchingParentAttachment := ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingParent(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingParent()}, } notAttachableRoute := &L4Route{ @@ -1599,7 +1734,7 @@ func TestBindL4RouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, }, }, @@ -1613,27 +1748,19 @@ func TestBindL4RouteToListeners(t *testing.T) { ParentRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: 
&ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Port: helpers.GetPointer[gatewayv1.PortNumber](80), }, }, Attachable: true, } - routeReferencesWrongNamespace := &L4Route{ - Source: tr, - Spec: L4RouteSpec{ - Hostnames: tr.Spec.Hostnames, - }, - Valid: true, - ParentRefs: []ParentRef{ - { - Idx: 0, - Gateway: client.ObjectKeyFromObject(gwWrongNamespace), - SectionName: tr.Spec.ParentRefs[0].SectionName, - }, + + routeWithInvalidBackendRefs := createNormalRoute(gw) + routeWithInvalidBackendRefs.Spec.BackendRef = BackendRef{ + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + client.ObjectKeyFromObject(gw): {Message: "invalid backend"}, }, - Attachable: true, } tests := []struct { @@ -1649,6 +1776,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1656,12 +1787,15 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, }, }, @@ -1680,6 +1814,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1687,7 +1825,7 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, }, }, @@ -1701,6 +1839,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-444"), }, @@ -1709,7 +1851,7 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &noMatchingParentAttachment, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, }, }, @@ -1723,6 +1865,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1731,16 +1877,15 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "UnsupportedValue", - Message: "spec.parentRefs[0].port: Forbidden: cannot be set", + FailedConditions: []conditions.Condition{ + staticConds.NewRouteUnsupportedValue( + `spec.parentRefs[0].port: Forbidden: cannot be set`, + ), }, Attached: false, }, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: 
&ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, Port: helpers.GetPointer[gatewayv1.PortNumber](80), }, @@ -1750,42 +1895,15 @@ func TestBindL4RouteToListeners(t *testing.T) { }, name: "port is not nil", }, - { - route: routeReferencesWrongNamespace, - gateway: &Gateway{ - Source: gw, - Valid: true, - Listeners: []*Listener{ - createListener("listener-443"), - }, - }, - expectedSectionNameRefs: []ParentRef{ - { - Attachment: &ParentRefAttachmentStatus{ - AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "GatewayIgnored", - Message: "The Gateway is ignored by the controller", - }, - Attached: false, - }, - SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gwWrongNamespace), - Idx: 0, - }, - }, - expectedGatewayListeners: []*Listener{ - createListener("listener-443"), - }, - name: "ignored gateway", - }, { route: createNormalRoute(gw), gateway: &Gateway{ Source: gw, Valid: false, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1794,16 +1912,11 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "InvalidGateway", - Message: "Gateway is invalid", - }, - Attached: false, + FailedConditions: []conditions.Condition{staticConds.NewRouteInvalidGateway()}, + Attached: false, }, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, }, }, @@ -1817,8 +1930,13 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gwWrongNamespace, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "wrong", + Name: "gateway", + }, Listeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { + l.GatewayName = client.ObjectKeyFromObject(gwWrongNamespace) l.Source.AllowedRoutes = &gatewayv1.AllowedRoutes{ Namespaces: &gatewayv1.RouteNamespaces{From: helpers.GetPointer( gatewayv1.FromNamespaces("Same"), @@ -1830,21 +1948,17 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gwWrongNamespace), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gwWrongNamespace)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: conditions.Condition{ - Type: "Accepted", - Status: "False", - Reason: "NotAllowedByListeners", - Message: "Route is not allowed by any listener", - }, + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, }, }, }, expectedGatewayListeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { + l.GatewayName = client.ObjectKeyFromObject(gwWrongNamespace) l.Source.AllowedRoutes = &gatewayv1.AllowedRoutes{ Namespaces: &gatewayv1.RouteNamespaces{From: helpers.GetPointer( gatewayv1.FromNamespaces("Same"), @@ -1859,6 +1973,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createModifiedListener("listener-443", func(l 
*Listener) { l.Valid = false @@ -1868,11 +1986,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, Attached: true, }, @@ -1886,11 +2007,14 @@ func TestBindL4RouteToListeners(t *testing.T) { r.ParentRefs = []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, Attached: true, }, @@ -1908,7 +2032,11 @@ func TestBindL4RouteToListeners(t *testing.T) { route: createNormalRoute(gw), gateway: &Gateway{ Source: gw, - Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Valid: true, Listeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { l.Source.Hostname = (*gatewayv1.Hostname)(helpers.GetPointer("*.example.org")) @@ -1918,11 +2046,11 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNoMatchingListenerHostname(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNoMatchingListenerHostname()}, }, }, }, @@ -1940,6 +2068,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1947,11 +2079,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, }, }, @@ -1972,6 +2107,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createListener("listener-443"), }, @@ -1979,11 +2118,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, }, SectionName: 
helpers.GetPointer[gatewayv1.SectionName](""), @@ -2001,7 +2143,11 @@ func TestBindL4RouteToListeners(t *testing.T) { { route: createNormalRoute(gw), gateway: &Gateway{ - Source: gw, + Source: gw, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Valid: true, Listeners: []*Listener{}, }, @@ -2009,7 +2155,7 @@ func TestBindL4RouteToListeners(t *testing.T) { { Attachment: &noMatchingParentAttachment, SectionName: tr.Spec.ParentRefs[0].SectionName, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Idx: 0, }, }, @@ -2022,7 +2168,11 @@ func TestBindL4RouteToListeners(t *testing.T) { }), gateway: &Gateway{ Source: gw, - Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Valid: true, Listeners: []*Listener{ createListener("listener-443"), }, @@ -2030,11 +2180,14 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ Attached: true, AcceptedHostnames: map[string][]string{ - "listener-443": {"foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, }, }, SectionName: helpers.GetPointer[gatewayv1.SectionName]("listener-443"), @@ -2054,6 +2207,10 @@ func TestBindL4RouteToListeners(t *testing.T) { gateway: &Gateway{ Source: gw, Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, Listeners: []*Listener{ createModifiedListener("listener-443", func(l *Listener) { l.SupportedKinds = nil @@ -2063,10 +2220,10 @@ func TestBindL4RouteToListeners(t *testing.T) { expectedSectionNameRefs: []ParentRef{ { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{}, - FailedCondition: staticConds.NewRouteNotAllowedByListeners(), + FailedConditions: []conditions.Condition{staticConds.NewRouteNotAllowedByListeners()}, }, SectionName: helpers.GetPointer[gatewayv1.SectionName]("listener-443"), }, @@ -2078,6 +2235,47 @@ func TestBindL4RouteToListeners(t *testing.T) { }, name: "route kind not allowed", }, + { + route: routeWithInvalidBackendRefs, + gateway: &Gateway{ + Source: gw, + Valid: true, + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + Listeners: []*Listener{ + createListener("listener-443"), + }, + }, + expectedSectionNameRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tr.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + Attached: true, + FailedConditions: []conditions.Condition{ + {Message: "invalid backend"}, + }, + AcceptedHostnames: map[string][]string{ + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "listener-443", + ): {"foo.example.com"}, + }, + }, + }, + }, + expectedGatewayListeners: []*Listener{ + createModifiedListener("listener-443", func(l *Listener) { + l.L4Routes = map[L4RouteKey]*L4Route{ + CreateRouteKeyL4(tr): routeWithInvalidBackendRefs, + } + }), + }, + name: "route still allowed if backendRef failure conditions exist", + }, } namespaces := map[types.NamespacedName]*v1.Namespace{ @@ -2133,7 +2331,6 @@ func 
TestBuildL4RoutesForGateways_NoGateways(t *testing.T) { g.Expect(buildL4RoutesForGateways( tlsRoutes, - nil, services, nil, refGrantResolver, @@ -2176,6 +2373,11 @@ func TestTryToAttachL4RouteToListeners_NoAttachableListeners(t *testing.T) { g.Expect(attachable).To(BeFalse()) } +type parentRef struct { + sectionName *gatewayv1.SectionName + gw types.NamespacedName +} + func TestIsolateL4Listeners(t *testing.T) { t.Parallel() gw := &gatewayv1.Gateway{ @@ -2185,12 +2387,26 @@ func TestIsolateL4Listeners(t *testing.T) { }, } + gw1 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway1", + }, + } + createTLSRouteWithSectionNameAndPort := func( name string, - sectionName *gatewayv1.SectionName, + parentRef []parentRef, ns string, hostnames ...gatewayv1.Hostname, ) *v1alpha2.TLSRoute { + var parentRefs []gatewayv1.ParentReference + for _, p := range parentRef { + parentRefs = append(parentRefs, gatewayv1.ParentReference{ + Name: gatewayv1.ObjectName(p.gw.Name), + SectionName: p.sectionName, + }) + } return &v1alpha2.TLSRoute{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, @@ -2198,12 +2414,7 @@ func TestIsolateL4Listeners(t *testing.T) { }, Spec: v1alpha2.TLSRouteSpec{ CommonRouteSpec: gatewayv1.CommonRouteSpec{ - ParentRefs: []gatewayv1.ParentReference{ - { - Name: gatewayv1.ObjectName(gw.Name), - SectionName: sectionName, - }, - }, + ParentRefs: parentRefs, }, Hostnames: hostnames, }, @@ -2213,31 +2424,56 @@ func TestIsolateL4Listeners(t *testing.T) { routeHostnames := []gatewayv1.Hostname{"bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com"} tr1 := createTLSRouteWithSectionNameAndPort( "tr1", - helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + }, + }, "test", routeHostnames..., ) tr2 := createTLSRouteWithSectionNameAndPort( "tr2", - helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, "test", routeHostnames..., ) tr3 := createTLSRouteWithSectionNameAndPort( "tr3", - helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + }, + }, "test", routeHostnames..., ) tr4 := createTLSRouteWithSectionNameAndPort( "tr4", - helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + }, + }, "test", routeHostnames..., ) tr5 := createTLSRouteWithSectionNameAndPort( "tr5", - helpers.GetPointer[gatewayv1.SectionName]("no-match"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("no-match"), + }, + }, "test", routeHostnames..., ) @@ -2256,11 +2492,8 @@ func TestIsolateL4Listeners(t *testing.T) { }, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKey{ - Namespace: gw.Namespace, - Name: gw.Name, - }, + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: sectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: acceptedHostnames, @@ -2273,29 +2506,29 @@ func TestIsolateL4Listeners(t *testing.T) { } acceptedHostnamesEmptyHostname := 
map[string][]string{ - "empty-hostname": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { "bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesWildcardExample := map[string][]string{ - "wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesFooWildcardExample := map[string][]string{ - "foo-wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesAbcCom := map[string][]string{ - "abc-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { "abc.foo.example.com", }, } acceptedHostnamesNoMatch := map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, } routesHostnameIntersection := []*L4Route{ @@ -2336,22 +2569,42 @@ func TestIsolateL4Listeners(t *testing.T) { } listenerMapHostnameIntersection := map[string]hostPort{ - "empty-hostname": {hostname: "", port: 80}, - "wildcard-example-com": {hostname: "*.example.com", port: 80}, - "foo-wildcard-example-com": {hostname: "*.foo.example.com", port: 80}, - "abc-com": {hostname: "abc.foo.example.com", port: 80}, - "no-match": {hostname: "no-match.cafe.com", port: 80}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { + hostname: "", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { + hostname: "*.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { + hostname: "*.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { + hostname: "abc.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): { + hostname: "no-match.cafe.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, } expectedResultHostnameIntersection := map[string][]ParentRef{ "tr1": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr1.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "empty-hostname": {"bar.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): {"bar.com"}, }, Attached: true, ListenerPort: 80, @@ -2361,11 +2614,14 @@ func TestIsolateL4Listeners(t *testing.T) { "tr2": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr2.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "wildcard-example-com": {"*.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "wildcard-example-com", + ): {"*.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2375,11 +2631,14 @@ func TestIsolateL4Listeners(t *testing.T) { "tr3": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: 
tr3.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "foo-wildcard-example-com": {"*.foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "foo-wildcard-example-com", + ): {"*.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2389,11 +2648,11 @@ func TestIsolateL4Listeners(t *testing.T) { "tr4": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr4.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "abc-com": {"abc.foo.example.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): {"abc.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2403,11 +2662,11 @@ func TestIsolateL4Listeners(t *testing.T) { "tr5": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: tr5.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, }, Attached: true, ListenerPort: 80, @@ -2444,6 +2703,43 @@ func TestIsolateL4Listeners(t *testing.T) { "tls_flavor": {"flavor.example.com"}, } + routeHostname := []gatewayv1.Hostname{"coffee.example.com", "flavor.example.com"} + acceptedHostanamesMultipleGateways := map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + } + tlsCoffeeRoute1 := createTLSRouteWithSectionNameAndPort( + "tls_coffee", + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + + tlsFlavorRoute1 := createTLSRouteWithSectionNameAndPort( + "tls_flavor", + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + tests := []struct { expectedResult map[string][]ParentRef listenerMap map[string]hostPort @@ -2482,15 +2778,15 @@ func TestIsolateL4Listeners(t *testing.T) { ), }, listenerMap: map[string]hostPort{ - "tls_coffee": {hostname: "coffee.example.com", port: 443}, - "tls_tea": {hostname: "tea.example.com", port: 443}, - "tls_flavor": {hostname: "flavor.example.com", port: 443}, + "tls_coffee,test,gateway": {hostname: "coffee.example.com", port: 443}, + "tls_tea,test,gateway": {hostname: "tea.example.com", port: 443}, + "tls_flavor,test,gateway": {hostname: "flavor.example.com", port: 443}, }, expectedResult: map[string][]ParentRef{ "tls_coffee": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "tls_coffee": {"coffee.example.com"}, @@ -2505,7 +2801,7 @@ func TestIsolateL4Listeners(t *testing.T) { "tls_tea": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: 
client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "tls_coffee": {"coffee.example.com"}, @@ -2520,7 +2816,7 @@ func TestIsolateL4Listeners(t *testing.T) { "tls_flavor": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "tls_coffee": {"coffee.example.com"}, @@ -2534,6 +2830,137 @@ func TestIsolateL4Listeners(t *testing.T) { }, }, }, + { + name: "no listener isolation for routes with overlapping hostnames but different gateways", + routes: []*L4Route{ + { + Source: tlsCoffeeRoute1, + Spec: L4RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + }, + }, + { + Source: tlsFlavorRoute1, + Spec: L4RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostanamesMultipleGateways, + Attached: true, + ListenerPort: gatewayv1.PortNumber(443), + }, + }, + }, + }, + }, + listenerMap: map[string]hostPort{ + "wildcard-example-com,test,gateway": { + hostname: "*.example.com", + port: 443, + gwNsName: client.ObjectKeyFromObject(gw), + }, + "wildcard-example-com,test,gateway1": { + hostname: "*.example.com", + port: 443, + gwNsName: client.ObjectKeyFromObject(gw), + }, + }, + expectedResult: map[string][]ParentRef{ + "tls_coffee": { + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + }, + "tls_flavor": { + { + Idx: 0, + Gateway: 
&ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsFlavorRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: tlsCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "tls_coffee": {"coffee.example.com", "flavor.example.com"}, + "tls_flavor": {"coffee.example.com", "flavor.example.com"}, + }, + ListenerPort: 443, + Attached: true, + }, + }, + }, + }, + }, } for _, test := range tests { @@ -2560,12 +2987,26 @@ func TestIsolateL7Listeners(t *testing.T) { }, } + gw1 := &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway1", + }, + } + createHTTPRouteWithSectionNameAndPort := func( name string, - sectionName *gatewayv1.SectionName, + parentRef []parentRef, ns string, hostnames ...gatewayv1.Hostname, ) *gatewayv1.HTTPRoute { + var parentRefs []gatewayv1.ParentReference + for _, p := range parentRef { + parentRefs = append(parentRefs, gatewayv1.ParentReference{ + Name: gatewayv1.ObjectName(p.gw.Name), + SectionName: p.sectionName, + }) + } return &gatewayv1.HTTPRoute{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, @@ -2573,12 +3014,7 @@ func TestIsolateL7Listeners(t *testing.T) { }, Spec: gatewayv1.HTTPRouteSpec{ CommonRouteSpec: gatewayv1.CommonRouteSpec{ - ParentRefs: []gatewayv1.ParentReference{ - { - Name: gatewayv1.ObjectName(gw.Name), - SectionName: sectionName, - }, - }, + ParentRefs: parentRefs, }, Hostnames: hostnames, }, @@ -2599,11 +3035,8 @@ func TestIsolateL7Listeners(t *testing.T) { }, ParentRefs: []ParentRef{ { - Idx: 0, - Gateway: client.ObjectKey{ - Namespace: gw.Namespace, - Name: gw.Name, - }, + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: sectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: acceptedHostnames, @@ -2618,59 +3051,84 @@ func TestIsolateL7Listeners(t *testing.T) { routeHostnames := []gatewayv1.Hostname{"bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com"} hr1 := createHTTPRouteWithSectionNameAndPort( "hr1", - helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("empty-hostname"), + }, + }, "test", routeHostnames..., ) hr2 := createHTTPRouteWithSectionNameAndPort( "hr2", - helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, "test", routeHostnames..., ) hr3 := createHTTPRouteWithSectionNameAndPort( "hr3", - helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("foo-wildcard-example-com"), + }, + }, "test", routeHostnames..., ) hr4 := createHTTPRouteWithSectionNameAndPort( "hr4", - helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("abc-com"), + }, + }, "test", 
routeHostnames..., ) hr5 := createHTTPRouteWithSectionNameAndPort( "hr5", - helpers.GetPointer[gatewayv1.SectionName]("no-match"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("no-match"), + }, + }, "test", routeHostnames..., // no matching hostname ) acceptedHostnamesEmptyHostname := map[string][]string{ - "empty-hostname": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { "bar.com", "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesWildcardExample := map[string][]string{ - "wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { "*.example.com", "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesFooWildcardExample := map[string][]string{ - "foo-wildcard-example-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { "*.foo.example.com", "abc.foo.example.com", }, } acceptedHostnamesAbcCom := map[string][]string{ - "abc-com": { + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { "abc.foo.example.com", }, } acceptedHostnamesNoMatch := map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, } routesHostnameIntersection := []*L7Route{ @@ -2712,22 +3170,42 @@ func TestIsolateL7Listeners(t *testing.T) { } listenerMapHostnameIntersection := map[string]hostPort{ - "empty-hostname": {hostname: "", port: 80}, - "wildcard-example-com": {hostname: "*.example.com", port: 80}, - "foo-wildcard-example-com": {hostname: "*.foo.example.com", port: 80}, - "abc-com": {hostname: "abc.foo.example.com", port: 80}, - "no-match": {hostname: "no-match.cafe.com", port: 80}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): { + hostname: "", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "wildcard-example-com"): { + hostname: "*.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "foo-wildcard-example-com"): { + hostname: "*.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): { + hostname: "abc.foo.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): { + hostname: "no-match.cafe.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, } expectedResultHostnameIntersection := map[string][]ParentRef{ "hr1": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr1.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "empty-hostname": {"bar.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "empty-hostname"): {"bar.com"}, }, Attached: true, ListenerPort: 80, @@ -2737,11 +3215,14 @@ func TestIsolateL7Listeners(t *testing.T) { "hr2": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr2.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "wildcard-example-com": {"*.example.com"}, + 
CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "wildcard-example-com", + ): {"*.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2751,11 +3232,14 @@ func TestIsolateL7Listeners(t *testing.T) { "hr3": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr3.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "foo-wildcard-example-com": {"*.foo.example.com"}, + CreateGatewayListenerKey( + client.ObjectKeyFromObject(gw), + "foo-wildcard-example-com", + ): {"*.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2765,11 +3249,11 @@ func TestIsolateL7Listeners(t *testing.T) { "hr4": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr4.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "abc-com": {"abc.foo.example.com"}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "abc-com"): {"abc.foo.example.com"}, }, Attached: true, ListenerPort: 80, @@ -2779,11 +3263,11 @@ func TestIsolateL7Listeners(t *testing.T) { "hr5": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: hr5.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ - "no-match": {}, + CreateGatewayListenerKey(client.ObjectKeyFromObject(gw), "no-match"): {}, }, Attached: true, ListenerPort: 80, @@ -2795,7 +3279,12 @@ func TestIsolateL7Listeners(t *testing.T) { routeHostnameCafeExample := []gatewayv1.Hostname{"cafe.example.com"} httpListenerRoute := createHTTPRouteWithSectionNameAndPort( "hr_cafe", - helpers.GetPointer[gatewayv1.SectionName]("http"), + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("http"), + }, + }, "test", routeHostnameCafeExample..., ) @@ -2834,6 +3323,45 @@ func TestIsolateL7Listeners(t *testing.T) { "hr_flavor": {"flavor.example.com"}, } + routeHostname := []gatewayv1.Hostname{"cafe.example.com", "flavor.example.com"} + + acceptedHostNamesMultipleGateway := map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + } + + hrCoffeeRoute1 := createHTTPRouteWithSectionNameAndPort( + "hr_coffee", + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + + hrFlavorRoute1 := createHTTPRouteWithSectionNameAndPort( + "hr_flavor", + []parentRef{ + { + gw: client.ObjectKeyFromObject(gw), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + { + gw: client.ObjectKeyFromObject(gw1), + sectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + }, + }, + "test", + routeHostname..., + ) + tests := []struct { expectedResult map[string][]ParentRef listenersMap map[string]hostPort @@ -2858,14 +3386,14 @@ func TestIsolateL7Listeners(t *testing.T) { ), }, listenersMap: map[string]hostPort{ - "http": {hostname: "cafe.example.com", port: 80}, - "http-different": {hostname: "cafe.example.com", 
port: 8080}, + "http,test,gateway": {hostname: "cafe.example.com", port: 80}, + "http-different,test,gateway": {hostname: "cafe.example.com", port: 8080}, }, expectedResult: map[string][]ParentRef{ "hr_cafe": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, SectionName: httpListenerRoute.Spec.ParentRefs[0].SectionName, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ @@ -2904,15 +3432,15 @@ func TestIsolateL7Listeners(t *testing.T) { ), }, listenersMap: map[string]hostPort{ - "hr_coffee": {hostname: "coffee.example.com", port: 80}, - "hr_tea": {hostname: "tea.example.com", port: 80}, - "hr_flavor": {hostname: "flavor.example.com", port: 80}, + "hr_coffee,test,gateway": {hostname: "coffee.example.com", port: 80}, + "hr_tea,test,gateway": {hostname: "tea.example.com", port: 80}, + "hr_flavor,test,gateway": {hostname: "flavor.example.com", port: 80}, }, expectedResult: map[string][]ParentRef{ "hr_coffee": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "hr_coffee": {"coffee.example.com"}, @@ -2927,7 +3455,7 @@ func TestIsolateL7Listeners(t *testing.T) { "hr_tea": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "hr_coffee": {"coffee.example.com"}, @@ -2942,7 +3470,7 @@ func TestIsolateL7Listeners(t *testing.T) { "hr_flavor": { { Idx: 0, - Gateway: client.ObjectKeyFromObject(gw), + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, Attachment: &ParentRefAttachmentStatus{ AcceptedHostnames: map[string][]string{ "hr_coffee": {"coffee.example.com"}, @@ -2956,6 +3484,137 @@ func TestIsolateL7Listeners(t *testing.T) { }, }, }, + { + name: "no listener isolation for routes with same hostname, associated with different gateways", + routes: []*L7Route{ + { + Source: hrCoffeeRoute1, + Spec: L7RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + }, + }, + { + Source: hrFlavorRoute1, + Spec: L7RouteSpec{ + Hostnames: routeHostname, + }, + ParentRefs: []ParentRef{ + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: 
helpers.GetPointer[gatewayv1.SectionName]("wildcard-example-com"), + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: acceptedHostNamesMultipleGateway, + Attached: true, + ListenerPort: gatewayv1.PortNumber(80), + }, + }, + }, + }, + }, + listenersMap: map[string]hostPort{ + "wildcard-example-com,test,gateway": { + hostname: "*.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw), + }, + "wildcard-example-com,test,gateway1": { + hostname: "*.example.com", + port: 80, + gwNsName: client.ObjectKeyFromObject(gw1), + }, + }, + expectedResult: map[string][]ParentRef{ + "hr_coffee": { + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrCoffeeRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrCoffeeRoute1.Spec.ParentRefs[1].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + }, + "hr_flavor": { + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrFlavorRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + { + Idx: 0, + Gateway: &ParentRefGateway{NamespacedName: client.ObjectKeyFromObject(gw)}, + SectionName: hrFlavorRoute1.Spec.ParentRefs[0].SectionName, + Attachment: &ParentRefAttachmentStatus{ + AcceptedHostnames: map[string][]string{ + "hr_cafe": {"cafe.example.com", "flavor.example.com"}, + "hr_flavor": {"cafe.example.com", "flavor.example.com"}, + }, + ListenerPort: 80, + Attached: true, + }, + }, + }, + }, + }, } for _, test := range tests { @@ -3018,3 +3677,12 @@ func TestRemoveHostnames(t *testing.T) { }) } } + +func TestBindRoutesToListeners(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + g.Expect(func() { + bindRoutesToListeners(nil, nil, nil, nil) + }).ToNot(Panic()) +} diff --git a/internal/mode/static/state/graph/service.go b/internal/mode/static/state/graph/service.go index ad6fb817ef..7a41b07132 100644 --- a/internal/mode/static/state/graph/service.go +++ b/internal/mode/static/state/graph/service.go @@ -5,10 +5,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// A ReferencedService represents a Kubernetes Service that is referenced by a Route and that belongs to the -// winning Gateway. It does not contain the v1.Service object, because Services are resolved when building +// A ReferencedService represents a Kubernetes Service that is referenced by a Route and the Gateways it belongs to. +// It does not contain the v1.Service object, because Services are resolved when building // the dataplane.Configuration. type ReferencedService struct { + // GatewayNsNames are all the Gateways that this Service indirectly attaches to through a Route. 
+ GatewayNsNames map[types.NamespacedName]struct{} // Policies is a list of NGF Policies that target this Service. Policies []*Policy } @@ -16,72 +18,43 @@ type ReferencedService struct { func buildReferencedServices( l7routes map[RouteKey]*L7Route, l4Routes map[L4RouteKey]*L4Route, - gw *Gateway, + gws map[types.NamespacedName]*Gateway, ) map[types.NamespacedName]*ReferencedService { - if gw == nil { - return nil - } - referencedServices := make(map[types.NamespacedName]*ReferencedService) - - belongsToWinningGw := func(refs []ParentRef) bool { - for _, ref := range refs { - if ref.Gateway == client.ObjectKeyFromObject(gw.Source) { - return true - } + for gwNsName, gw := range gws { + if gw == nil { + continue } - return false - } - - // Processes both valid and invalid BackendRefs as invalid ones still have referenced services - // we may want to track. - addServicesForL7Routes := func(routeRules []RouteRule) { - for _, rule := range routeRules { - for _, ref := range rule.BackendRefs { - if ref.SvcNsName != (types.NamespacedName{}) { - referencedServices[ref.SvcNsName] = &ReferencedService{ - Policies: nil, - } + belongsToGw := func(refs []ParentRef) bool { + for _, ref := range refs { + if ref.Gateway.NamespacedName == client.ObjectKeyFromObject(gw.Source) { + return true } } + return false } - } - addServicesForL4Routes := func(route *L4Route) { - nsname := route.Spec.BackendRef.SvcNsName - if nsname != (types.NamespacedName{}) { - referencedServices[nsname] = &ReferencedService{ - Policies: nil, + // routes all have populated ParentRefs from when they were created. + // + // Get all the service names referenced from all the l7 and l4 routes. + for _, route := range l7routes { + if !route.Valid || !belongsToGw(route.ParentRefs) { + continue } - } - } - - // routes all have populated ParentRefs from when they were created. - // - // Get all the service names referenced from all the l7 and l4 routes. - for _, route := range l7routes { - if !route.Valid { - continue - } - if !belongsToWinningGw(route.ParentRefs) { - continue + // Processes both valid and invalid BackendRefs as invalid ones still have referenced services + // we may want to track. 
+ addServicesAndGatewayForL7Routes(route.Spec.Rules, gwNsName, referencedServices) } - addServicesForL7Routes(route.Spec.Rules) - } - - for _, route := range l4Routes { - if !route.Valid { - continue - } + for _, route := range l4Routes { + if !route.Valid || !belongsToGw(route.ParentRefs) { + continue + } - if !belongsToWinningGw(route.ParentRefs) { - continue + addServicesAndGatewayForL4Routes(route, gwNsName, referencedServices) } - - addServicesForL4Routes(route) } if len(referencedServices) == 0 { @@ -90,3 +63,41 @@ func buildReferencedServices( return referencedServices } + +func addServicesAndGatewayForL4Routes( + route *L4Route, + gwNsName types.NamespacedName, + referencedServices map[types.NamespacedName]*ReferencedService, +) { + nsname := route.Spec.BackendRef.SvcNsName + if nsname != (types.NamespacedName{}) { + if _, ok := referencedServices[nsname]; !ok { + referencedServices[nsname] = &ReferencedService{ + Policies: nil, + GatewayNsNames: make(map[types.NamespacedName]struct{}), + } + } + referencedServices[nsname].GatewayNsNames[gwNsName] = struct{}{} + } +} + +func addServicesAndGatewayForL7Routes( + routeRules []RouteRule, + gwNsName types.NamespacedName, + referencedServices map[types.NamespacedName]*ReferencedService, +) { + for _, rule := range routeRules { + for _, ref := range rule.BackendRefs { + if ref.SvcNsName != (types.NamespacedName{}) { + if _, ok := referencedServices[ref.SvcNsName]; !ok { + referencedServices[ref.SvcNsName] = &ReferencedService{ + Policies: nil, + GatewayNsNames: make(map[types.NamespacedName]struct{}), + } + } + + referencedServices[ref.SvcNsName].GatewayNsNames[gwNsName] = struct{}{} + } + } + } +} diff --git a/internal/mode/static/state/graph/service_test.go b/internal/mode/static/state/graph/service_test.go index 0fa316e73f..e0ef7180ce 100644 --- a/internal/mode/static/state/graph/service_test.go +++ b/internal/mode/static/state/graph/service_test.go @@ -12,25 +12,49 @@ import ( func TestBuildReferencedServices(t *testing.T) { t.Parallel() - gwNsname := types.NamespacedName{Namespace: "test", Name: "gwNsname"} - gw := &Gateway{ - Source: &v1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: gwNsname.Namespace, - Name: gwNsname.Name, + gwNsName := types.NamespacedName{Namespace: "test", Name: "gwNsname"} + gw2NsName := types.NamespacedName{Namespace: "test", Name: "gw2Nsname"} + gw3NsName := types.NamespacedName{Namespace: "test", Name: "gw3Nsname"} + gw := map[types.NamespacedName]*Gateway{ + gwNsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: gwNsName.Namespace, + Name: gwNsName.Name, + }, + }, + }, + gw2NsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: gw2NsName.Namespace, + Name: gw2NsName.Name, + }, + }, + }, + gw3NsName: { + Source: &v1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: gw3NsName.Namespace, + Name: gw3NsName.Name, + }, }, }, } - ignoredGw := types.NamespacedName{Namespace: "test", Name: "ignoredGw"} + + parentRefs := []ParentRef{ + { + Gateway: &ParentRefGateway{NamespacedName: gwNsName}, + }, + { + Gateway: &ParentRefGateway{NamespacedName: gw2NsName}, + }, + } getNormalL7Route := func() *L7Route { return &L7Route{ - ParentRefs: []ParentRef{ - { - Gateway: gwNsname, - }, - }, - Valid: true, + ParentRefs: parentRefs, + Valid: true, Spec: L7RouteSpec{ Rules: []RouteRule{ { @@ -57,12 +81,8 @@ func TestBuildReferencedServices(t *testing.T) { SvcNsName: types.NamespacedName{Namespace: "tlsroute-ns", Name: "service"}, }, }, - Valid: true, - ParentRefs: 
[]ParentRef{ - { - Gateway: gwNsname, - }, - }, + Valid: true, + ParentRefs: parentRefs, } } @@ -137,56 +157,16 @@ func TestBuildReferencedServices(t *testing.T) { return route }) - normalL4RouteWinningAndIgnoredGws := getModifiedL4Route(func(route *L4Route) *L4Route { - route.ParentRefs = []ParentRef{ - { - Gateway: ignoredGw, - }, - { - Gateway: ignoredGw, - }, - { - Gateway: gwNsname, - }, - } - return route - }) - - normalRouteWinningAndIgnoredGws := getModifiedL7Route(func(route *L7Route) *L7Route { - route.ParentRefs = []ParentRef{ - { - Gateway: ignoredGw, - }, - { - Gateway: gwNsname, - }, - { - Gateway: ignoredGw, - }, - } - return route - }) - - normalL4RouteIgnoredGw := getModifiedL4Route(func(route *L4Route) *L4Route { - route.ParentRefs[0].Gateway = ignoredGw - return route - }) - - normalL7RouteIgnoredGw := getModifiedL7Route(func(route *L7Route) *L7Route { - route.ParentRefs[0].Gateway = ignoredGw - return route - }) - tests := []struct { l7Routes map[RouteKey]*L7Route l4Routes map[L4RouteKey]*L4Route exp map[types.NamespacedName]*ReferencedService - gw *Gateway + gws map[types.NamespacedName]*Gateway name string }{ { name: "normal routes", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "normal-route"}}: normalRoute, }, @@ -194,35 +174,65 @@ func TestBuildReferencedServices(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "normal-l4-route"}}: normalL4Route, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, + {Namespace: "banana-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "l7 route with two services in one Rule", // l4 routes don't support multiple services right now - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "two-svc-one-rule"}}: validRouteTwoServicesOneRule, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: "service2"}: {}, + {Namespace: "service-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "service-ns2", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "route with one service per rule", // l4 routes don't support multiple rules right now - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "one-svc-per-rule"}}: validRouteTwoServicesTwoRules, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: "service2"}: {}, + {Namespace: "service-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "service-ns2", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + 
{Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "multiple valid routes with same services", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "one-svc-per-rule"}}: validRouteTwoServicesTwoRules, {NamespacedName: types.NamespacedName{Name: "two-svc-one-rule"}}: validRouteTwoServicesOneRule, @@ -233,57 +243,35 @@ func TestBuildReferencedServices(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "l4-route-same-svc-as-l7-route"}}: normalL4RouteWithSameSvcAsL7Route, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: "service2"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service2"}: {}, - }, - }, - { - name: "valid routes that do not belong to winning gateway", - gw: gw, - l7Routes: map[RouteKey]*L7Route{ - {NamespacedName: types.NamespacedName{Name: "belongs-to-ignored-gws"}}: normalL7RouteIgnoredGw, - }, - l4Routes: map[L4RouteKey]*L4Route{ - {NamespacedName: types.NamespacedName{Name: "belongs-to-ignored-gw"}}: normalL4RouteIgnoredGw, - }, - exp: nil, - }, - { - name: "valid routes that belong to both winning and ignored gateways", - gw: gw, - l7Routes: map[RouteKey]*L7Route{ - {NamespacedName: types.NamespacedName{Name: "belongs-to-ignored-gws"}}: normalRouteWinningAndIgnoredGws, - }, - l4Routes: map[L4RouteKey]*L4Route{ - {NamespacedName: types.NamespacedName{Name: "ignored-gw"}}: normalL4RouteWinningAndIgnoredGws, - }, - exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, - }, - }, - { - name: "valid routes with different services", - gw: gw, - l7Routes: map[RouteKey]*L7Route{ - {NamespacedName: types.NamespacedName{Name: "one-svc-per-rule"}}: validRouteTwoServicesTwoRules, - {NamespacedName: types.NamespacedName{Name: "normal-route"}}: normalRoute, - }, - l4Routes: map[L4RouteKey]*L4Route{ - {NamespacedName: types.NamespacedName{Name: "normal-l4-route"}}: normalL4Route, - }, - exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "service-ns", Name: "service"}: {}, - {Namespace: "service-ns2", Name: "service2"}: {}, - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, + {Namespace: "service-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "service-ns2", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: "service2"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "invalid routes", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "invalid-route"}}: invalidRoute, }, @@ -294,7 +282,7 @@ func TestBuildReferencedServices(t *testing.T) { }, { name: "combination of valid and invalid routes", - gw: gw, + gws: gw, l7Routes: 
map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "normal-route"}}: normalRoute, {NamespacedName: types.NamespacedName{Name: "invalid-route"}}: invalidRoute, @@ -304,13 +292,23 @@ func TestBuildReferencedServices(t *testing.T) { {NamespacedName: types.NamespacedName{Name: "normal-l4-route"}}: normalL4Route, }, exp: map[types.NamespacedName]*ReferencedService{ - {Namespace: "banana-ns", Name: "service"}: {}, - {Namespace: "tlsroute-ns", Name: "service"}: {}, + {Namespace: "banana-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, + {Namespace: "tlsroute-ns", Name: "service"}: { + GatewayNsNames: map[types.NamespacedName]struct{}{ + {Namespace: "test", Name: "gwNsname"}: {}, + {Namespace: "test", Name: "gw2Nsname"}: {}, + }, + }, }, }, { name: "valid route no service nsname", - gw: gw, + gws: gw, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "no-service-nsname"}}: validRouteNoServiceNsName, }, @@ -321,7 +319,9 @@ func TestBuildReferencedServices(t *testing.T) { }, { name: "nil gateway", - gw: nil, + gws: map[types.NamespacedName]*Gateway{ + gwNsName: nil, + }, l7Routes: map[RouteKey]*L7Route{ {NamespacedName: types.NamespacedName{Name: "no-service-nsname"}}: validRouteNoServiceNsName, }, @@ -337,7 +337,7 @@ func TestBuildReferencedServices(t *testing.T) { t.Parallel() g := NewWithT(t) - g.Expect(buildReferencedServices(test.l7Routes, test.l4Routes, test.gw)).To(Equal(test.exp)) + g.Expect(buildReferencedServices(test.l7Routes, test.l4Routes, test.gws)).To(Equal(test.exp)) }) } } diff --git a/internal/mode/static/state/graph/tlsroute.go b/internal/mode/static/state/graph/tlsroute.go index 051cd134db..8e471afad0 100644 --- a/internal/mode/static/state/graph/tlsroute.go +++ b/internal/mode/static/state/graph/tlsroute.go @@ -7,22 +7,20 @@ import ( "sigs.k8s.io/gateway-api/apis/v1alpha2" "github.com/nginx/nginx-gateway-fabric/internal/framework/conditions" - "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" staticConds "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/conditions" ) func buildTLSRoute( gtr *v1alpha2.TLSRoute, - gatewayNsNames []types.NamespacedName, + gws map[types.NamespacedName]*Gateway, services map[types.NamespacedName]*apiv1.Service, - npCfg *EffectiveNginxProxy, refGrantResolver func(resource toResource) bool, ) *L4Route { r := &L4Route{ Source: gtr, } - sectionNameRefs, err := buildSectionNameRefs(gtr.Spec.ParentRefs, gtr.Namespace, gatewayNsNames) + sectionNameRefs, err := buildSectionNameRefs(gtr.Spec.ParentRefs, gtr.Namespace, gws) if err != nil { r.Valid = false @@ -54,14 +52,14 @@ func buildTLSRoute( return r } - br, cond := validateBackendRefTLSRoute(gtr, services, npCfg, refGrantResolver) + br, conds := validateBackendRefTLSRoute(gtr, services, r.ParentRefs, refGrantResolver) r.Spec.BackendRef = br r.Valid = true r.Attachable = true - if cond != nil { - r.Conditions = append(r.Conditions, *cond) + if len(conds) > 0 { + r.Conditions = append(r.Conditions, conds...) 
} return r @@ -70,9 +68,9 @@ func buildTLSRoute( func validateBackendRefTLSRoute( gtr *v1alpha2.TLSRoute, services map[types.NamespacedName]*apiv1.Service, - npCfg *EffectiveNginxProxy, + parentRefs []ParentRef, refGrantResolver func(resource toResource) bool, -) (BackendRef, *conditions.Condition) { +) (BackendRef, []conditions.Condition) { // Length of BackendRefs and Rules is guaranteed to be one due to earlier check in buildTLSRoute refPath := field.NewPath("spec").Child("rules").Index(0).Child("backendRefs").Index(0) @@ -85,10 +83,11 @@ func validateBackendRefTLSRoute( refPath, ); !valid { backendRef := BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: make(map[types.NamespacedName]conditions.Condition), } - return backendRef, &cond + return backendRef, []conditions.Condition{cond} } ns := gtr.Namespace @@ -109,22 +108,25 @@ func validateBackendRefTLSRoute( ) backendRef := BackendRef{ - SvcNsName: svcNsName, - ServicePort: svcPort, - Valid: true, + SvcNsName: svcNsName, + ServicePort: svcPort, + Valid: true, + InvalidForGateways: make(map[types.NamespacedName]conditions.Condition), } if err != nil { backendRef.Valid = false - return backendRef, helpers.GetPointer(staticConds.NewRouteBackendRefRefBackendNotFound(err.Error())) + return backendRef, []conditions.Condition{staticConds.NewRouteBackendRefRefBackendNotFound(err.Error())} } - if err := verifyIPFamily(npCfg, svcIPFamily); err != nil { - backendRef.Valid = false - - return backendRef, helpers.GetPointer(staticConds.NewRouteInvalidIPFamily(err.Error())) + var conds []conditions.Condition + for _, parentRef := range parentRefs { + if err := verifyIPFamily(parentRef.Gateway.EffectiveNginxProxy, svcIPFamily); err != nil { + backendRef.Valid = backendRef.Valid || false + backendRef.InvalidForGateways[parentRef.Gateway.NamespacedName] = staticConds.NewRouteInvalidIPFamily(err.Error()) + } } - return backendRef, nil + return backendRef, conds } diff --git a/internal/mode/static/state/graph/tlsroute_test.go b/internal/mode/static/state/graph/tlsroute_test.go index 40af4729aa..988013a594 100644 --- a/internal/mode/static/state/graph/tlsroute_test.go +++ b/internal/mode/static/state/graph/tlsroute_test.go @@ -7,6 +7,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" "sigs.k8s.io/gateway-api/apis/v1alpha2" @@ -44,13 +45,31 @@ func TestBuildTLSRoute(t *testing.T) { Name: "gateway", SectionName: helpers.GetPointer[gatewayv1.SectionName]("l1"), } - gatewayNsName := types.NamespacedName{ - Namespace: "test", - Name: "gateway", + + createGateway := func() *Gateway { + return &Gateway{ + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + } + } + + modGateway := func(gw *Gateway, mod func(*Gateway) *Gateway) *Gateway { + return mod(gw) } + parentRefGraph := ParentRef{ SectionName: helpers.GetPointer[gatewayv1.SectionName]("l1"), - Gateway: gatewayNsName, + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + }, } duplicateParentRefsGtr := createTLSRoute( "hi.example.com", @@ -267,13 +286,12 @@ func TestBuildTLSRoute(t *testing.T) { alwaysFalseRefGrantResolver := func(_ toResource) bool { return false } tests := []struct { - expected *L4Route - gtr *v1alpha2.TLSRoute - services map[types.NamespacedName]*apiv1.Service - resolver 
func(resource toResource) bool - npCfg *EffectiveNginxProxy - name string - gatewayNsNames []types.NamespacedName + expected *L4Route + gtr *v1alpha2.TLSRoute + services map[types.NamespacedName]*apiv1.Service + resolver func(resource toResource) bool + gateway *Gateway + name string }{ { gtr: duplicateParentRefsGtr, @@ -281,18 +299,18 @@ func TestBuildTLSRoute(t *testing.T) { Source: duplicateParentRefsGtr, Valid: false, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "duplicate parent refs", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "duplicate parent refs", }, { - gtr: noParentRefsGtr, - expected: nil, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "no parent refs", + gtr: noParentRefsGtr, + expected: nil, + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "no parent refs", }, { gtr: invalidHostnameGtr, @@ -308,10 +326,10 @@ func TestBuildTLSRoute(t *testing.T) { )}, Valid: false, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "invalid hostname", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "invalid hostname", }, { gtr: noRulesGtr, @@ -328,10 +346,10 @@ func TestBuildTLSRoute(t *testing.T) { )}, Valid: false, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "invalid rule", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "invalid rule", }, { gtr: backedRefDNEGtr, @@ -347,7 +365,8 @@ func TestBuildTLSRoute(t *testing.T) { Namespace: "test", Name: "hi", }, - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefRefBackendNotFound( @@ -356,10 +375,10 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, - services: map[types.NamespacedName]*apiv1.Service{}, - resolver: alwaysTrueRefGrantResolver, - name: "BackendRef not found", + gateway: createGateway(), + services: map[types.NamespacedName]*apiv1.Service{}, + resolver: alwaysTrueRefGrantResolver, + name: "BackendRef not found", }, { gtr: wrongBackendRefGroupGtr, @@ -371,7 +390,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefInvalidKind( @@ -381,7 +401,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: createSvc("hi", 80), }, @@ -398,7 +418,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: 
map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefInvalidKind( @@ -408,7 +429,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: createSvc("hi", 80), }, @@ -425,7 +446,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefRefNotPermitted( @@ -435,7 +457,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ diffSvcNsName: diffNsSvc, }, @@ -452,7 +474,8 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - Valid: false, + Valid: false, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Conditions: []conditions.Condition{staticConds.NewRouteBackendRefUnsupportedValue( @@ -461,7 +484,7 @@ func TestBuildTLSRoute(t *testing.T) { Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ diffSvcNsName: createSvc("hi", 80), }, @@ -471,8 +494,19 @@ func TestBuildTLSRoute(t *testing.T) { { gtr: ipFamilyMismatchGtr, expected: &L4Route{ - Source: ipFamilyMismatchGtr, - ParentRefs: []ParentRef{parentRefGraph}, + Source: ipFamilyMismatchGtr, + ParentRefs: []ParentRef{ + { + SectionName: helpers.GetPointer[gatewayv1.SectionName]("l1"), + Gateway: &ParentRefGateway{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "gateway", + }, + EffectiveNginxProxy: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}, + }, + }, + }, Spec: L4RouteSpec{ Hostnames: []gatewayv1.Hostname{ "app.example.com", @@ -480,19 +514,24 @@ func TestBuildTLSRoute(t *testing.T) { BackendRef: BackendRef{ SvcNsName: svcNsName, ServicePort: apiv1.ServicePort{Port: 80}, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{ + {Namespace: "test", Name: "gateway"}: staticConds.NewRouteInvalidIPFamily( + "service configured with IPv4 family but NginxProxy is configured with IPv6", + ), + }, + Valid: true, }, }, - Conditions: []conditions.Condition{staticConds.NewRouteInvalidIPFamily( - "service configured with IPv4 family but NginxProxy is configured with IPv6", - )}, Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: modGateway(createGateway(), func(gw *Gateway) *Gateway { + gw.EffectiveNginxProxy = &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPI.IPv6)} + return gw + }), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: ipv4Svc, }, - npCfg: &EffectiveNginxProxy{IPFamily: helpers.GetPointer(ngfAPI.IPv6)}, resolver: alwaysTrueRefGrantResolver, name: "service and npcfg ip family mismatch", }, @@ -506,15 +545,16 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - SvcNsName: diffSvcNsName, - ServicePort: apiv1.ServicePort{Port: 80}, - Valid: true, + SvcNsName: diffSvcNsName, + ServicePort: apiv1.ServicePort{Port: 80}, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Attachable: 
true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ diffSvcNsName: diffNsSvc, }, @@ -531,15 +571,16 @@ func TestBuildTLSRoute(t *testing.T) { "app.example.com", }, BackendRef: BackendRef{ - SvcNsName: svcNsName, - ServicePort: apiv1.ServicePort{Port: 80}, - Valid: true, + SvcNsName: svcNsName, + ServicePort: apiv1.ServicePort{Port: 80}, + Valid: true, + InvalidForGateways: map[types.NamespacedName]conditions.Condition{}, }, }, Attachable: true, Valid: true, }, - gatewayNsNames: []types.NamespacedName{gatewayNsName}, + gateway: createGateway(), services: map[types.NamespacedName]*apiv1.Service{ svcNsName: ipv4Svc, }, @@ -555,9 +596,8 @@ func TestBuildTLSRoute(t *testing.T) { r := buildTLSRoute( test.gtr, - test.gatewayNsNames, + map[types.NamespacedName]*Gateway{client.ObjectKeyFromObject(test.gateway.Source): test.gateway}, test.services, - test.npCfg, test.resolver, ) g.Expect(helpers.Diff(test.expected, r)).To(BeEmpty()) diff --git a/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go b/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go index 4460ec36e3..59883a9fc7 100644 --- a/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go +++ b/internal/mode/static/state/validation/validationfakes/fake_policy_validator.go @@ -22,11 +22,10 @@ type FakePolicyValidator struct { conflictsReturnsOnCall map[int]struct { result1 bool } - ValidateStub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + ValidateStub func(policies.Policy) []conditions.Condition validateMutex sync.RWMutex validateArgsForCall []struct { arg1 policies.Policy - arg2 *policies.GlobalSettings } validateReturns struct { result1 []conditions.Condition @@ -34,6 +33,18 @@ type FakePolicyValidator struct { validateReturnsOnCall map[int]struct { result1 []conditions.Condition } + ValidateGlobalSettingsStub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition + validateGlobalSettingsMutex sync.RWMutex + validateGlobalSettingsArgsForCall []struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + } + validateGlobalSettingsReturns struct { + result1 []conditions.Condition + } + validateGlobalSettingsReturnsOnCall map[int]struct { + result1 []conditions.Condition + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -100,19 +111,18 @@ func (fake *FakePolicyValidator) ConflictsReturnsOnCall(i int, result1 bool) { }{result1} } -func (fake *FakePolicyValidator) Validate(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { +func (fake *FakePolicyValidator) Validate(arg1 policies.Policy) []conditions.Condition { fake.validateMutex.Lock() ret, specificReturn := fake.validateReturnsOnCall[len(fake.validateArgsForCall)] fake.validateArgsForCall = append(fake.validateArgsForCall, struct { arg1 policies.Policy - arg2 *policies.GlobalSettings - }{arg1, arg2}) + }{arg1}) stub := fake.ValidateStub fakeReturns := fake.validateReturns - fake.recordInvocation("Validate", []interface{}{arg1, arg2}) + fake.recordInvocation("Validate", []interface{}{arg1}) fake.validateMutex.Unlock() if stub != nil { - return stub(arg1, arg2) + return stub(arg1) } if specificReturn { return ret.result1 @@ -126,17 +136,17 @@ func (fake *FakePolicyValidator) ValidateCallCount() int { return len(fake.validateArgsForCall) } -func (fake *FakePolicyValidator) ValidateCalls(stub func(policies.Policy, 
*policies.GlobalSettings) []conditions.Condition) { +func (fake *FakePolicyValidator) ValidateCalls(stub func(policies.Policy) []conditions.Condition) { fake.validateMutex.Lock() defer fake.validateMutex.Unlock() fake.ValidateStub = stub } -func (fake *FakePolicyValidator) ValidateArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { +func (fake *FakePolicyValidator) ValidateArgsForCall(i int) policies.Policy { fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() argsForCall := fake.validateArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1 } func (fake *FakePolicyValidator) ValidateReturns(result1 []conditions.Condition) { @@ -162,6 +172,68 @@ func (fake *FakePolicyValidator) ValidateReturnsOnCall(i int, result1 []conditio }{result1} } +func (fake *FakePolicyValidator) ValidateGlobalSettings(arg1 policies.Policy, arg2 *policies.GlobalSettings) []conditions.Condition { + fake.validateGlobalSettingsMutex.Lock() + ret, specificReturn := fake.validateGlobalSettingsReturnsOnCall[len(fake.validateGlobalSettingsArgsForCall)] + fake.validateGlobalSettingsArgsForCall = append(fake.validateGlobalSettingsArgsForCall, struct { + arg1 policies.Policy + arg2 *policies.GlobalSettings + }{arg1, arg2}) + stub := fake.ValidateGlobalSettingsStub + fakeReturns := fake.validateGlobalSettingsReturns + fake.recordInvocation("ValidateGlobalSettings", []interface{}{arg1, arg2}) + fake.validateGlobalSettingsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsCallCount() int { + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() + return len(fake.validateGlobalSettingsArgsForCall) +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsCalls(stub func(policies.Policy, *policies.GlobalSettings) []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = stub +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsArgsForCall(i int) (policies.Policy, *policies.GlobalSettings) { + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() + argsForCall := fake.validateGlobalSettingsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsReturns(result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + fake.validateGlobalSettingsReturns = struct { + result1 []conditions.Condition + }{result1} +} + +func (fake *FakePolicyValidator) ValidateGlobalSettingsReturnsOnCall(i int, result1 []conditions.Condition) { + fake.validateGlobalSettingsMutex.Lock() + defer fake.validateGlobalSettingsMutex.Unlock() + fake.ValidateGlobalSettingsStub = nil + if fake.validateGlobalSettingsReturnsOnCall == nil { + fake.validateGlobalSettingsReturnsOnCall = make(map[int]struct { + result1 []conditions.Condition + }) + } + fake.validateGlobalSettingsReturnsOnCall[i] = struct { + result1 []conditions.Condition + }{result1} +} + func (fake *FakePolicyValidator) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -169,6 +241,8 @@ func (fake *FakePolicyValidator) Invocations() map[string][][]interface{} { defer fake.conflictsMutex.RUnlock() 
fake.validateMutex.RLock() defer fake.validateMutex.RUnlock() + fake.validateGlobalSettingsMutex.RLock() + defer fake.validateGlobalSettingsMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/mode/static/state/validation/validator.go b/internal/mode/static/state/validation/validator.go index 26bf281b70..f012c3c6ec 100644 --- a/internal/mode/static/state/validation/validator.go +++ b/internal/mode/static/state/validation/validator.go @@ -55,7 +55,9 @@ type GenericValidator interface { //counterfeiter:generate . PolicyValidator type PolicyValidator interface { // Validate validates an NGF Policy. - Validate(policy policies.Policy, globalSettings *policies.GlobalSettings) []conditions.Condition + Validate(policy policies.Policy) []conditions.Condition + // ValidateGlobalSettings validates an NGF Policy with the NginxProxy settings. + ValidateGlobalSettings(policy policies.Policy, globalSettings *policies.GlobalSettings) []conditions.Condition // Conflicts returns true if the two Policies conflict. Conflicts(a, b policies.Policy) bool } diff --git a/internal/mode/static/status/prepare_requests.go b/internal/mode/static/status/prepare_requests.go index f3bd39c2a3..fc0cfe358a 100644 --- a/internal/mode/static/status/prepare_requests.go +++ b/internal/mode/static/status/prepare_requests.go @@ -111,8 +111,8 @@ func prepareRouteStatus( for _, ref := range parentRefs { failedAttachmentCondCount := 0 - if ref.Attachment != nil && !ref.Attachment.Attached { - failedAttachmentCondCount = 1 + if ref.Attachment != nil { + failedAttachmentCondCount = len(ref.Attachment.FailedConditions) } allConds := make([]conditions.Condition, 0, len(conds)+len(defaultConds)+failedAttachmentCondCount) @@ -120,8 +120,8 @@ func prepareRouteStatus( // ensured by DeduplicateConditions. allConds = append(allConds, defaultConds...) allConds = append(allConds, conds...) - if failedAttachmentCondCount == 1 { - allConds = append(allConds, ref.Attachment.FailedCondition) + if failedAttachmentCondCount > 0 { + allConds = append(allConds, ref.Attachment.FailedConditions...) } if nginxReloadRes.Error != nil { @@ -136,8 +136,8 @@ func prepareRouteStatus( ps := v1.RouteParentStatus{ ParentRef: v1.ParentReference{ - Namespace: helpers.GetPointer(v1.Namespace(ref.Gateway.Namespace)), - Name: v1.ObjectName(ref.Gateway.Name), + Namespace: helpers.GetPointer(v1.Namespace(ref.Gateway.NamespacedName.Namespace)), + Name: v1.ObjectName(ref.Gateway.NamespacedName.Name), SectionName: ref.SectionName, }, ControllerName: v1.GatewayController(gatewayCtlrName), @@ -205,28 +205,16 @@ func PrepareGatewayClassRequests( // PrepareGatewayRequests prepares status UpdateRequests for the given Gateways. 
func PrepareGatewayRequests( gateway *graph.Gateway, - ignoredGateways map[types.NamespacedName]*v1.Gateway, transitionTime metav1.Time, gwAddresses []v1.GatewayStatusAddress, nginxReloadRes graph.NginxReloadResult, ) []frameworkStatus.UpdateRequest { - reqs := make([]frameworkStatus.UpdateRequest, 0, 1+len(ignoredGateways)) + reqs := make([]frameworkStatus.UpdateRequest, 0, 1) if gateway != nil { reqs = append(reqs, prepareGatewayRequest(gateway, transitionTime, gwAddresses, nginxReloadRes)) } - for nsname, gw := range ignoredGateways { - apiConds := conditions.ConvertConditions(staticConds.NewGatewayConflict(), gw.Generation, transitionTime) - reqs = append(reqs, frameworkStatus.UpdateRequest{ - NsName: nsname, - ResourceType: &v1.Gateway{}, - Setter: newGatewayStatusSetter(v1.GatewayStatus{ - Conditions: apiConds, - }), - }) - } - return reqs } @@ -383,19 +371,24 @@ func PrepareBackendTLSPolicyRequests( conds := conditions.DeduplicateConditions(pol.Conditions) apiConds := conditions.ConvertConditions(conds, pol.Source.Generation, transitionTime) - status := v1alpha2.PolicyStatus{ - Ancestors: []v1alpha2.PolicyAncestorStatus{ - { - AncestorRef: v1.ParentReference{ - Namespace: (*v1.Namespace)(&pol.Gateway.Namespace), - Name: v1alpha2.ObjectName(pol.Gateway.Name), - Group: helpers.GetPointer[v1.Group](v1.GroupName), - Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), - }, - ControllerName: v1alpha2.GatewayController(gatewayCtlrName), - Conditions: apiConds, + policyAncestors := make([]v1alpha2.PolicyAncestorStatus, 0, len(pol.Gateways)) + for _, gwNsName := range pol.Gateways { + policyAncestorStatus := v1alpha2.PolicyAncestorStatus{ + AncestorRef: v1.ParentReference{ + Namespace: helpers.GetPointer(v1.Namespace(gwNsName.Namespace)), + Name: v1.ObjectName(gwNsName.Name), + Group: helpers.GetPointer[v1.Group](v1.GroupName), + Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), }, - }, + ControllerName: v1alpha2.GatewayController(gatewayCtlrName), + Conditions: apiConds, + } + + policyAncestors = append(policyAncestors, policyAncestorStatus) + } + + status := v1alpha2.PolicyStatus{ + Ancestors: policyAncestors, } reqs = append(reqs, frameworkStatus.UpdateRequest{ diff --git a/internal/mode/static/status/prepare_requests_test.go b/internal/mode/static/status/prepare_requests_test.go index 8bb8ca34f7..fbc5ede98e 100644 --- a/internal/mode/static/status/prepare_requests_test.go +++ b/internal/mode/static/status/prepare_requests_test.go @@ -71,6 +71,9 @@ var ( { SectionName: helpers.GetPointer[v1.SectionName]("listener-80-2"), }, + { + SectionName: helpers.GetPointer[v1.SectionName]("listener-80-3"), + }, }, } @@ -85,7 +88,7 @@ var ( parentRefsValid = []graph.ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, SectionName: commonRouteSpecValid.ParentRefs[0].SectionName, Attachment: &graph.ParentRefAttachmentStatus{ Attached: true, @@ -93,11 +96,20 @@ var ( }, { Idx: 1, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, SectionName: commonRouteSpecValid.ParentRefs[1].SectionName, Attachment: &graph.ParentRefAttachmentStatus{ - Attached: false, - FailedCondition: invalidAttachmentCondition, + Attached: false, + FailedConditions: []conditions.Condition{invalidAttachmentCondition}, + }, + }, + { + Idx: 2, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, + SectionName: commonRouteSpecValid.ParentRefs[2].SectionName, + Attachment: &graph.ParentRefAttachmentStatus{ + Attached: true, + FailedConditions: 
[]conditions.Condition{invalidAttachmentCondition}, }, }, } @@ -105,7 +117,7 @@ var ( parentRefsInvalid = []graph.ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, Attachment: nil, SectionName: commonRouteSpecInvalid.ParentRefs[0].SectionName, }, @@ -171,6 +183,38 @@ var ( }, }, }, + { + ParentRef: v1.ParentReference{ + Namespace: helpers.GetPointer(v1.Namespace(gwNsName.Namespace)), + Name: v1.ObjectName(gwNsName.Name), + SectionName: helpers.GetPointer[v1.SectionName]("listener-80-3"), + }, + ControllerName: gatewayCtlrName, + Conditions: []metav1.Condition{ + { + Type: string(v1.RouteConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + LastTransitionTime: transitionTime, + Reason: string(v1.RouteReasonAccepted), + Message: "The route is accepted", + }, + { + Type: string(v1.RouteConditionResolvedRefs), + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + LastTransitionTime: transitionTime, + Reason: string(v1.RouteReasonResolvedRefs), + Message: "All references are resolved", + }, + { + Type: invalidAttachmentCondition.Type, + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + LastTransitionTime: transitionTime, + }, + }, + }, }, } @@ -475,7 +519,7 @@ func TestBuildRouteStatusesNginxErr(t *testing.T) { ParentRefs: []graph.ParentRef{ { Idx: 0, - Gateway: gwNsName, + Gateway: &graph.ParentRefGateway{NamespacedName: gwNsName}, Attachment: &graph.ParentRefAttachmentStatus{ Attached: true, }, @@ -741,77 +785,15 @@ func TestBuildGatewayStatuses(t *testing.T) { routeKey := graph.RouteKey{NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-1"}} tests := []struct { - nginxReloadRes graph.NginxReloadResult - gateway *graph.Gateway - ignoredGateways map[types.NamespacedName]*v1.Gateway - expected map[types.NamespacedName]v1.GatewayStatus - name string + nginxReloadRes graph.NginxReloadResult + gateway *graph.Gateway + expected map[types.NamespacedName]v1.GatewayStatus + name string }{ { name: "nil gateway and no ignored gateways", expected: map[types.NamespacedName]v1.GatewayStatus{}, }, - { - name: "nil gateway and ignored gateways", - ignoredGateways: map[types.NamespacedName]*v1.Gateway{ - {Namespace: "test", Name: "ignored-1"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "ignored-1", - Namespace: "test", - Generation: 1, - }, - }, - {Namespace: "test", Name: "ignored-2"}: { - ObjectMeta: metav1.ObjectMeta{ - Name: "ignored-2", - Namespace: "test", - Generation: 2, - }, - }, - }, - expected: map[types.NamespacedName]v1.GatewayStatus{ - {Namespace: "test", Name: "ignored-1"}: { - Conditions: []metav1.Condition{ - { - Type: string(v1.GatewayConditionAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 1, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - { - Type: string(v1.GatewayConditionProgrammed), - Status: metav1.ConditionFalse, - ObservedGeneration: 1, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - }, - }, - {Namespace: "test", Name: "ignored-2"}: { - Conditions: []metav1.Condition{ - { - Type: string(v1.GatewayConditionAccepted), - Status: metav1.ConditionFalse, - ObservedGeneration: 2, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - { - Type: 
string(v1.GatewayConditionProgrammed), - Status: metav1.ConditionFalse, - ObservedGeneration: 2, - LastTransitionTime: transitionTime, - Reason: string(staticConds.GatewayReasonGatewayConflict), - Message: staticConds.GatewayMessageGatewayConflict, - }, - }, - }, - }, - }, { name: "valid gateway; all valid listeners", gateway: &graph.Gateway{ @@ -1264,17 +1246,10 @@ func TestBuildGatewayStatuses(t *testing.T) { expectedTotalReqs++ } - for _, gw := range test.ignoredGateways { - err := k8sClient.Create(context.Background(), gw) - g.Expect(err).ToNot(HaveOccurred()) - expectedTotalReqs++ - } - updater := statusFramework.NewUpdater(k8sClient, logr.Discard()) reqs := PrepareGatewayRequests( test.gateway, - test.ignoredGateways, transitionTime, addr, test.nginxReloadRes, @@ -1304,6 +1279,7 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { type policyCfg struct { Name string Conditions []conditions.Condition + Gateways []types.NamespacedName Valid bool Ignored bool IsReferenced bool @@ -1322,7 +1298,7 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { Ignored: policyCfg.Ignored, IsReferenced: policyCfg.IsReferenced, Conditions: policyCfg.Conditions, - Gateway: types.NamespacedName{Name: "gateway", Namespace: "test"}, + Gateways: policyCfg.Gateways, } } @@ -1334,12 +1310,19 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { Valid: true, IsReferenced: true, Conditions: attachedConds, + Gateways: []types.NamespacedName{ + {Namespace: "test", Name: "gateway"}, + {Namespace: "test", Name: "gateway-2"}, + }, } invalidPolicyCfg := policyCfg{ Name: "invalid-bt", IsReferenced: true, Conditions: invalidConds, + Gateways: []types.NamespacedName{ + {Namespace: "test", Name: "gateway"}, + }, } ignoredPolicyCfg := policyCfg{ @@ -1392,6 +1375,25 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { }, }, }, + { + AncestorRef: v1.ParentReference{ + Namespace: helpers.GetPointer[v1.Namespace]("test"), + Name: "gateway-2", + Group: helpers.GetPointer[v1.Group](v1.GroupName), + Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), + }, + ControllerName: gatewayCtlrName, + Conditions: []metav1.Condition{ + { + Type: string(v1alpha2.PolicyConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + LastTransitionTime: transitionTime, + Reason: string(v1alpha2.PolicyReasonAccepted), + Message: "Policy is accepted", + }, + }, + }, }, }, }, @@ -1470,6 +1472,25 @@ func TestBuildBackendTLSPolicyStatuses(t *testing.T) { }, }, }, + { + AncestorRef: v1.ParentReference{ + Namespace: helpers.GetPointer[v1.Namespace]("test"), + Name: "gateway-2", + Group: helpers.GetPointer[v1.Group](v1.GroupName), + Kind: helpers.GetPointer[v1.Kind](kinds.Gateway), + }, + ControllerName: gatewayCtlrName, + Conditions: []metav1.Condition{ + { + Type: string(v1alpha2.PolicyConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + LastTransitionTime: transitionTime, + Reason: string(v1alpha2.PolicyReasonAccepted), + Message: "Policy is accepted", + }, + }, + }, }, }, }, diff --git a/internal/mode/static/telemetry/collector.go b/internal/mode/static/telemetry/collector.go index 1be3accf14..cfa3614df7 100644 --- a/internal/mode/static/telemetry/collector.go +++ b/internal/mode/static/telemetry/collector.go @@ -199,10 +199,7 @@ func collectGraphResourceCount( ngfResourceCounts.GatewayClassCount++ } - ngfResourceCounts.GatewayCount = int64(len(g.IgnoredGateways)) - if g.Gateway != nil { - ngfResourceCounts.GatewayCount++ - } + ngfResourceCounts.GatewayCount = int64(len(g.Gateways)) 
routeCounts := computeRouteCount(g.Routes, g.L4Routes) ngfResourceCounts.HTTPRouteCount = routeCounts.HTTPRouteCount diff --git a/internal/mode/static/telemetry/collector_test.go b/internal/mode/static/telemetry/collector_test.go index e216ac3b5a..4620d186ce 100644 --- a/internal/mode/static/telemetry/collector_test.go +++ b/internal/mode/static/telemetry/collector_test.go @@ -272,15 +272,15 @@ var _ = Describe("Collector", Ordered, func() { graph := &graph.Graph{ GatewayClass: &graph.GatewayClass{}, - Gateway: &graph.Gateway{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Name: "gateway1"}: {}, + {Name: "gateway2"}: {}, + {Name: "gateway3"}: {}, + }, IgnoredGatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ {Name: "ignoredGC1"}: {}, {Name: "ignoredGC2"}: {}, }, - IgnoredGateways: map[types.NamespacedName]*gatewayv1.Gateway{ - {Name: "ignoredGw1"}: {}, - {Name: "ignoredGw2"}: {}, - }, Routes: map[graph.RouteKey]*graph.L7Route{ {NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-1"}}: {RouteType: graph.RouteTypeHTTP}, {NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-2"}}: {RouteType: graph.RouteTypeHTTP}, @@ -578,7 +578,9 @@ var _ = Describe("Collector", Ordered, func() { graph1 = &graph.Graph{ GatewayClass: &graph.GatewayClass{}, - Gateway: &graph.Gateway{}, + Gateways: map[types.NamespacedName]*graph.Gateway{ + {Name: "gateway1"}: {}, + }, Routes: map[graph.RouteKey]*graph.L7Route{ {NamespacedName: types.NamespacedName{Namespace: "test", Name: "hr-1"}}: {RouteType: graph.RouteTypeHTTP}, }, diff --git a/tests/conformance/conformance-rbac.yaml b/tests/conformance/conformance-rbac.yaml index c1c0d54185..26572dbe94 100644 --- a/tests/conformance/conformance-rbac.yaml +++ b/tests/conformance/conformance-rbac.yaml @@ -16,6 +16,7 @@ rules: - pods - secrets - services + - serviceaccounts verbs: - create - delete diff --git a/tests/framework/resourcemanager.go b/tests/framework/resourcemanager.go index 50aab0653e..912c321090 100644 --- a/tests/framework/resourcemanager.go +++ b/tests/framework/resourcemanager.go @@ -692,25 +692,34 @@ func GetReadyNGFPodNames( ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - var podList core.PodList - if err := k8sClient.List( + var ngfPodNames []string + + err := wait.PollUntilContextCancel( ctx, - &podList, - client.InNamespace(namespace), - client.MatchingLabels{ - "app.kubernetes.io/instance": releaseName, - }, - ); err != nil { - return nil, fmt.Errorf("error getting list of NGF Pods: %w", err) - } + 500*time.Millisecond, + true, // poll immediately + func(ctx context.Context) (bool, error) { + var podList core.PodList + if err := k8sClient.List( + ctx, + &podList, + client.InNamespace(namespace), + client.MatchingLabels{ + "app.kubernetes.io/instance": releaseName, + }, + ); err != nil { + return false, fmt.Errorf("error getting list of NGF Pods: %w", err) + } - if len(podList.Items) == 0 { - return nil, errors.New("unable to find NGF Pod(s)") + ngfPodNames = getReadyPodNames(podList) + return len(ngfPodNames) > 0, nil + }, + ) + if err != nil { + return nil, fmt.Errorf("timed out waiting for NGF Pods to be ready: %w", err) } - names := getReadyPodNames(podList) - - return names, nil + return ngfPodNames, nil } // GetReadyNginxPodNames returns the name(s) of the NGINX Pod(s). 
@@ -722,23 +731,32 @@ func GetReadyNginxPodNames( ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - var podList core.PodList - if err := k8sClient.List( + var nginxPodNames []string + + err := wait.PollUntilContextCancel( ctx, - &podList, - client.InNamespace(namespace), - client.HasLabels{"gateway.networking.k8s.io/gateway-name"}, - ); err != nil { - return nil, fmt.Errorf("error getting list of NGINX Pods: %w", err) - } + 500*time.Millisecond, + true, // poll immediately + func(ctx context.Context) (bool, error) { + var podList core.PodList + if err := k8sClient.List( + ctx, + &podList, + client.InNamespace(namespace), + client.HasLabels{"gateway.networking.k8s.io/gateway-name"}, + ); err != nil { + return false, fmt.Errorf("error getting list of NGINX Pods: %w", err) + } - if len(podList.Items) == 0 { - return nil, errors.New("unable to find NGINX Pod(s)") + nginxPodNames = getReadyPodNames(podList) + return len(nginxPodNames) > 0, nil + }, + ) + if err != nil { + return nil, fmt.Errorf("timed out waiting for NGINX Pods to be ready: %w", err) } - names := getReadyPodNames(podList) - - return names, nil + return nginxPodNames, nil } func getReadyPodNames(podList core.PodList) []string { diff --git a/tests/suite/advanced_routing_test.go b/tests/suite/advanced_routing_test.go index 844e1db02c..1359beb795 100644 --- a/tests/suite/advanced_routing_test.go +++ b/tests/suite/advanced_routing_test.go @@ -40,7 +40,7 @@ var _ = Describe("AdvancedRouting", Ordered, Label("functional", "routing"), fun Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) diff --git a/tests/suite/client_settings_test.go b/tests/suite/client_settings_test.go index 835f3a9896..7bd5f4ed57 100644 --- a/tests/suite/client_settings_test.go +++ b/tests/suite/client_settings_test.go @@ -47,7 +47,7 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -255,13 +255,12 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" When("a ClientSettingsPolicy targets an invalid resources", func() { Specify("their accepted condition is set to TargetNotFound", func() { files := []string{ - "clientsettings/ignored-gateway.yaml", - "clientsettings/invalid-csp.yaml", + "clientsettings/invalid-route-csp.yaml", } Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) - nsname := types.NamespacedName{Name: "invalid-csp", Namespace: namespace} + nsname := types.NamespacedName{Name: "invalid-route-csp", Namespace: namespace} Expect(waitForCSPolicyToHaveTargetNotFoundAcceptedCond(nsname)).To(Succeed()) Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) diff --git a/tests/suite/graceful_recovery_test.go 
b/tests/suite/graceful_recovery_test.go index 2e844f46e6..bd71551933 100644 --- a/tests/suite/graceful_recovery_test.go +++ b/tests/suite/graceful_recovery_test.go @@ -210,7 +210,7 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), var err error Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout). @@ -310,7 +310,7 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), var nginxPodNames []string Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout * 2). @@ -349,7 +349,7 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), } getLeaderElectionLeaseHolderName := func() (string, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetTimeout) + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.GetStatusTimeout) defer cancel() var lease coordination.Lease @@ -384,7 +384,7 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), } BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout) Expect(err).ToNot(HaveOccurred()) Expect(podNames).To(HaveLen(1)) @@ -400,7 +400,7 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -433,7 +433,7 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), It("recovers when nginx container is restarted", func() { restartNginxContainer(activeNginxPodName, ns.Name, nginxContainerName) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetTimeout) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) activeNginxPodName = nginxPodNames[0] diff --git a/tests/suite/manifests/clientsettings/ignored-gateway.yaml b/tests/suite/manifests/clientsettings/ignored-gateway.yaml deleted file mode 100644 index 74d8317b01..0000000000 --- a/tests/suite/manifests/clientsettings/ignored-gateway.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: Gateway -metadata: - name: ignored-gateway -spec: - gatewayClassName: nginx - listeners: - - name: http - port: 80 - protocol: HTTP - hostname: "*.example.com" diff --git a/tests/suite/manifests/clientsettings/invalid-csp.yaml b/tests/suite/manifests/clientsettings/invalid-csp.yaml deleted file 
mode 100644 index cedfb52e46..0000000000 --- a/tests/suite/manifests/clientsettings/invalid-csp.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: gateway.nginx.org/v1alpha1 -kind: ClientSettingsPolicy -metadata: - name: invalid-csp -spec: - targetRef: - group: gateway.networking.k8s.io - kind: Gateway - name: ignored-gateway - body: - maxSize: 10m - timeout: 30s - keepAlive: - requests: 100 - time: 5s - timeout: - server: 2s - header: 1s diff --git a/tests/suite/manifests/clientsettings/invalid-route-csp.yaml b/tests/suite/manifests/clientsettings/invalid-route-csp.yaml new file mode 100644 index 0000000000..e856d6e5e9 --- /dev/null +++ b/tests/suite/manifests/clientsettings/invalid-route-csp.yaml @@ -0,0 +1,33 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: invalid-route +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /invalid + headers: + - name: host_name + value: v2 + backendRefs: + - name: coffee + port: 80 +--- +apiVersion: gateway.nginx.org/v1alpha1 +kind: ClientSettingsPolicy +metadata: + name: invalid-route-csp +spec: + targetRef: + group: gateway.networking.k8s.io + kind: HTTPRoute + name: invalid-route + keepAlive: + requests: 200 diff --git a/tests/suite/nginxgateway_test.go b/tests/suite/nginxgateway_test.go index 1129310fab..a2d44e3a77 100644 --- a/tests/suite/nginxgateway_test.go +++ b/tests/suite/nginxgateway_test.go @@ -98,7 +98,7 @@ var _ = Describe("NginxGateway", Ordered, Label("functional", "nginxGateway"), f k8sClient, ngfNamespace, releaseName, - timeoutConfig.GetTimeout, + timeoutConfig.GetStatusTimeout, ) if err != nil { return "", err diff --git a/tests/suite/sample_test.go b/tests/suite/sample_test.go index 3426ed973f..d4c265ec37 100644 --- a/tests/suite/sample_test.go +++ b/tests/suite/sample_test.go @@ -38,7 +38,7 @@ var _ = Describe("Basic test example", Label("functional"), func() { Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) diff --git a/tests/suite/snippets_filter_test.go b/tests/suite/snippets_filter_test.go index 39e099a8f6..397f1e8c58 100644 --- a/tests/suite/snippets_filter_test.go +++ b/tests/suite/snippets_filter_test.go @@ -42,7 +42,7 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) diff --git a/tests/suite/tracing_test.go b/tests/suite/tracing_test.go index 83c46d4cb4..19b8a55f5f 100644 --- a/tests/suite/tracing_test.go +++ b/tests/suite/tracing_test.go @@ -92,7 +92,7 @@ var _ = Describe("Tracing", FlakeAttempts(2), Ordered, Label("functional", "trac Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) 
 Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed())
- nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout)
+ nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout)
 Expect(err).ToNot(HaveOccurred())
 Expect(nginxPodNames).To(HaveLen(1))
diff --git a/tests/suite/upstream_settings_test.go b/tests/suite/upstream_settings_test.go
index dad10bc0e5..4243a432c9 100644
--- a/tests/suite/upstream_settings_test.go
+++ b/tests/suite/upstream_settings_test.go
@@ -51,7 +51,7 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic
 Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed())
 Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed())
- nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout)
+ nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout)
 Expect(err).ToNot(HaveOccurred())
 Expect(nginxPodNames).To(HaveLen(1))

From 1598552b05f206ce5a5eb66238a66fe451da67d3 Mon Sep 17 00:00:00 2001
From: bjee19 <139261241+bjee19@users.noreply.github.com>
Date: Tue, 22 Apr 2025 16:35:12 -0700
Subject: [PATCH 21/32] CP/DP Update non-functional tests (#3305)

Update non-functional tests for the control plane/data plane split.

Problem: The non-functional tests do not work with the control plane/data
plane split changes.

Solution: Update the non-functional tests.

Testing: The Scale, Reconfiguration, Performance, and Longevity tests work.
The Upgrade test does not work; this is expected, since the CP/DP split is a
breaking change to NGF and a zero-downtime upgrade is not possible.

---------

Co-authored-by: Saylor Berman
---
 .github/workflows/nfr.yml | 7 +
 charts/nginx-gateway-fabric/values.yaml | 10 +-
 internal/mode/static/handler_test.go | 8 +-
 tests/Makefile | 6 +-
 tests/framework/crossplane.go | 14 +-
 tests/framework/info.go | 25 +
 tests/framework/prometheus.go | 113 ----
 tests/framework/resourcemanager.go | 44 +-
 tests/framework/timeout.go | 2 +-
 tests/scripts/push-crossplane-image.sh | 8 +
 tests/suite/advanced_routing_test.go | 1 +
 tests/suite/client_settings_test.go | 3 +-
 tests/suite/dataplane_perf_test.go | 1 +
 tests/suite/graceful_recovery_test.go | 1 +
 tests/suite/longevity_test.go | 1 +
 .../suite/manifests/reconfig/cafe-routes.yaml | 6 +-
 .../scale/zero-downtime/values-affinity.yaml | 40 +-
 .../manifests/scale/zero-downtime/values.yaml | 18 +-
 tests/suite/reconfig_test.go | 509 +++++++-----------
 tests/suite/sample_test.go | 1 +
 tests/suite/scale_test.go | 178 +++---
 tests/suite/scripts/longevity-wrk.sh | 11 +-
 tests/suite/snippets_filter_test.go | 3 +-
 tests/suite/system_suite_test.go | 33 +-
 tests/suite/tracing_test.go | 1 +
 tests/suite/upgrade_test.go | 11 +-
 tests/suite/upstream_settings_test.go | 5 +-
 27 files changed, 486 insertions(+), 574 deletions(-)
 create mode 100755 tests/scripts/push-crossplane-image.sh

diff --git a/.github/workflows/nfr.yml b/.github/workflows/nfr.yml
index 5eabd96b88..c8968cb056 100644
--- a/.github/workflows/nfr.yml
+++ b/.github/workflows/nfr.yml
@@ -92,6 +92,13 @@ jobs:
 workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY }}
 service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
+ - name: Login to GAR
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+ with:
+ registry: us-docker.pkg.dev
+ username: oauth2accesstoken
+ password: ${{
steps.auth.outputs.access_token }} + - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4 with: diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index a1197593d3..bd2872e9ad 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -375,19 +375,19 @@ nginx: # -- The termination grace period of the NGINX data plane pod. # terminationGracePeriodSeconds: 30 - # -- Tolerations for the NGINX Gateway Fabric control plane pod. + # -- Tolerations for the NGINX data plane pod. # tolerations: [] - # -- The nodeSelector of the NGINX Gateway Fabric control plane pod. + # -- The nodeSelector of the NGINX data plane pod. # nodeSelector: {} - # -- The affinity of the NGINX Gateway Fabric control plane pod. + # -- The affinity of the NGINX data plane pod. # affinity: {} - # -- The topology spread constraints for the NGINX Gateway Fabric control plane pod. + # -- The topology spread constraints for the NGINX data plane pod. # topologySpreadConstraints: [] - # -- extraVolumes for the NGINX Gateway Fabric control plane pod. Use in conjunction with + # -- extraVolumes for the NGINX data plane pod. Use in conjunction with # nginx.container.extraVolumeMounts mount additional volumes to the container. # extraVolumes: [] diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index 6d62be42b3..ec5bfa437d 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -249,8 +249,6 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) checkUpsertEventExpectations(e) - Expect(fakeProvisioner.RegisterGatewayCallCount()).Should(Equal(0)) - Expect(fakeGenerator.GenerateCallCount()).Should(Equal(0)) // status update should still occur for GatewayClasses Eventually( func() int { @@ -305,7 +303,7 @@ var _ = Describe("eventHandler", func() { Eventually( func() int { return fakeStatusUpdater.UpdateGroupCallCount() - }).Should(Equal(1)) + }).Should(BeNumerically(">", 1)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) @@ -324,7 +322,7 @@ var _ = Describe("eventHandler", func() { Eventually( func() int { return fakeStatusUpdater.UpdateGroupCallCount() - }).Should(Equal(1)) + }).Should(BeNumerically(">", 1)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) @@ -356,7 +354,7 @@ var _ = Describe("eventHandler", func() { Eventually( func() int { return fakeStatusUpdater.UpdateGroupCallCount() - }).Should(Equal(1)) + }).Should(BeNumerically(">", 1)) _, name, reqs := fakeStatusUpdater.UpdateGroupArgsForCall(0) Expect(name).To(Equal(groupControlPlane)) diff --git a/tests/Makefile b/tests/Makefile index dd82633d76..eec81e0ad7 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -44,7 +44,7 @@ build-test-runner-image: ## Build conformance test runner image .PHONY: build-crossplane-image build-crossplane-image: ## Build the crossplane image - docker build --build-arg NGINX_CONF_DIR=$(NGINX_CONF_DIR) -t nginx-crossplane:latest -f framework/crossplane/Dockerfile .. + docker build --platform $(GOOS)/$(GOARCH) --build-arg NGINX_CONF_DIR=$(NGINX_CONF_DIR) -t nginx-crossplane:latest -f framework/crossplane/Dockerfile .. 
.PHONY: run-conformance-tests run-conformance-tests: ## Run conformance tests @@ -105,7 +105,9 @@ sync-files-to-vm: ## Syncs your local NGF files with the NGF repo on the VM ./scripts/sync-files-to-vm.sh .PHONY: nfr-test -nfr-test: check-for-plus-usage-endpoint ## Run the NFR tests on a GCP VM +nfr-test: GOARCH=amd64 +nfr-test: check-for-plus-usage-endpoint build-crossplane-image ## Run the NFR tests on a GCP VM + ./scripts/push-crossplane-image.sh CI=$(CI) ./scripts/run-tests-gcp-vm.sh .PHONY: start-longevity-test diff --git a/tests/framework/crossplane.go b/tests/framework/crossplane.go index f2ada703c5..02a16b6cb5 100644 --- a/tests/framework/crossplane.go +++ b/tests/framework/crossplane.go @@ -38,6 +38,8 @@ type ExpectedNginxField struct { ValueSubstringAllowed bool } +const crossplaneImageName = "nginx-crossplane:latest" + // ValidateNginxFieldExists accepts the nginx config and the configuration for the expected field, // and returns whether or not that field exists where it should. func ValidateNginxFieldExists(conf *Payload, expFieldCfg ExpectedNginxField) error { @@ -144,11 +146,17 @@ func injectCrossplaneContainer( k8sClient kubernetes.Interface, timeout time.Duration, ngfPodName, - namespace string, + namespace, + crossplaneImageRepo string, ) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() + image := crossplaneImageName + if crossplaneImageRepo != "" { + image = crossplaneImageRepo + "/" + image + } + pod := &core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: ngfPodName, @@ -160,8 +168,8 @@ func injectCrossplaneContainer( TargetContainerName: "nginx", EphemeralContainerCommon: core.EphemeralContainerCommon{ Name: "crossplane", - Image: "nginx-crossplane:latest", - ImagePullPolicy: "Never", + Image: image, + ImagePullPolicy: "IfNotPresent", Stdin: true, VolumeMounts: []core.VolumeMount{ { diff --git a/tests/framework/info.go b/tests/framework/info.go index 588b728631..c485edc9aa 100644 --- a/tests/framework/info.go +++ b/tests/framework/info.go @@ -4,6 +4,7 @@ import ( "fmt" "runtime/debug" + . "github.com/onsi/ginkgo/v2" core "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -81,3 +82,27 @@ func GetBuildInfo() (commitHash string, commitTime string, dirtyBuild string) { return } + +// AddNginxLogsAndEventsToReport adds nginx logs and events from the namespace to the report if the spec failed. +func AddNginxLogsAndEventsToReport(rm ResourceManager, namespace string) { + if CurrentSpecReport().Failed() { + var returnLogs string + + nginxPodNames, _ := GetReadyNginxPodNames(rm.K8sClient, namespace, rm.TimeoutConfig.GetStatusTimeout) + + for _, nginxPodName := range nginxPodNames { + returnLogs += fmt.Sprintf("Logs for Nginx Pod %s:\n", nginxPodName) + nginxLogs, _ := rm.GetPodLogs( + namespace, + nginxPodName, + &core.PodLogOptions{Container: "nginx"}, + ) + + returnLogs += fmt.Sprintf(" %s\n", nginxLogs) + } + AddReportEntry("Nginx Logs", returnLogs, ReportEntryVisibilityNever) + + events := GetEvents(rm, namespace) + AddReportEntry("Test Events", events, ReportEntryVisibilityNever) + } +} diff --git a/tests/framework/prometheus.go b/tests/framework/prometheus.go index fd7bf44624..3c3094712b 100644 --- a/tests/framework/prometheus.go +++ b/tests/framework/prometheus.go @@ -302,119 +302,6 @@ type Bucket struct { Val int } -// GetReloadCount gets the total number of nginx reloads. 
-func GetReloadCount(promInstance PrometheusInstance, ngfPodName string) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`, - ngfPodName, - ), - promInstance, - ) -} - -// GetReloadCountWithStartTime gets the total number of nginx reloads from a start time to the current time. -func GetReloadCountWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"} @ %d`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - -// GetReloadErrsCountWithStartTime gets the total number of nginx reload errors from a start time to the current time. -func GetReloadErrsCountWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reload_errors_total{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reload_errors_total{pod="%[1]s"} @ %d`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - -// GetReloadAvgTime gets the average time in milliseconds for nginx to reload. -func GetReloadAvgTime(promInstance PrometheusInstance, ngfPodName string) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_milliseconds_sum{pod="%[1]s"}`+ - ` / `+ - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`, - ngfPodName, - ), - promInstance, - ) -} - -// GetReloadAvgTimeWithStartTime gets the average time in milliseconds for nginx to reload using a start time -// to the current time to calculate. -func GetReloadAvgTimeWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) (float64, error) { - return getFirstValueOfVector( - fmt.Sprintf( - `(nginx_gateway_fabric_nginx_reloads_milliseconds_sum{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_milliseconds_sum{pod="%[1]s"} @ %[2]d)`+ - ` / `+ - `(nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_total{pod="%[1]s"} @ %[2]d)`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - -// GetReloadBuckets gets the Buckets in millisecond intervals for nginx reloads. -func GetReloadBuckets(promInstance PrometheusInstance, ngfPodName string) ([]Bucket, error) { - return getBuckets( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_milliseconds_bucket{pod="%[1]s"}`, - ngfPodName, - ), - promInstance, - ) -} - -// GetReloadBucketsWithStartTime gets the Buckets in millisecond intervals for nginx reloads from a start time -// to the current time. -func GetReloadBucketsWithStartTime( - promInstance PrometheusInstance, - ngfPodName string, - startTime time.Time, -) ([]Bucket, error) { - return getBuckets( - fmt.Sprintf( - `nginx_gateway_fabric_nginx_reloads_milliseconds_bucket{pod="%[1]s"}`+ - ` - `+ - `nginx_gateway_fabric_nginx_reloads_milliseconds_bucket{pod="%[1]s"} @ %d`, - ngfPodName, - startTime.Unix(), - ), - promInstance, - ) -} - // GetEventsCount gets the NGF event batch processing count. 
func GetEventsCount(promInstance PrometheusInstance, ngfPodName string) (float64, error) { return getFirstValueOfVector( diff --git a/tests/framework/resourcemanager.go b/tests/framework/resourcemanager.go index 912c321090..f398e97375 100644 --- a/tests/framework/resourcemanager.go +++ b/tests/framework/resourcemanager.go @@ -46,6 +46,8 @@ import ( "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" v1 "sigs.k8s.io/gateway-api/apis/v1" + + ngfAPIv1alpha2 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" ) // ResourceManager handles creating/updating/deleting Kubernetes resources. @@ -647,6 +649,44 @@ func (rm *ResourceManager) GetNGFDeployment(namespace, releaseName string) (*app return &deployment, nil } +func (rm *ResourceManager) getGatewayClassNginxProxy( + namespace, + releaseName string, +) (*ngfAPIv1alpha2.NginxProxy, error) { + ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.GetTimeout) + defer cancel() + + var proxy ngfAPIv1alpha2.NginxProxy + proxyName := releaseName + "-proxy-config" + + if err := rm.K8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: proxyName}, &proxy); err != nil { + return nil, err + } + + return &proxy, nil +} + +// ScaleNginxDeployment scales the Nginx Deployment to the specified number of replicas. +func (rm *ResourceManager) ScaleNginxDeployment(namespace, releaseName string, replicas int32) error { + ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.UpdateTimeout) + defer cancel() + + // If there is another NginxProxy which "overrides" the gateway class one, then this won't work and + // may need refactoring. + proxy, err := rm.getGatewayClassNginxProxy(namespace, releaseName) + if err != nil { + return fmt.Errorf("error getting NginxProxy: %w", err) + } + + proxy.Spec.Kubernetes.Deployment.Replicas = &replicas + + if err = rm.K8sClient.Update(ctx, proxy); err != nil { + return fmt.Errorf("error updating NginxProxy: %w", err) + } + + return nil +} + // GetEvents returns all Events in the specified namespace. func (rm *ResourceManager) GetEvents(namespace string) (*core.EventList, error) { ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.GetTimeout) @@ -843,12 +883,14 @@ func (rm *ResourceManager) WaitForGatewayObservedGeneration( } // GetNginxConfig uses crossplane to get the nginx configuration and convert it to JSON. -func (rm *ResourceManager) GetNginxConfig(nginxPodName, namespace string) (*Payload, error) { +// If the crossplane image is loaded locally on the node, crossplaneImageRepo can be empty. 
+func (rm *ResourceManager) GetNginxConfig(nginxPodName, namespace, crossplaneImageRepo string) (*Payload, error) { if err := injectCrossplaneContainer( rm.ClientGoClient, rm.TimeoutConfig.UpdateTimeout, nginxPodName, namespace, + crossplaneImageRepo, ); err != nil { return nil, err } diff --git a/tests/framework/timeout.go b/tests/framework/timeout.go index 956b1699f3..8d8557622f 100644 --- a/tests/framework/timeout.go +++ b/tests/framework/timeout.go @@ -43,7 +43,7 @@ func DefaultTimeoutConfig() TimeoutConfig { CreateTimeout: 60 * time.Second, UpdateTimeout: 60 * time.Second, DeleteTimeout: 10 * time.Second, - DeleteNamespaceTimeout: 60 * time.Second, + DeleteNamespaceTimeout: 90 * time.Second, GetTimeout: 10 * time.Second, ManifestFetchTimeout: 10 * time.Second, RequestTimeout: 10 * time.Second, diff --git a/tests/scripts/push-crossplane-image.sh b/tests/scripts/push-crossplane-image.sh new file mode 100755 index 0000000000..31bd06d2f6 --- /dev/null +++ b/tests/scripts/push-crossplane-image.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -eo pipefail + +source scripts/vars.env + +docker tag nginx-crossplane:latest us-docker.pkg.dev/$GKE_PROJECT/nginx-gateway-fabric/nginx-crossplane:latest +docker push us-docker.pkg.dev/$GKE_PROJECT/nginx-gateway-fabric/nginx-crossplane:latest diff --git a/tests/suite/advanced_routing_test.go b/tests/suite/advanced_routing_test.go index 1359beb795..cffc5bad5b 100644 --- a/tests/suite/advanced_routing_test.go +++ b/tests/suite/advanced_routing_test.go @@ -48,6 +48,7 @@ var _ = Describe("AdvancedRouting", Ordered, Label("functional", "routing"), fun }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) cleanUpPortForward() Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) diff --git a/tests/suite/client_settings_test.go b/tests/suite/client_settings_test.go index 7bd5f4ed57..7a77c0dea9 100644 --- a/tests/suite/client_settings_test.go +++ b/tests/suite/client_settings_test.go @@ -57,6 +57,7 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) cleanUpPortForward() Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) @@ -109,7 +110,7 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" BeforeAll(func() { var err error - conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/suite/dataplane_perf_test.go b/tests/suite/dataplane_perf_test.go index aa34131db1..adedebf05f 100644 --- a/tests/suite/dataplane_perf_test.go +++ b/tests/suite/dataplane_perf_test.go @@ -88,6 +88,7 @@ var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) cleanUpPortForward() Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) diff --git a/tests/suite/graceful_recovery_test.go b/tests/suite/graceful_recovery_test.go index bd71551933..c2987610f7 100644 --- a/tests/suite/graceful_recovery_test.go +++ b/tests/suite/graceful_recovery_test.go @@ -425,6 +425,7 @@ var _ = Describe("Graceful Recovery test", Ordered, Label("graceful-recovery"), }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) cleanUpPortForward() Expect(resourceManager.DeleteFromFiles(files, 
ns.Name)).To(Succeed()) Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) diff --git a/tests/suite/longevity_test.go b/tests/suite/longevity_test.go index c768c71cb7..182c0fd676 100644 --- a/tests/suite/longevity_test.go +++ b/tests/suite/longevity_test.go @@ -82,6 +82,7 @@ var _ = Describe("Longevity", Label("longevity-setup", "longevity-teardown"), fu Expect(writeTrafficResults(resultsFile, homeDir, "coffee.txt", "HTTP")).To(Succeed()) Expect(writeTrafficResults(resultsFile, homeDir, "tea.txt", "HTTPS")).To(Succeed()) + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) }) diff --git a/tests/suite/manifests/reconfig/cafe-routes.yaml b/tests/suite/manifests/reconfig/cafe-routes.yaml index 006a8eba92..454d093892 100644 --- a/tests/suite/manifests/reconfig/cafe-routes.yaml +++ b/tests/suite/manifests/reconfig/cafe-routes.yaml @@ -5,7 +5,7 @@ metadata: spec: parentRefs: - name: gateway - namespace: default + namespace: reconfig sectionName: http hostnames: - "cafe.example.com" @@ -23,7 +23,7 @@ metadata: spec: parentRefs: - name: gateway - namespace: default + namespace: reconfig sectionName: https hostnames: - "cafe.example.com" @@ -43,8 +43,8 @@ metadata: spec: parentRefs: - name: gateway + namespace: reconfig sectionName: https - namespace: default hostnames: - "cafe.example.com" rules: diff --git a/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml b/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml index d9a0381b8e..ea19a27470 100644 --- a/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml +++ b/tests/suite/manifests/scale/zero-downtime/values-affinity.yaml @@ -3,24 +3,26 @@ nginxGateway: preStop: exec: command: - - /usr/bin/gateway - - sleep - - --duration=40s + - /usr/bin/gateway + - sleep + - --duration=40s + terminationGracePeriodSeconds: 50 -nginx: - lifecycle: - preStop: - exec: - command: - - /bin/sleep - - "40" - -terminationGracePeriodSeconds: 50 -affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - app.kubernetes.io/name: nginx-gateway +nginx: + pod: + terminationGracePeriodSeconds: 50 + container: + lifecycle: + preStop: + exec: + command: + - /bin/sleep + - "40" + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: gateway-nginx diff --git a/tests/suite/manifests/scale/zero-downtime/values.yaml b/tests/suite/manifests/scale/zero-downtime/values.yaml index b4de7a5528..06f18b79a7 100644 --- a/tests/suite/manifests/scale/zero-downtime/values.yaml +++ b/tests/suite/manifests/scale/zero-downtime/values.yaml @@ -6,13 +6,15 @@ nginxGateway: - /usr/bin/gateway - sleep - --duration=40s + terminationGracePeriodSeconds: 50 nginx: - lifecycle: - preStop: - exec: - command: - - /bin/sleep - - "40" - -terminationGracePeriodSeconds: 50 + pod: + terminationGracePeriodSeconds: 50 + container: + lifecycle: + preStop: + exec: + command: + - /bin/sleep + - "40" diff --git a/tests/suite/reconfig_test.go b/tests/suite/reconfig_test.go index fb4d6c02ce..a28596e5ad 100644 --- a/tests/suite/reconfig_test.go +++ b/tests/suite/reconfig_test.go @@ -107,7 +107,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r return nil } - createResourcesGWLast := 
func(resourceCount int) { + createResources := func(resourceCount int) { ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout*5) defer cancel() @@ -140,44 +140,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r } Expect(resourceManager.WaitForPodsToBeReady(ctx, ns.Name)).To(Succeed()) } - - Expect(resourceManager.ApplyFromFiles([]string{"reconfig/gateway.yaml"}, reconfigNamespace.Name)).To(Succeed()) - } - - createResourcesRoutesLast := func(resourceCount int) { - ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.CreateTimeout*5) - defer cancel() - - for i := 1; i <= resourceCount; i++ { - ns := core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "namespace" + strconv.Itoa(i), - }, - } - Expect(k8sClient.Create(ctx, &ns)).To(Succeed()) - } - - Expect(createUniqueResources(resourceCount, "manifests/reconfig/cafe.yaml")).To(Succeed()) - - for i := 1; i <= resourceCount; i++ { - ns := core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "namespace" + strconv.Itoa(i), - }, - } - Expect(resourceManager.WaitForPodsToBeReady(ctx, ns.Name)).To(Succeed()) - } - - Expect(resourceManager.Apply([]client.Object{&reconfigNamespace})).To(Succeed()) - Expect(resourceManager.ApplyFromFiles( - []string{ - "reconfig/cafe-secret.yaml", - "reconfig/reference-grant.yaml", - "reconfig/gateway.yaml", - }, - reconfigNamespace.Name)).To(Succeed()) - - Expect(createUniqueResources(resourceCount, "manifests/reconfig/cafe-routes.yaml")).To(Succeed()) } checkResourceCreation := func(resourceCount int) error { @@ -223,131 +185,64 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r return err } - getTimeStampFromLogLine := func(logLine string) string { - var timeStamp string - - timeStamp = strings.Split(logLine, "\"ts\":\"")[1] - // sometimes the log message will contain information on a "logger" followed by the "msg" - // while other times the "logger" will be omitted - timeStamp = strings.Split(timeStamp, "\",\"msg\"")[0] - timeStamp = strings.Split(timeStamp, "\",\"logger\"")[0] - - return timeStamp - } - - calculateTimeDifferenceBetweenLogLines := func(firstLine, secondLine string) (int, error) { - layout := time.RFC3339 - - firstTS := getTimeStampFromLogLine(firstLine) - secondTS := getTimeStampFromLogLine(secondLine) - - parsedTS1, err := time.Parse(layout, firstTS) - if err != nil { - return 0, err - } - - parsedTS2, err := time.Parse(layout, secondTS) - if err != nil { - return 0, err - } - - return int(parsedTS2.Sub(parsedTS1).Seconds()), nil - } + checkNginxConfIsPopulated := func(nginxPodName string, resourceCount int) error { + ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.UpdateTimeout*2) + defer cancel() - calculateTimeToReadyAverage := func(ngfLogs string) (string, error) { - var reconcilingLine, nginxReloadLine string - const maxCount = 5 - - var times [maxCount]int - var count int - - // parse the logs until it reaches a reconciling log line for a gateway resource, then it compares that - // timestamp to the next NGINX configuration update. When it reaches the NGINX configuration update line, - // it will reset the reconciling log line and set it to the next reconciling log line. 
- for _, line := range strings.Split(ngfLogs, "\n") { - if reconcilingLine == "" && - strings.Contains(line, "Reconciling the resource\",\"controller\"") && - strings.Contains(line, "\"controllerGroup\":\"gateway.networking.k8s.io\"") { - reconcilingLine = line + index := 1 + conf, _ := resourceManager.GetNginxConfig(nginxPodName, reconfigNamespace.Name, nginxCrossplanePath) + for index <= resourceCount { + namespace := "namespace" + strconv.Itoa(resourceCount) + expUpstream := framework.ExpectedNginxField{ + Directive: "upstream", + Value: namespace + "_coffee" + namespace + "_80", + File: "http.conf", } - if strings.Contains(line, "NGINX configuration was successfully updated") && reconcilingLine != "" { - nginxReloadLine = line - - timeDifference, err := calculateTimeDifferenceBetweenLogLines(reconcilingLine, nginxReloadLine) - if err != nil { - return "", err - } - reconcilingLine = "" - - times[count] = timeDifference - count++ - if count == maxCount-1 { - break + // each call to ValidateNginxFieldExists takes about 1ms + if err := framework.ValidateNginxFieldExists(conf, expUpstream); err != nil { + select { + case <-ctx.Done(): + return fmt.Errorf("error validating nginx conf was generated in "+namespace+": %w", err.Error()) + default: + // each call to GetNginxConfig takes about 70ms + conf, _ = resourceManager.GetNginxConfig(nginxPodName, reconfigNamespace.Name, nginxCrossplanePath) + continue } } - } - var sum float64 - for _, time := range times { - sum += float64(time) + index++ } - avgTime := sum / float64(count+1) - - if avgTime < 1 { - return "< 1", nil - } - - return strconv.FormatFloat(avgTime, 'f', -1, 64), nil + return nil } - calculateTimeToReadyTotal := func(ngfLogs, startingLogSubstring string) (string, error) { - var firstLine, lastLine string - for _, line := range strings.Split(ngfLogs, "\n") { - if firstLine == "" && strings.Contains(line, startingLogSubstring) { - firstLine = line - } + calculateTimeToReadyTotal := func(nginxPodName string, startTime time.Time, resourceCount int) string { + Expect(checkNginxConfIsPopulated(nginxPodName, resourceCount)).To(Succeed()) + stopTime := time.Now() - if strings.Contains(line, "NGINX configuration was successfully updated") { - lastLine = line - } - } - - timeToReadyTotal, err := calculateTimeDifferenceBetweenLogLines(firstLine, lastLine) - if err != nil { - return "", err - } + stringTimeToReadyTotal := strconv.Itoa(int(stopTime.Sub(startTime).Seconds())) - stringTimeToReadyTotal := strconv.Itoa(timeToReadyTotal) if stringTimeToReadyTotal == "0" { stringTimeToReadyTotal = "< 1" } - return stringTimeToReadyTotal, nil + return stringTimeToReadyTotal } - deployNGFReturnsNGFPodNameAndStartTime := func() (string, time.Time) { - var startTime time.Time - + collectMetrics := func( + resourceCount int, + ngfPodName string, + startTime time.Time, + ) reconfigTestResults { getStartTime := func() time.Time { return startTime } modifyStartTime := func() { startTime = startTime.Add(500 * time.Millisecond) } - cfg := getDefaultSetupCfg() - cfg.nfr = true - setup(cfg) - - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(podNames).To(HaveLen(1)) - ngfPodName := podNames[0] - startTime = time.Now() - queries := []string{ fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), fmt.Sprintf(`container_cpu_usage_seconds_total{pod="%s",container="nginx-gateway"}`, ngfPodName), // We don't need to 
check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), + fmt.Sprintf(`nginx_gateway_fabric_event_batch_processing_milliseconds_sum{pod="%s"}`, ngfPodName), } for _, q := range queries { @@ -361,16 +256,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r ).WithTimeout(metricExistTimeout).WithPolling(metricExistPolling).Should(Succeed()) } - return ngfPodName, startTime - } - - collectMetrics := func( - testDescription string, - resourceCount int, - timeToReadyStartingLogSubstring string, - ngfPodName string, - startTime time.Time, - ) { time.Sleep(2 * scrapeInterval) endTime := time.Now() @@ -388,12 +273,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r getEndTime := func() time.Time { return endTime } noOpModifier := func() {} - queries := []string{ - fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), - // We don't need to check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), - } - for _, q := range queries { Eventually( framework.CreateMetricExistChecker( @@ -407,21 +286,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r checkNGFContainerLogsForErrors(ngfPodName) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, reconfigNamespace.Name, timeoutConfig.GetTimeout) - Expect(err).ToNot(HaveOccurred()) - Expect(nginxPodNames).To(HaveLen(1)) - - nginxErrorLogs := getNginxErrorLogs(nginxPodNames[0], reconfigNamespace.Name) - - reloadCount, err := framework.GetReloadCount(promInstance, ngfPodName) - Expect(err).ToNot(HaveOccurred()) - - reloadAvgTime, err := framework.GetReloadAvgTime(promInstance, ngfPodName) - Expect(err).ToNot(HaveOccurred()) - - reloadBuckets, err := framework.GetReloadBuckets(promInstance, ngfPodName) - Expect(err).ToNot(HaveOccurred()) - eventsCount, err := framework.GetEventsCount(promInstance, ngfPodName) Expect(err).ToNot(HaveOccurred()) @@ -431,158 +295,156 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r eventsBuckets, err := framework.GetEventsBuckets(promInstance, ngfPodName) Expect(err).ToNot(HaveOccurred()) - logs, err := resourceManager.GetPodLogs(ngfNamespace, ngfPodName, &core.PodLogOptions{ - Container: "nginx-gateway", - }) - Expect(err).ToNot(HaveOccurred()) - - // FIXME (bjee19): https://github.com/nginx/nginx-gateway-fabric/issues/2374 - // Find a way to calculate time to ready metrics without having to rely on specific log lines. 
- timeToReadyTotal, err := calculateTimeToReadyTotal(logs, timeToReadyStartingLogSubstring) - Expect(err).ToNot(HaveOccurred()) - - timeToReadyAvgSingle, err := calculateTimeToReadyAverage(logs) - Expect(err).ToNot(HaveOccurred()) - results := reconfigTestResults{ - TestDescription: testDescription, - EventsBuckets: eventsBuckets, - ReloadBuckets: reloadBuckets, - NumResources: resourceCount, - TimeToReadyTotal: timeToReadyTotal, - TimeToReadyAvgSingle: timeToReadyAvgSingle, - NGINXReloads: int(reloadCount), - NGINXReloadAvgTime: int(reloadAvgTime), - NGINXErrorLogs: nginxErrorLogs, - EventsCount: int(eventsCount), - EventsAvgTime: int(eventsAvgTime), + EventsBuckets: eventsBuckets, + NumResources: resourceCount, + EventsCount: int(eventsCount), + EventsAvgTime: int(eventsAvgTime), } - err = writeReconfigResults(outFile, results) - Expect(err).ToNot(HaveOccurred()) + return results } When("resources exist before startup", func() { testDescription := "Test 1: Resources exist before startup" - - It("gathers metrics after creating 30 resources", func() { - resourceCount := 30 - timeToReadyStartingLogSubstring := "Starting NGINX Gateway Fabric" - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - - It("gathers metrics after creating 150 resources", func() { - resourceCount := 150 - timeToReadyStartingLogSubstring := "Starting NGINX Gateway Fabric" - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) + timeToReadyDescription := "From when NGF starts to when the NGINX configuration is fully configured" + DescribeTable(testDescription, + func(resourceCount int) { + createResources(resourceCount) + Expect(resourceManager.ApplyFromFiles([]string{"reconfig/gateway.yaml"}, reconfigNamespace.Name)).To(Succeed()) + Expect(checkResourceCreation(resourceCount)).To(Succeed()) + + cfg := getDefaultSetupCfg() + cfg.nfr = true + setup(cfg) + + podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(podNames).To(HaveLen(1)) + ngfPodName := podNames[0] + startTime := time.Now() + + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames( + k8sClient, + reconfigNamespace.Name, + timeoutConfig.GetStatusTimeout, + ) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + WithPolling(500 * time.Millisecond). 
+ Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + timeToReadyTotal := calculateTimeToReadyTotal(nginxPodName, startTime, resourceCount) + + nginxErrorLogs := getNginxErrorLogs(nginxPodNames[0], reconfigNamespace.Name) + + results := collectMetrics( + resourceCount, + ngfPodName, + startTime, + ) + + results.NGINXErrorLogs = nginxErrorLogs + results.TimeToReadyTotal = timeToReadyTotal + results.TestDescription = testDescription + results.TimeToReadyDescription = timeToReadyDescription + + err = writeReconfigResults(outFile, results) + Expect(err).ToNot(HaveOccurred()) + }, + Entry("gathers metrics after creating 30 resources", 30), + Entry("gathers metrics after creating 150 resources", 150), + ) }) When("NGF and Gateway resource are deployed first", func() { - testDescription := "Test 2: Start NGF, deploy Gateway, create many resources attached to GW" - - It("gathers metrics after creating 30 resources", func() { - resourceCount := 30 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"httproute\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesRoutesLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - - It("gathers metrics after creating 150 resources", func() { - resourceCount := 150 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"httproute\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesRoutesLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - }) - - When("NGF and resources are deployed first", func() { - testDescription := "Test 3: Start NGF, create many resources attached to a Gateway, deploy the Gateway" - - It("gathers metrics after creating 30 resources", func() { - resourceCount := 30 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"gateway\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) - - It("gathers metrics after creating 150 resources", func() { - resourceCount := 150 - timeToReadyStartingLogSubstring := "Reconciling the resource\",\"controller\":\"gateway\"" - - ngfPodName, startTime := deployNGFReturnsNGFPodNameAndStartTime() - - createResourcesGWLast(resourceCount) - Expect(checkResourceCreation(resourceCount)).To(Succeed()) - - collectMetrics( - testDescription, - resourceCount, - timeToReadyStartingLogSubstring, - ngfPodName, - startTime, - ) - }) + testDescription := "Test 2: Start NGF, deploy Gateway, wait until NGINX agent instance connects to NGF, " + + "create many resources attached to GW" + timeToReadyDescription := "From when NGINX receives the first configuration created by NGF to " + + "when the NGINX configuration is fully configured" + DescribeTable(testDescription, + func(resourceCount int) { + cfg := getDefaultSetupCfg() + cfg.nfr = true + setup(cfg) + + podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + 
Expect(err).ToNot(HaveOccurred()) + Expect(podNames).To(HaveLen(1)) + ngfPodName := podNames[0] + + Expect(resourceManager.Apply([]client.Object{&reconfigNamespace})).To(Succeed()) + Expect(resourceManager.ApplyFromFiles([]string{"reconfig/gateway.yaml"}, reconfigNamespace.Name)).To(Succeed()) + + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames( + k8sClient, + reconfigNamespace.Name, + timeoutConfig.GetStatusTimeout, + ) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + // this checks if NGF has established a connection with agent and sent over the first nginx conf + Eventually( + func() bool { + conf, _ := resourceManager.GetNginxConfig(nginxPodName, reconfigNamespace.Name, nginxCrossplanePath) + // a default upstream NGF creates + defaultUpstream := framework.ExpectedNginxField{ + Directive: "upstream", + Value: "invalid-backend-ref", + File: "http.conf", + } + + return framework.ValidateNginxFieldExists(conf, defaultUpstream) == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + startTime := time.Now() + + createResources(resourceCount) + Expect(checkResourceCreation(resourceCount)).To(Succeed()) + + timeToReadyTotal := calculateTimeToReadyTotal(nginxPodName, startTime, resourceCount) + + nginxErrorLogs := getNginxErrorLogs(nginxPodName, reconfigNamespace.Name) + + results := collectMetrics( + resourceCount, + ngfPodName, + startTime, + ) + + results.NGINXErrorLogs = nginxErrorLogs + results.TimeToReadyTotal = timeToReadyTotal + results.TestDescription = testDescription + results.TimeToReadyDescription = timeToReadyDescription + + err = writeReconfigResults(outFile, results) + Expect(err).ToNot(HaveOccurred()) + }, + Entry("gathers metrics after creating 30 resources", 30), + Entry("gathers metrics after creating 150 resources", 150), + ) }) AfterEach(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, reconfigNamespace.Name) + Expect(cleanupResources()).Should(Succeed()) teardown(releaseName) }) @@ -600,32 +462,23 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r }) type reconfigTestResults struct { - TestDescription string - TimeToReadyTotal string - TimeToReadyAvgSingle string - NGINXErrorLogs string - EventsBuckets []framework.Bucket - ReloadBuckets []framework.Bucket - NumResources int - NGINXReloads int - NGINXReloadAvgTime int - EventsCount int - EventsAvgTime int + TestDescription string + TimeToReadyTotal string + TimeToReadyDescription string + NGINXErrorLogs string + EventsBuckets []framework.Bucket + NumResources int + EventsCount int + EventsAvgTime int } const reconfigResultTemplate = ` ## {{ .TestDescription }} - NumResources {{ .NumResources }} -### Reloads and Time to Ready +### Time to Ready +Time To Ready Description: {{ .TimeToReadyDescription }} - TimeToReadyTotal: {{ .TimeToReadyTotal }}s -- TimeToReadyAvgSingle: {{ .TimeToReadyAvgSingle }}s -- NGINX Reloads: {{ .NGINXReloads }} -- NGINX Reload Average Time: {{ .NGINXReloadAvgTime }}ms -- Reload distribution: -{{- range .ReloadBuckets }} - - {{ .Le }}ms: {{ .Val }} -{{- end }} ### Event Batch Processing diff --git a/tests/suite/sample_test.go b/tests/suite/sample_test.go index d4c265ec37..191d20134d 100644 --- a/tests/suite/sample_test.go +++ b/tests/suite/sample_test.go @@ -46,6 +46,7 @@ var _ = Describe("Basic test example", 
Label("functional"), func() { }) AfterEach(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) cleanUpPortForward() Expect(resourceManager.DeleteFromFiles(files, namespace)).To(Succeed()) diff --git a/tests/suite/scale_test.go b/tests/suite/scale_test.go index 4dadb97d60..8a04a088e7 100644 --- a/tests/suite/scale_test.go +++ b/tests/suite/scale_test.go @@ -119,31 +119,17 @@ var _ = Describe("Scale test", Ordered, Label("nfr", "scale"), func() { type scaleTestResults struct { Name string EventsBuckets []framework.Bucket - ReloadBuckets []framework.Bucket EventsAvgTime int EventsCount int NGFContainerRestarts int NGFErrors int NginxContainerRestarts int NginxErrors int - ReloadAvgTime int - ReloadCount int - ReloadErrsCount int } const scaleResultTemplate = ` ## Test {{ .Name }} -### Reloads - -- Total: {{ .ReloadCount }} -- Total Errors: {{ .ReloadErrsCount }} -- Average Time: {{ .ReloadAvgTime }}ms -- Reload distribution: -{{- range .ReloadBuckets }} - - {{ .Le }}ms: {{ .Val }} -{{- end }} - ### Event Batch Processing - Total: {{ .EventsCount }} @@ -176,12 +162,14 @@ The logs are attached only if there are errors. } checkLogErrors := func( - containerName string, + containerName, + podName, + namespace, + fileName string, substrings []string, ignoredSubstrings []string, - fileName string, ) int { - logs, err := resourceManager.GetPodLogs(ngfNamespace, ngfPodName, &core.PodLogOptions{ + logs, err := resourceManager.GetPodLogs(namespace, podName, &core.PodLogOptions{ Container: containerName, }) Expect(err).ToNot(HaveOccurred()) @@ -237,7 +225,7 @@ The logs are attached only if there are errors. fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), fmt.Sprintf(`container_cpu_usage_seconds_total{pod="%s",container="nginx-gateway"}`, ngfPodName), // We don't need to check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), + fmt.Sprintf(`nginx_gateway_fabric_event_batch_processing_milliseconds_sum{pod="%s"}`, ngfPodName), } for _, q := range queries { @@ -280,7 +268,7 @@ The logs are attached only if there are errors. queries = []string{ fmt.Sprintf(`container_memory_usage_bytes{pod="%s",container="nginx-gateway"}`, ngfPodName), // We don't need to check all nginx_gateway_fabric_* metrics, as they are collected at the same time - fmt.Sprintf(`nginx_gateway_fabric_nginx_reloads_total{pod="%s"}`, ngfPodName), + fmt.Sprintf(`nginx_gateway_fabric_event_batch_processing_milliseconds_sum{pod="%s"}`, ngfPodName), } for _, q := range queries { @@ -337,18 +325,6 @@ The logs are attached only if there are errors. Expect(os.Remove(cpuCSV)).To(Succeed()) - reloadCount, err := framework.GetReloadCountWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - - reloadErrsCount, err := framework.GetReloadErrsCountWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - - reloadAvgTime, err := framework.GetReloadAvgTimeWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - - reloadBuckets, err := framework.GetReloadBucketsWithStartTime(promInstance, ngfPodName, startTime) - Expect(err).ToNot(HaveOccurred()) - eventsCount, err := framework.GetEventsCountWithStartTime(promInstance, ngfPodName, startTime) Expect(err).ToNot(HaveOccurred()) @@ -362,43 +338,53 @@ The logs are attached only if there are errors. 
ngfErrors := checkLogErrors( "nginx-gateway", + ngfPodName, + ngfNamespace, + filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "ngf", *plusEnabled)), []string{"error"}, []string{`"logger":"usageReporter`}, // ignore usageReporter errors - filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "ngf", *plusEnabled)), ) + + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + nginxPodName := nginxPodNames[0] + nginxErrors := checkLogErrors( "nginx", + nginxPodName, + namespace, + filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "nginx", *plusEnabled)), []string{framework.ErrorNGINXLog, framework.EmergNGINXLog, framework.CritNGINXLog, framework.AlertNGINXLog}, nil, - filepath.Join(testResultsDir, framework.CreateResultsFilename("log", "nginx", *plusEnabled)), ) // Check container restarts - pod, err := resourceManager.GetPod(ngfNamespace, ngfPodName) + ngfPod, err := resourceManager.GetPod(ngfNamespace, ngfPodName) + Expect(err).ToNot(HaveOccurred()) + + nginxPod, err := resourceManager.GetPod(namespace, nginxPodName) Expect(err).ToNot(HaveOccurred()) - findRestarts := func(name string) int { + findRestarts := func(containerName string, pod *core.Pod) int { for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.Name == name { + if containerStatus.Name == containerName { return int(containerStatus.RestartCount) } } - Fail(fmt.Sprintf("container %s not found", name)) + Fail(fmt.Sprintf("container %s not found", containerName)) return 0 } - ngfRestarts := findRestarts("nginx-gateway") - nginxRestarts := findRestarts("nginx") + ngfRestarts := findRestarts("nginx-gateway", ngfPod) + nginxRestarts := findRestarts("nginx", nginxPod) // Write results results := scaleTestResults{ Name: testName, - ReloadCount: int(reloadCount), - ReloadErrsCount: int(reloadErrsCount), - ReloadAvgTime: int(reloadAvgTime), - ReloadBuckets: reloadBuckets, EventsCount: int(eventsCount), EventsAvgTime: int(eventsAvgTime), EventsBuckets: eventsBuckets, @@ -428,6 +414,22 @@ The logs are attached only if there are errors. for i := range len(objects.ScaleIterationGroups) { Expect(resourceManager.Apply(objects.ScaleIterationGroups[i])).To(Succeed()) + if i == 0 { + var nginxPodNames []string + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, namespace) + } + var url string if protocol == "http" && portFwdPort != 0 { url = fmt.Sprintf("%s://%d.example.com:%d", protocol, i, portFwdPort) @@ -441,7 +443,7 @@ The logs are attached only if there are errors. Eventually( framework.CreateResponseChecker(url, address, timeoutConfig.RequestTimeout), - ).WithTimeout(5 * timeoutConfig.RequestTimeout).WithPolling(100 * time.Millisecond).Should(Succeed()) + ).WithTimeout(6 * timeoutConfig.RequestTimeout).WithPolling(100 * time.Millisecond).Should(Succeed()) ttr := time.Since(startCheck) @@ -466,6 +468,21 @@ The logs are attached only if there are errors. 
Expect(resourceManager.ApplyFromFiles(upstreamsManifests, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + var nginxPodNames []string + var err error + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, namespace) + var url string if portFwdPort != 0 { url = fmt.Sprintf("http://hello.example.com:%d", portFwdPort) @@ -598,6 +615,21 @@ The logs are attached only if there are errors. Expect(resourceManager.ApplyFromFiles(matchesManifests, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) + var nginxPodNames []string + var err error + Eventually( + func() bool { + nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, namespace) + var port int if portFwdPort != 0 { port = portFwdPort @@ -611,7 +643,7 @@ The logs are attached only if there are errors. text := fmt.Sprintf("\n## Test %s\n\n", testName) - _, err := fmt.Fprint(outFile, text) + _, err = fmt.Fprint(outFile, text) Expect(err).ToNot(HaveOccurred()) run := func(t framework.Target) { @@ -650,8 +682,10 @@ The logs are attached only if there are errors. }) AfterEach(func() { - teardown(releaseName) + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + cleanUpPortForward() Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) + teardown(releaseName) }) AfterAll(func() { @@ -678,13 +712,13 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim } var ( - outFile *os.File - resultsDir string - ngfDeploymentName string - ns core.Namespace - metricsCh chan *metricsResults + outFile *os.File + resultsDir string + ns core.Namespace + metricsCh chan *metricsResults - files = []string{ + numCoffeeAndTeaPods = 20 + files = []string{ "scale/zero-downtime/cafe.yaml", "scale/zero-downtime/cafe-secret.yaml", "scale/zero-downtime/gateway-1.yaml", @@ -825,12 +859,12 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim numReplicas int }{ { - name: "One NGF Pod runs per node", + name: "One NGINX Pod runs per node", valuesFile: "manifests/scale/zero-downtime/values-affinity.yaml", numReplicas: 12, // equals number of nodes }, { - name: "Multiple NGF Pods run per node", + name: "Multiple NGINX Pods run per node", valuesFile: "manifests/scale/zero-downtime/values.yaml", numReplicas: 24, // twice the number of nodes }, @@ -843,19 +877,33 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim cfg.nfr = true setup(cfg, "--values", test.valuesFile) - deploy, err := resourceManager.GetNGFDeployment(ngfNamespace, releaseName) - Expect(err).ToNot(HaveOccurred()) - ngfDeploymentName = deploy.GetName() - Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + var nginxPodNames []string + var err error + Eventually( + func() bool { + 
nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + return len(nginxPodNames) == 1 && err == nil + }). + WithTimeout(timeoutConfig.CreateTimeout). + Should(BeTrue()) + + nginxPodName := nginxPodNames[0] + Expect(nginxPodName).ToNot(BeEmpty()) + + setUpPortForward(nginxPodName, ns.Name) + _, err = fmt.Fprintf(outFile, "\n## %s Test Results\n", test.name) Expect(err).ToNot(HaveOccurred()) }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) + cleanUpPortForward() + teardown(releaseName) Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) }) @@ -882,8 +930,8 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // scale NGF up one at a time for i := 2; i <= test.numReplicas; i++ { - Eventually(resourceManager.ScaleDeployment). - WithArguments(ngfNamespace, ngfDeploymentName, int32(i)). + Eventually(resourceManager.ScaleNginxDeployment). + WithArguments(ngfNamespace, releaseName, int32(i)). WithTimeout(timeoutConfig.UpdateTimeout). WithPolling(500 * time.Millisecond). Should(Succeed()) @@ -893,7 +941,7 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.UpdateTimeout) - Expect(resourceManager.WaitForPodsToBeReadyWithCount(ctx, ngfNamespace, i)).To(Succeed()) + Expect(resourceManager.WaitForPodsToBeReadyWithCount(ctx, ns.Name, i+numCoffeeAndTeaPods)).To(Succeed()) Expect(resourceManager.WaitForGatewayObservedGeneration(ctx, ns.Name, "gateway", i)).To(Succeed()) cancel() @@ -935,8 +983,8 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // scale NGF down one at a time currentGen := test.numReplicas for i := test.numReplicas - 1; i >= 1; i-- { - Eventually(resourceManager.ScaleDeployment). - WithArguments(ngfNamespace, ngfDeploymentName, int32(i)). + Eventually(resourceManager.ScaleNginxDeployment). + WithArguments(ngfNamespace, releaseName, int32(i)). WithTimeout(timeoutConfig.UpdateTimeout). WithPolling(500 * time.Millisecond). 
Should(Succeed()) @@ -1005,7 +1053,7 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // allow traffic flow to start time.Sleep(2 * time.Second) - Expect(resourceManager.ScaleDeployment(ngfNamespace, ngfDeploymentName, int32(test.numReplicas))).To(Succeed()) + Expect(resourceManager.ScaleNginxDeployment(ngfNamespace, releaseName, int32(test.numReplicas))).To(Succeed()) Expect(resourceManager.ApplyFromFiles([]string{"scale/zero-downtime/gateway-2.yaml"}, ns.Name)).To(Succeed()) checkGatewayListeners(3) @@ -1037,7 +1085,7 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim // allow traffic flow to start time.Sleep(2 * time.Second) - Expect(resourceManager.ScaleDeployment(ngfNamespace, ngfDeploymentName, int32(1))).To(Succeed()) + Expect(resourceManager.ScaleNginxDeployment(ngfNamespace, releaseName, int32(1))).To(Succeed()) Expect(resourceManager.ApplyFromFiles([]string{"scale/zero-downtime/gateway-1.yaml"}, ns.Name)).To(Succeed()) checkGatewayListeners(2) diff --git a/tests/suite/scripts/longevity-wrk.sh b/tests/suite/scripts/longevity-wrk.sh index e7d3a6b23a..1165cfa6b5 100755 --- a/tests/suite/scripts/longevity-wrk.sh +++ b/tests/suite/scripts/longevity-wrk.sh @@ -1,6 +1,15 @@ #!/usr/bin/env bash -SVC_IP=$(kubectl -n nginx-gateway get svc ngf-longevity-nginx-gateway-fabric -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +while true; do + SVC_IP=$(kubectl -n longevity get svc gateway-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + if [[ -n $SVC_IP ]]; then + echo "Service IP assigned: $SVC_IP" + break + fi + + echo "Still waiting for nginx Service IP..." + sleep 5 +done echo "${SVC_IP} cafe.example.com" | sudo tee -a /etc/hosts diff --git a/tests/suite/snippets_filter_test.go b/tests/suite/snippets_filter_test.go index 397f1e8c58..de6583d79b 100644 --- a/tests/suite/snippets_filter_test.go +++ b/tests/suite/snippets_filter_test.go @@ -67,6 +67,7 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter }) AfterAll(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) Expect(resourceManager.DeleteFromFiles(snippetsFilter, namespace)).To(Succeed()) }) @@ -119,7 +120,7 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter BeforeAll(func() { var err error - conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/suite/system_suite_test.go b/tests/suite/system_suite_test.go index 39e9e15524..6a8174bdb9 100644 --- a/tests/suite/system_suite_test.go +++ b/tests/suite/system_suite_test.go @@ -70,20 +70,21 @@ var ( var ( //go:embed manifests/* - manifests embed.FS - k8sClient client.Client // TODO: are the k8sClient and the resourceManager.k8sClient the same? 
- resourceManager framework.ResourceManager - portForwardStopCh chan struct{} - portFwdPort int - portFwdHTTPSPort int - timeoutConfig framework.TimeoutConfig - localChartPath string - address string - version string - chartVersion string - clusterInfo framework.ClusterInfo - skipNFRTests bool - logs string + manifests embed.FS + k8sClient client.Client + resourceManager framework.ResourceManager + portForwardStopCh chan struct{} + portFwdPort int + portFwdHTTPSPort int + timeoutConfig framework.TimeoutConfig + localChartPath string + address string + version string + chartVersion string + clusterInfo framework.ClusterInfo + skipNFRTests bool + logs string + nginxCrossplanePath string ) var formatNginxPlusEdgeImagePath = "us-docker.pkg.dev/%s/nginx-gateway-fabric/nginx-plus" @@ -171,6 +172,8 @@ func setup(cfg setupConfig, extraInstallArgs ...string) { version = "edge" } + nginxCrossplanePath = "us-docker.pkg.dev/" + *gkeProject + "/nginx-gateway-fabric" + if !cfg.deploy { return } @@ -348,7 +351,7 @@ var _ = AfterSuite(func() { AddReportEntry("Events", events, ReportEntryVisibilityNever) logs = framework.GetLogs(resourceManager, ngfNamespace, releaseName) - AddReportEntry("Logs", logs, ReportEntryVisibilityNever) + AddReportEntry("NGF Logs", logs, ReportEntryVisibilityNever) labelFilter := GinkgoLabelFilter() if !strings.Contains(labelFilter, "longevity-setup") { diff --git a/tests/suite/tracing_test.go b/tests/suite/tracing_test.go index 19b8a55f5f..f56a083426 100644 --- a/tests/suite/tracing_test.go +++ b/tests/suite/tracing_test.go @@ -110,6 +110,7 @@ var _ = Describe("Tracing", FlakeAttempts(2), Ordered, Label("functional", "trac }) AfterEach(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) output, err := framework.UninstallCollector(resourceManager) Expect(err).ToNot(HaveOccurred(), string(output)) diff --git a/tests/suite/upgrade_test.go b/tests/suite/upgrade_test.go index d0dec2fc15..a62de97356 100644 --- a/tests/suite/upgrade_test.go +++ b/tests/suite/upgrade_test.go @@ -67,7 +67,12 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - var err error + nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + Expect(err).ToNot(HaveOccurred()) + Expect(nginxPodNames).To(HaveLen(1)) + + setUpPortForward(nginxPodNames[0], ns.Name) + resultsDir, err = framework.CreateResultsDir("ngf-upgrade", version) Expect(err).ToNot(HaveOccurred()) @@ -78,12 +83,16 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { }) AfterEach(func() { + framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) + cleanUpPortForward() + Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.DeleteNamespace(ns.Name)).To(Succeed()) resultsFile.Close() }) It("upgrades NGF with zero downtime", func() { + Skip("Skipping test until version 2.1.0 since 2.0.0 is a breaking change") nginxImage := *nginxImageRepository if *plusEnabled { nginxImage = *nginxPlusImageRepository diff --git a/tests/suite/upstream_settings_test.go b/tests/suite/upstream_settings_test.go index 4243a432c9..f2b02b1059 100644 --- a/tests/suite/upstream_settings_test.go +++ b/tests/suite/upstream_settings_test.go @@ -61,6 +61,7 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic }) AfterAll(func() { + 
framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) cleanUpPortForward() Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) @@ -130,7 +131,7 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic BeforeAll(func() { var err error - conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) }) @@ -310,7 +311,7 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic BeforeAll(func() { var err error - conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace) + conf, err = resourceManager.GetNginxConfig(nginxPodName, namespace, "") Expect(err).ToNot(HaveOccurred()) }) From 77662792508afed714b8b83201ff0bc57440346d Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Wed, 23 Apr 2025 08:20:31 -0600 Subject: [PATCH 22/32] Fix lint issue on rebase --- internal/mode/static/state/graph/gateway.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/mode/static/state/graph/gateway.go b/internal/mode/static/state/graph/gateway.go index 64604ae4c6..3ff21ce44f 100644 --- a/internal/mode/static/state/graph/gateway.go +++ b/internal/mode/static/state/graph/gateway.go @@ -188,7 +188,7 @@ func validateGateway(gw *v1.Gateway, gc *GatewayClass, npCfg *NginxProxy) ([]con // we evaluate validity before validating parametersRef because an invalid parametersRef/NginxProxy does not // invalidate the entire Gateway. - valid := !(len(conds) > 0) + valid := len(conds) == 0 if gw.Spec.Infrastructure != nil && gw.Spec.Infrastructure.ParametersRef != nil { paramConds := validateGatewayParametersRef(npCfg, *gw.Spec.Infrastructure.ParametersRef) From 79b55b8d0000f95b0f83ec1f6b01a3bba98ca28c Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Wed, 23 Apr 2025 09:24:41 -0600 Subject: [PATCH 23/32] Fix helm README generation --- charts/nginx-gateway-fabric/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 3a7a41cdf7..f2141de4d2 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -278,7 +278,7 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | `nginx.usage.resolver` | The nameserver used to resolve the NGINX Plus usage reporting endpoint. Used with NGINX Instance Manager. | string | `""` | | `nginx.usage.secretName` | The name of the Secret containing the JWT for NGINX Plus usage reporting. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"nplus-license"` | | `nginx.usage.skipVerify` | Disable client verification of the NGINX Plus usage reporting server certificate. | bool | `false` | -| `nginxGateway` | The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment. 
| object | `{"affinity":{},"config":{"logging":{"level":"info"}},"configAnnotations":{},"extraVolumeMounts":[],"extraVolumes":[],"gatewayClassAnnotations":{},"gatewayClassName":"nginx","gatewayControllerName":"gateway.nginx.org/nginx-gateway-controller","gwAPIExperimentalFeatures":{"enable":false},"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric","tag":"edge"},"kind":"deployment","leaderElection":{"enable":true,"lockName":""},"lifecycle":{},"metrics":{"enable":true,"port":9113,"secure":false},"nodeSelector":{},"podAnnotations":{},"productTelemetry":{"enable":true},"readinessProbe":{"enable":true,"initialDelaySeconds":3,"port":8081},"replicas":1,"resources":{},"service":{"annotations":{}},"serviceAccount":{"annotations":{},"imagePullSecret":"","imagePullSecrets":[],"name":""},"snippetsFilters":{"enable":false},"terminationGracePeriodSeconds":30,"tolerations":[],"topologySpreadConstraints":[]}` | +| `nginxGateway` | The nginxGateway section contains configuration for the NGINX Gateway Fabric control plane deployment. | object | `{"affinity":{},"config":{"logging":{"level":"info"}},"configAnnotations":{},"extraVolumeMounts":[],"extraVolumes":[],"gatewayClassAnnotations":{},"gatewayClassName":"nginx","gatewayControllerName":"gateway.nginx.org/nginx-gateway-controller","gwAPIExperimentalFeatures":{"enable":false},"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric","tag":"edge"},"kind":"deployment","labels":{},"leaderElection":{"enable":true,"lockName":""},"lifecycle":{},"metrics":{"enable":true,"port":9113,"secure":false},"nodeSelector":{},"podAnnotations":{},"productTelemetry":{"enable":true},"readinessProbe":{"enable":true,"initialDelaySeconds":3,"port":8081},"replicas":1,"resources":{},"service":{"annotations":{}},"serviceAccount":{"annotations":{},"imagePullSecret":"","imagePullSecrets":[],"name":""},"snippetsFilters":{"enable":false},"terminationGracePeriodSeconds":30,"tolerations":[],"topologySpreadConstraints":[]}` | | `nginxGateway.affinity` | The affinity of the NGINX Gateway Fabric control plane pod. | object | `{}` | | `nginxGateway.config.logging.level` | Log level. | string | `"info"` | | `nginxGateway.configAnnotations` | Set of custom annotations for NginxGateway objects. | object | `{}` | From c4da0bbe4a8f96d249efa76548db8159d0cd16e1 Mon Sep 17 00:00:00 2001 From: bjee19 <139261241+bjee19@users.noreply.github.com> Date: Mon, 28 Apr 2025 11:12:17 -0700 Subject: [PATCH 24/32] CP/DP Split: Add ability to set loadBalancerClass for load balancer Service (#3319) Add ability to set loadBalancerClass for load balancer Service Problem: We would like the ability to specify the loadBalanacerClass field on a load balancer service. Solution: Add ability to set loadBalancerClass for load balancer Service. Testing: Manually tested that deploying NGF with the nginx.service.loadBalancerClass Helm flag would correctly set the field. Also tested that modifying the NginxProxy resource would set the loadBalancerClass when the service was re-created (the field can only be set upon creation). 
--- apis/v1alpha2/nginxproxy_types.go | 6 ++++++ apis/v1alpha2/zz_generated.deepcopy.go | 5 +++++ charts/nginx-gateway-fabric/values.yaml | 4 ++++ config/crd/bases/gateway.nginx.org_nginxproxies.yaml | 5 +++++ deploy/crds.yaml | 5 +++++ internal/mode/static/provisioner/objects.go | 3 +++ internal/mode/static/provisioner/objects_test.go | 2 ++ 7 files changed, 30 insertions(+) diff --git a/apis/v1alpha2/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go index 3a0a3ccc73..7c716824fa 100644 --- a/apis/v1alpha2/nginxproxy_types.go +++ b/apis/v1alpha2/nginxproxy_types.go @@ -517,6 +517,12 @@ type ServiceSpec struct { // +optional LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` + // LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + // Requires service type to be LoadBalancer. + // + // +optional + LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` + // Annotations contain any Service-specific annotations. // // +optional diff --git a/apis/v1alpha2/zz_generated.deepcopy.go b/apis/v1alpha2/zz_generated.deepcopy.go index 54e5c760b2..60bf2cd9cd 100644 --- a/apis/v1alpha2/zz_generated.deepcopy.go +++ b/apis/v1alpha2/zz_generated.deepcopy.go @@ -530,6 +530,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(string) **out = **in } + if in.LoadBalancerClass != nil { + in, out := &in.LoadBalancerClass, &out.LoadBalancerClass + *out = new(string) + **out = **in + } if in.Annotations != nil { in, out := &in.Annotations, &out.Annotations *out = make(map[string]string, len(*in)) diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index bd2872e9ad..c41ea7bb9c 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -427,6 +427,10 @@ nginx: # -- The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. # loadBalancerIP: "" + # -- LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + # Requires nginx.service.type set to LoadBalancer. + # loadBalancerClass: "" + # -- The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer. # loadBalancerSourceRanges: [] diff --git a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml index b07a013fd8..0e28520896 100644 --- a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml +++ b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml @@ -3482,6 +3482,11 @@ spec: - Cluster - Local type: string + loadBalancerClass: + description: |- + LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + Requires service type to be LoadBalancer. + type: string loadBalancerIP: description: LoadBalancerIP is a static IP address for the load balancer. Requires service type to be LoadBalancer. diff --git a/deploy/crds.yaml b/deploy/crds.yaml index c08c007d12..56cd27eacc 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -4067,6 +4067,11 @@ spec: - Cluster - Local type: string + loadBalancerClass: + description: |- + LoadBalancerClass is the class of the load balancer implementation this Service belongs to. + Requires service type to be LoadBalancer. + type: string loadBalancerIP: description: LoadBalancerIP is a static IP address for the load balancer. Requires service type to be LoadBalancer. 
diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index f925b9c133..93ee801fef 100644 --- a/internal/mode/static/provisioner/objects.go +++ b/internal/mode/static/provisioner/objects.go @@ -460,6 +460,9 @@ func buildNginxService( if serviceCfg.LoadBalancerIP != nil { svc.Spec.LoadBalancerIP = *serviceCfg.LoadBalancerIP } + if serviceCfg.LoadBalancerClass != nil { + svc.Spec.LoadBalancerClass = serviceCfg.LoadBalancerClass + } if serviceCfg.LoadBalancerSourceRanges != nil { svc.Spec.LoadBalancerSourceRanges = serviceCfg.LoadBalancerSourceRanges } diff --git a/internal/mode/static/provisioner/objects_test.go b/internal/mode/static/provisioner/objects_test.go index 0871f846c9..aa3a8dd747 100644 --- a/internal/mode/static/provisioner/objects_test.go +++ b/internal/mode/static/provisioner/objects_test.go @@ -253,6 +253,7 @@ func TestBuildNginxResourceObjects_NginxProxyConfig(t *testing.T) { ServiceType: helpers.GetPointer(ngfAPIv1alpha2.ServiceTypeNodePort), ExternalTrafficPolicy: helpers.GetPointer(ngfAPIv1alpha2.ExternalTrafficPolicyCluster), LoadBalancerIP: helpers.GetPointer("1.2.3.4"), + LoadBalancerClass: helpers.GetPointer("myLoadBalancerClass"), LoadBalancerSourceRanges: []string{"5.6.7.8"}, }, Deployment: &ngfAPIv1alpha2.DeploymentSpec{ @@ -299,6 +300,7 @@ func TestBuildNginxResourceObjects_NginxProxyConfig(t *testing.T) { g.Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) g.Expect(svc.Spec.ExternalTrafficPolicy).To(Equal(corev1.ServiceExternalTrafficPolicyTypeCluster)) g.Expect(svc.Spec.LoadBalancerIP).To(Equal("1.2.3.4")) + g.Expect(*svc.Spec.LoadBalancerClass).To(Equal("myLoadBalancerClass")) g.Expect(svc.Spec.LoadBalancerSourceRanges).To(Equal([]string{"5.6.7.8"})) depObj := objects[5] From eb0ede6220be641b62a0176b43f142c345a96bda Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Mon, 28 Apr 2025 14:38:37 -0600 Subject: [PATCH 25/32] CP/DP Split: optimize configuration events (#3320) Problem: All config update events resulted in sending configuration to every Gateway, even if the change was irrelevant. Solution: Compare new config with old config to determine if a ConfigApply is necessary. Simplified the change processor and handler to no longer have to determine this. 
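To make the comparison concrete, a minimal sketch of the idea, assuming only the standard library (the function and file names are illustrative, not the handler's actual API): derive a deterministic version from the generated files and send a ConfigApply only when that version changes, which is what SetFiles does in the diff below.

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// configVersion derives a deterministic version from a set of generated
// files by hashing names and contents in sorted order. The real code uses
// the agent's file helpers; this hash is illustrative only.
func configVersion(files map[string][]byte) string {
	names := make([]string, 0, len(files))
	for name := range files {
		names = append(names, name)
	}
	sort.Strings(names)

	h := sha256.New()
	for _, name := range names {
		h.Write([]byte(name))
		h.Write(files[name])
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	last := ""
	files := map[string][]byte{"nginx.conf": []byte("worker_processes auto;")}

	// First pass: the version differs from the stored one, so config is sent.
	if v := configVersion(files); v != last {
		fmt.Println("config changed, sending ConfigApply")
		last = v
	}

	// Second pass with identical files: same version, so no ConfigApply is sent.
	if v := configVersion(files); v == last {
		fmt.Println("config unchanged, skipping ConfigApply")
	}
}
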
--- internal/mode/static/handler.go | 110 ++---- internal/mode/static/handler_test.go | 87 +++-- internal/mode/static/nginx/agent/action.go | 92 +++++ .../mode/static/nginx/agent/action_test.go | 347 ++++++++++++++++++ internal/mode/static/nginx/agent/agent.go | 38 +- .../mode/static/nginx/agent/agent_test.go | 161 ++++++-- .../agent/agentfakes/fake_nginx_updater.go | 86 +---- .../mode/static/nginx/agent/deployment.go | 12 +- .../static/nginx/agent/deployment_test.go | 7 + internal/mode/static/provisioner/templates.go | 18 +- .../mode/static/state/change_processor.go | 28 +- .../static/state/change_processor_test.go | 313 +++++++--------- .../static/state/dataplane/configuration.go | 13 +- .../state/dataplane/configuration_test.go | 4 - internal/mode/static/state/dataplane/types.go | 2 - .../state/statefakes/fake_change_processor.go | 33 +- internal/mode/static/state/store.go | 35 +- internal/mode/static/state/store_test.go | 55 --- internal/mode/static/telemetry/collector.go | 6 +- .../mode/static/telemetry/collector_test.go | 134 ++++--- .../fake_configuration_getter.go | 20 +- tests/suite/client_settings_test.go | 23 ++ 22 files changed, 989 insertions(+), 635 deletions(-) create mode 100644 internal/mode/static/nginx/agent/action.go create mode 100644 internal/mode/static/nginx/agent/action_test.go delete mode 100644 internal/mode/static/state/store_test.go diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index af510dadfc..3114bea746 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -109,23 +109,21 @@ type objectFilter struct { // (3) Updating control plane configuration. // (4) Tracks the NGINX Plus usage reporting Secret (if applicable). type eventHandlerImpl struct { - // latestConfiguration is the latest Configuration generation. - latestConfiguration *dataplane.Configuration + // latestConfigurations are the latest Configuration generation for each Gateway tree. + latestConfigurations map[types.NamespacedName]*dataplane.Configuration // objectFilters contains all created objectFilters, with the key being a filterKey objectFilters map[filterKey]objectFilter cfg eventHandlerConfig lock sync.Mutex - - // version is the current version number of the nginx config. - version int } // newEventHandlerImpl creates a new eventHandlerImpl. func newEventHandlerImpl(cfg eventHandlerConfig) *eventHandlerImpl { handler := &eventHandlerImpl{ - cfg: cfg, + cfg: cfg, + latestConfigurations: make(map[types.NamespacedName]*dataplane.Configuration), } handler.objectFilters = map[filterKey]objectFilter{ @@ -158,28 +156,23 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log h.parseAndCaptureEvent(ctx, logger, event) } - changeType, gr := h.cfg.processor.Process() + gr := h.cfg.processor.Process() // Once we've processed resources on startup and built our first graph, mark the Pod as ready. if !h.cfg.graphBuiltHealthChecker.ready { h.cfg.graphBuiltHealthChecker.setAsReady() } - h.sendNginxConfig(ctx, logger, gr, changeType) + h.sendNginxConfig(ctx, logger, gr) } // enable is called when the pod becomes leader to ensure the provisioner has // the latest configuration. 
func (h *eventHandlerImpl) enable(ctx context.Context) { - h.sendNginxConfig(ctx, h.cfg.logger, h.cfg.processor.GetLatestGraph(), state.ClusterStateChange) + h.sendNginxConfig(ctx, h.cfg.logger, h.cfg.processor.GetLatestGraph()) } -func (h *eventHandlerImpl) sendNginxConfig( - ctx context.Context, - logger logr.Logger, - gr *graph.Graph, - changeType state.ChangeType, -) { +func (h *eventHandlerImpl) sendNginxConfig(ctx context.Context, logger logr.Logger, gr *graph.Graph) { if gr == nil { return } @@ -215,68 +208,30 @@ func (h *eventHandlerImpl) sendNginxConfig( panic("expected deployment, got nil") } - configApplied := h.processStateAndBuildConfig(ctx, logger, gr, gw, changeType, deployment) - - configErr := deployment.GetLatestConfigError() - upstreamErr := deployment.GetLatestUpstreamError() - err := errors.Join(configErr, upstreamErr) - - if configApplied || err != nil { - obj := &status.QueueObject{ - UpdateType: status.UpdateAll, - Error: err, - Deployment: gw.DeploymentName, - } - h.cfg.statusQueue.Enqueue(obj) - } - } -} - -func (h *eventHandlerImpl) processStateAndBuildConfig( - ctx context.Context, - logger logr.Logger, - gr *graph.Graph, - currentGateway *graph.Gateway, - changeType state.ChangeType, - deployment *agent.Deployment, -) bool { - var configApplied bool - switch changeType { - case state.EndpointsOnlyChange: - h.version++ - cfg := dataplane.BuildConfiguration(ctx, gr, currentGateway, h.cfg.serviceResolver, h.version, h.cfg.plus) + cfg := dataplane.BuildConfiguration(ctx, gr, gw, h.cfg.serviceResolver, h.cfg.plus) depCtx, getErr := h.getDeploymentContext(ctx) if getErr != nil { logger.Error(getErr, "error getting deployment context for usage reporting") } cfg.DeploymentContext = depCtx - h.setLatestConfiguration(&cfg) + h.setLatestConfiguration(gw, &cfg) deployment.FileLock.Lock() - if h.cfg.plus { - configApplied = h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, cfg) - } else { - configApplied = h.updateNginxConf(deployment, cfg) - } + h.updateNginxConf(deployment, cfg) deployment.FileLock.Unlock() - case state.ClusterStateChange: - h.version++ - cfg := dataplane.BuildConfiguration(ctx, gr, currentGateway, h.cfg.serviceResolver, h.version, h.cfg.plus) - depCtx, getErr := h.getDeploymentContext(ctx) - if getErr != nil { - logger.Error(getErr, "error getting deployment context for usage reporting") - } - cfg.DeploymentContext = depCtx - h.setLatestConfiguration(&cfg) + configErr := deployment.GetLatestConfigError() + upstreamErr := deployment.GetLatestUpstreamError() + err := errors.Join(configErr, upstreamErr) - deployment.FileLock.Lock() - configApplied = h.updateNginxConf(deployment, cfg) - deployment.FileLock.Unlock() + obj := &status.QueueObject{ + UpdateType: status.UpdateAll, + Error: err, + Deployment: gw.DeploymentName, + } + h.cfg.statusQueue.Enqueue(obj) } - - return configApplied } func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) { @@ -451,16 +406,14 @@ func (h *eventHandlerImpl) parseAndCaptureEvent(ctx context.Context, logger logr func (h *eventHandlerImpl) updateNginxConf( deployment *agent.Deployment, conf dataplane.Configuration, -) bool { +) { files := h.cfg.generator.Generate(conf) - applied := h.cfg.nginxUpdater.UpdateConfig(deployment, files) + h.cfg.nginxUpdater.UpdateConfig(deployment, files) // If using NGINX Plus, update upstream servers using the API. 
if h.cfg.plus { h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, conf) } - - return applied } // updateControlPlaneAndSetStatus updates the control plane configuration and then sets the status @@ -570,21 +523,28 @@ func (h *eventHandlerImpl) getDeploymentContext(ctx context.Context) (dataplane. } // GetLatestConfiguration gets the latest configuration. -func (h *eventHandlerImpl) GetLatestConfiguration() *dataplane.Configuration { +func (h *eventHandlerImpl) GetLatestConfiguration() []*dataplane.Configuration { h.lock.Lock() defer h.lock.Unlock() - return h.latestConfiguration + configs := make([]*dataplane.Configuration, 0, len(h.latestConfigurations)) + for _, cfg := range h.latestConfigurations { + configs = append(configs, cfg) + } + + return configs } // setLatestConfiguration sets the latest configuration. -// TODO(sberman): once we support multiple Gateways, this will likely have to be a map -// of all configurations. -func (h *eventHandlerImpl) setLatestConfiguration(cfg *dataplane.Configuration) { +func (h *eventHandlerImpl) setLatestConfiguration(gateway *graph.Gateway, cfg *dataplane.Configuration) { + if gateway == nil || gateway.Source == nil { + return + } + h.lock.Lock() defer h.lock.Unlock() - h.latestConfiguration = cfg + h.latestConfigurations[client.ObjectKeyFromObject(gateway.Source)] = cfg } func objectFilterKey(obj client.Object, nsName types.NamespacedName) filterKey { diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index ec5bfa437d..df5ee9d70a 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -31,7 +31,6 @@ import ( agentgrpcfakes "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/agent/grpc/grpcfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/nginx/config/configfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/provisioner/provisionerfakes" - "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/dataplane" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/graph" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/state/statefakes" @@ -99,11 +98,10 @@ var _ = Describe("eventHandler", func() { } fakeProcessor = &statefakes.FakeChangeProcessor{} - fakeProcessor.ProcessReturns(state.NoChange, &graph.Graph{}) + fakeProcessor.ProcessReturns(&graph.Graph{}) fakeProcessor.GetLatestGraphReturns(baseGraph) fakeGenerator = &configfakes.FakeGenerator{} fakeNginxUpdater = &agentfakes.FakeNginxUpdater{} - fakeNginxUpdater.UpdateConfigReturns(true) fakeProvisioner = &provisionerfakes.FakeProvisioner{} fakeProvisioner.RegisterGatewayReturns(nil) fakeStatusUpdater = &statusfakes.FakeGroupUpdater{} @@ -163,7 +161,7 @@ var _ = Describe("eventHandler", func() { } BeforeEach(func() { - fakeProcessor.ProcessReturns(state.ClusterStateChange, baseGraph) + fakeProcessor.ProcessReturns(baseGraph) fakeGenerator.GenerateReturns(fakeCfgFiles) }) @@ -178,11 +176,13 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) checkUpsertEventExpectations(e) expectReconfig(dcfg, fakeCfgFiles) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + config := handler.GetLatestConfiguration() + 
Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) }) It("should process Delete", func() { e := &events.DeleteEvent{ @@ -193,15 +193,17 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) checkDeleteEventExpectations(e) expectReconfig(dcfg, fakeCfgFiles) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) }) It("should not build anything if Gateway isn't set", func() { - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{}) + fakeProcessor.ProcessReturns(&graph.Graph{}) e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} @@ -218,7 +220,7 @@ var _ = Describe("eventHandler", func() { }).Should(Equal(1)) }) It("should not build anything if graph is nil", func() { - fakeProcessor.ProcessReturns(state.ClusterStateChange, nil) + fakeProcessor.ProcessReturns(nil) e := &events.UpsertEvent{Resource: &gatewayv1.HTTPRoute{}} batch := []interface{}{e} @@ -235,7 +237,7 @@ var _ = Describe("eventHandler", func() { }).Should(Equal(0)) }) It("should update gateway class even if gateway is invalid", func() { - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{ + fakeProcessor.ProcessReturns(&graph.Graph{ Gateways: map[types.NamespacedName]*graph.Gateway{ {Namespace: "test", Name: "gateway"}: { Valid: false, @@ -273,8 +275,11 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 2, &graph.Gateway{}) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) + + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) }) }) }) @@ -298,7 +303,7 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{&events.UpsertEvent{Resource: cfg(ngfAPI.ControllerLogLevelError)}} handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) Eventually( func() int { @@ -317,7 +322,7 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{&events.UpsertEvent{Resource: cfg(ngfAPI.ControllerLogLevel("invalid"))}} handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) Eventually( func() int { @@ -349,7 +354,7 @@ var _ = Describe("eventHandler", func() { } handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) Eventually( func() int { @@ -367,7 +372,7 @@ var _ = Describe("eventHandler", func() { }) }) - When("receiving an EndpointsOnlyChange update", func() { + Context("NGINX Plus API calls", func() { e := &events.UpsertEvent{Resource: &discoveryV1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "nginx-gateway", @@ -377,9 +382,17 @@ var _ = Describe("eventHandler", func() { 
batch := []interface{}{e} BeforeEach(func() { - fakeProcessor.ProcessReturns(state.EndpointsOnlyChange, &graph.Graph{ + fakeProcessor.ProcessReturns(&graph.Graph{ Gateways: map[types.NamespacedName]*graph.Gateway{ - {}: {Valid: true}, + {}: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + }, }, }) }) @@ -390,11 +403,14 @@ var _ = Describe("eventHandler", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) dcfg.NginxPlus = dataplane.NginxPlus{AllowedAddresses: []string{"127.0.0.1"}} - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) - Expect(fakeGenerator.GenerateCallCount()).To(Equal(0)) + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) + + Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) Expect(fakeNginxUpdater.UpdateUpstreamServersCallCount()).To(Equal(1)) }) }) @@ -403,8 +419,11 @@ var _ = Describe("eventHandler", func() { It("should not call the NGINX Plus API", func() { handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) + + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) Expect(fakeGenerator.GenerateCallCount()).To(Equal(1)) Expect(fakeNginxUpdater.UpdateConfigCallCount()).To(Equal(1)) @@ -456,17 +475,27 @@ var _ = Describe("eventHandler", func() { batch := []interface{}{e} readyChannel := handler.cfg.graphBuiltHealthChecker.getReadyCh() - fakeProcessor.ProcessReturns(state.ClusterStateChange, &graph.Graph{ + fakeProcessor.ProcessReturns(&graph.Graph{ Gateways: map[types.NamespacedName]*graph.Gateway{ - {}: {Valid: true}, + {}: { + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway", + }, + }, + Valid: true, + }, }, }) Expect(handler.cfg.graphBuiltHealthChecker.readyCheck(nil)).ToNot(Succeed()) handler.HandleEventBatch(context.Background(), logr.Discard(), batch) - dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, 1, &graph.Gateway{}) - Expect(helpers.Diff(handler.GetLatestConfiguration(), &dcfg)).To(BeEmpty()) + dcfg := dataplane.GetDefaultConfiguration(&graph.Graph{}, &graph.Gateway{}) + config := handler.GetLatestConfiguration() + Expect(config).To(HaveLen(1)) + Expect(helpers.Diff(config[0], &dcfg)).To(BeEmpty()) Expect(readyChannel).To(BeClosed()) @@ -483,7 +512,7 @@ var _ = Describe("eventHandler", func() { Expect(handle).Should(Panic()) - Expect(handler.GetLatestConfiguration()).To(BeNil()) + Expect(handler.GetLatestConfiguration()).To(BeEmpty()) }) }) diff --git a/internal/mode/static/nginx/agent/action.go b/internal/mode/static/nginx/agent/action.go new file mode 100644 index 0000000000..575cbf055b --- /dev/null +++ b/internal/mode/static/nginx/agent/action.go @@ -0,0 +1,92 @@ +package agent + +import ( + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/protobuf/types/known/structpb" +) + +func actionsEqual(a, b []*pb.NGINXPlusAction) bool { + if len(a) != len(b) { + return false + } + + 
for i := range a { + switch actionA := a[i].Action.(type) { + case *pb.NGINXPlusAction_UpdateHttpUpstreamServers: + actionB, ok := b[i].Action.(*pb.NGINXPlusAction_UpdateHttpUpstreamServers) + if !ok || !httpUpstreamsEqual(actionA.UpdateHttpUpstreamServers, actionB.UpdateHttpUpstreamServers) { + return false + } + case *pb.NGINXPlusAction_UpdateStreamServers: + actionB, ok := b[i].Action.(*pb.NGINXPlusAction_UpdateStreamServers) + if !ok || !streamUpstreamsEqual(actionA.UpdateStreamServers, actionB.UpdateStreamServers) { + return false + } + default: + return false + } + } + + return true +} + +func httpUpstreamsEqual(a, b *pb.UpdateHTTPUpstreamServers) bool { + if a.HttpUpstreamName != b.HttpUpstreamName { + return false + } + + if len(a.Servers) != len(b.Servers) { + return false + } + + for i := range a.Servers { + if !structsEqual(a.Servers[i], b.Servers[i]) { + return false + } + } + + return true +} + +func streamUpstreamsEqual(a, b *pb.UpdateStreamServers) bool { + if a.UpstreamStreamName != b.UpstreamStreamName { + return false + } + + if len(a.Servers) != len(b.Servers) { + return false + } + + for i := range a.Servers { + if !structsEqual(a.Servers[i], b.Servers[i]) { + return false + } + } + + return true +} + +func structsEqual(a, b *structpb.Struct) bool { + if len(a.Fields) != len(b.Fields) { + return false + } + + for key, valueA := range a.Fields { + valueB, exists := b.Fields[key] + if !exists || !valuesEqual(valueA, valueB) { + return false + } + } + + return true +} + +func valuesEqual(a, b *structpb.Value) bool { + switch valueA := a.Kind.(type) { + case *structpb.Value_StringValue: + valueB, ok := b.Kind.(*structpb.Value_StringValue) + return ok && valueA.StringValue == valueB.StringValue + default: + return false + } +} diff --git a/internal/mode/static/nginx/agent/action_test.go b/internal/mode/static/nginx/agent/action_test.go new file mode 100644 index 0000000000..491dbc0dd8 --- /dev/null +++ b/internal/mode/static/nginx/agent/action_test.go @@ -0,0 +1,347 @@ +package agent + +import ( + "testing" + + pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestActionsEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + actionA []*pb.NGINXPlusAction + actionB []*pb.NGINXPlusAction + expected bool + }{ + { + name: "Actions are equal", + actionA: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + }, + }, + }, + actionB: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "Actions have different types", + actionA: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + }, + }, + }, + }, + actionB: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: &pb.UpdateStreamServers{ + UpstreamStreamName: "upstream1", + }, + }, + }, + }, + expected: false, + }, + { + name: "Actions have different values", + actionA: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value1"}}}}, + }, + }, + }, + }, + }, + actionB: []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + }, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(actionsEqual(tt.actionA, tt.actionB)).To(Equal(tt.expected)) + }) + } +} + +func TestHttpUpstreamsEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + upstreamA *pb.UpdateHTTPUpstreamServers + upstreamB *pb.UpdateHTTPUpstreamServers + name string + expected bool + }{ + { + name: "HTTP upstreams are equal", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + expected: true, + }, + { + name: "HTTP upstreams have different upstream names", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + }, + upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream2", + }, + expected: false, + }, + { + name: "HTTP upstreams have different server lengths", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + 
Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + {Fields: map[string]*structpb.Value{"key2": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + { + name: "HTTP upstreams have different server contents", + upstreamA: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value1"}}}}, + }, + }, + upstreamB: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "upstream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(httpUpstreamsEqual(tt.upstreamA, tt.upstreamB)).To(Equal(tt.expected)) + }) + } +} + +func TestStreamUpstreamsEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + upstreamA *pb.UpdateStreamServers + upstreamB *pb.UpdateStreamServers + name string + expected bool + }{ + { + name: "Stream upstreams are equal", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + expected: true, + }, + { + name: "Stream have different upstream names", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream2", + }, + expected: false, + }, + { + name: "Stream upstreams have different server lengths", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + }, + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}}, + {Fields: map[string]*structpb.Value{"key2": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + { + name: "Stream upstreams have different server contents", + upstreamA: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value1"}}}}, + }, + }, + upstreamB: &pb.UpdateStreamServers{ + UpstreamStreamName: "stream1", + Servers: []*structpb.Struct{ + {Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value2"}}}}, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(streamUpstreamsEqual(tt.upstreamA, tt.upstreamB)).To(Equal(tt.expected)) + }) + } +} + +func TestStructsEqual(t 
*testing.T) { + t.Parallel() + + tests := []struct { + structA *structpb.Struct + structB *structpb.Struct + name string + expected bool + }{ + { + name: "Structs are equal", + structA: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + structB: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + expected: true, + }, + { + name: "Structs have different values", + structA: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + structB: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key": {Kind: &structpb.Value_StringValue{StringValue: "different"}}}, + }, + expected: false, + }, + { + name: "Structs have different keys", + structA: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key1": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + structB: &structpb.Struct{ + Fields: map[string]*structpb.Value{"key2": {Kind: &structpb.Value_StringValue{StringValue: "value"}}}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(structsEqual(tt.structA, tt.structB)).To(Equal(tt.expected)) + }) + } +} + +func TestValuesEqual(t *testing.T) { + t.Parallel() + + tests := []struct { + valueA *structpb.Value + valueB *structpb.Value + name string + expected bool + }{ + { + name: "Values are equal", + valueA: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "value"}}, + valueB: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "value"}}, + expected: true, + }, + { + name: "Values are not equal", + valueA: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "value"}}, + valueB: &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "different"}}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + g.Expect(valuesEqual(tt.valueA, tt.valueB)).To(Equal(tt.expected)) + }) + } +} diff --git a/internal/mode/static/nginx/agent/agent.go b/internal/mode/static/nginx/agent/agent.go index 7bc0818214..dbe49deb0c 100644 --- a/internal/mode/static/nginx/agent/agent.go +++ b/internal/mode/static/nginx/agent/agent.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "sort" "time" "github.com/go-logr/logr" @@ -27,8 +28,8 @@ const retryUpstreamTimeout = 5 * time.Second // NginxUpdater is an interface for updating NGINX using the NGINX agent. type NginxUpdater interface { - UpdateConfig(deployment *Deployment, files []File) bool - UpdateUpstreamServers(deployment *Deployment, conf dataplane.Configuration) bool + UpdateConfig(deployment *Deployment, files []File) + UpdateUpstreamServers(deployment *Deployment, conf dataplane.Configuration) } // NginxUpdaterImpl implements the NginxUpdater interface. @@ -73,7 +74,6 @@ func NewNginxUpdater( } // UpdateConfig sends the nginx configuration to the agent. -// Returns whether the configuration was sent to any agents. // // The flow of events is as follows: // - Set the configuration files on the deployment. 
@@ -86,27 +86,28 @@ func NewNginxUpdater( func (n *NginxUpdaterImpl) UpdateConfig( deployment *Deployment, files []File, -) bool { +) { msg := deployment.SetFiles(files) - applied := deployment.GetBroadcaster().Send(msg) + if msg == nil { + return + } + + applied := deployment.GetBroadcaster().Send(*msg) if applied { n.logger.Info("Sent nginx configuration to agent") } deployment.SetLatestConfigError(deployment.GetConfigurationStatus()) - - return applied } // UpdateUpstreamServers sends an APIRequest to the agent to update upstream servers using the NGINX Plus API. // Only applicable when using NGINX Plus. -// Returns whether the configuration was sent to any agents. func (n *NginxUpdaterImpl) UpdateUpstreamServers( deployment *Deployment, conf dataplane.Configuration, -) bool { +) { if !n.plus { - return false + return } broadcaster := deployment.GetBroadcaster() @@ -114,12 +115,6 @@ func (n *NginxUpdaterImpl) UpdateUpstreamServers( // reset the latest error to nil now that we're applying new config deployment.SetLatestUpstreamError(nil) - // TODO(sberman): optimize this by only sending updates that are necessary. - // Call GetUpstreams first (will need Subscribers to send responses back), and - // then determine which upstreams actually need to be updated. - // OR we can possibly just use the most recent NGINXPlusActions to see what the last state - // of upstreams were, and only update the diff. - var errs []error var applied bool actions := make([]*pb.NGINXPlusAction, 0, len(conf.Upstreams)+len(conf.StreamUpstreams)) @@ -141,6 +136,10 @@ func (n *NginxUpdaterImpl) UpdateUpstreamServers( actions = append(actions, action) } + if actionsEqual(deployment.GetNGINXPlusActions(), actions) { + return + } + for _, action := range actions { msg := broadcast.NginxAgentMessage{ Type: broadcast.APIRequest, @@ -163,8 +162,6 @@ func (n *NginxUpdaterImpl) UpdateUpstreamServers( // Store the most recent actions on the deployment so any new subscribers can apply them when first connecting. 
deployment.SetNGINXPlusActions(actions) - - return applied } func buildHTTPUpstreamServers(upstream dataplane.Upstream) *pb.UpdateHTTPUpstreamServers { @@ -197,6 +194,11 @@ func buildUpstreamServers(upstream dataplane.Upstream) []*structpb.Struct { servers = append(servers, server) } + // sort the servers to avoid unnecessary reloads + sort.Slice(servers, func(i, j int) bool { + return servers[i].Fields["server"].GetStringValue() < servers[j].Fields["server"].GetStringValue() + }) + return servers } diff --git a/internal/mode/static/nginx/agent/agent_test.go b/internal/mode/static/nginx/agent/agent_test.go index 3266003981..b0147d4d96 100644 --- a/internal/mode/static/nginx/agent/agent_test.go +++ b/internal/mode/static/nginx/agent/agent_test.go @@ -21,24 +21,16 @@ func TestUpdateConfig(t *testing.T) { t.Parallel() tests := []struct { - name string - configApplied bool - expErr bool + name string + expErr bool }{ { - name: "success", - configApplied: true, - expErr: false, + name: "success", + expErr: false, }, { - name: "error returned from agent", - configApplied: true, - expErr: true, - }, - { - name: "configuration not applied", - configApplied: false, - expErr: false, + name: "error returned from agent", + expErr: true, }, } @@ -48,7 +40,7 @@ func TestUpdateConfig(t *testing.T) { g := NewWithT(t) fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} - fakeBroadcaster.SendReturns(test.configApplied) + fakeBroadcaster.SendReturns(true) plus := false updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, plus) @@ -70,15 +62,16 @@ func TestUpdateConfig(t *testing.T) { deployment.SetPodErrorStatus("pod1", testErr) } - applied := updater.UpdateConfig(deployment, []File{file}) + updater.UpdateConfig(deployment, []File{file}) - g.Expect(applied).To(Equal(test.configApplied)) + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(1)) g.Expect(deployment.GetFile(file.Meta.Name, file.Meta.Hash)).To(Equal(file.Contents)) if test.expErr { g.Expect(deployment.GetLatestConfigError()).To(Equal(testErr)) // ensure that the error is cleared after the next config is applied deployment.SetPodErrorStatus("pod1", nil) + file.Meta.Hash = "5678" updater.UpdateConfig(deployment, []File{file}) g.Expect(deployment.GetLatestConfigError()).ToNot(HaveOccurred()) } else { @@ -88,6 +81,37 @@ func TestUpdateConfig(t *testing.T) { } } +func TestUpdateConfig_NoChange(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} + + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, false) + + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: make(map[string]error), + } + + file := File{ + Meta: &pb.FileMeta{ + Name: "test.conf", + Hash: "12345", + }, + Contents: []byte("test content"), + } + + // Set the initial files on the deployment + deployment.SetFiles([]File{file}) + + // Call UpdateConfig with the same files + updater.UpdateConfig(deployment, []File{file}) + + // Verify that no new configuration was sent + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(0)) +} + func TestUpdateUpstreamServers(t *testing.T) { t.Parallel() @@ -95,43 +119,31 @@ func TestUpdateUpstreamServers(t *testing.T) { name string buildUpstreams bool plus bool - configApplied bool expErr bool }{ { name: "success", plus: true, buildUpstreams: true, - configApplied: true, expErr: false, }, { name: "no upstreams to apply", plus: true, buildUpstreams: false, - configApplied: false, expErr: false, }, { - name: 
"not running nginx plus", - plus: false, - configApplied: false, - expErr: false, + name: "not running nginx plus", + plus: false, + expErr: false, }, { name: "error returned from agent", plus: true, buildUpstreams: true, - configApplied: true, expErr: true, }, - { - name: "configuration not applied", - plus: true, - buildUpstreams: true, - configApplied: false, - expErr: false, - }, } for _, test := range tests { @@ -140,7 +152,6 @@ func TestUpdateUpstreamServers(t *testing.T) { g := NewWithT(t) fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} - fakeBroadcaster.SendReturns(test.configApplied) updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, test.plus) updater.retryTimeout = 0 @@ -182,8 +193,7 @@ func TestUpdateUpstreamServers(t *testing.T) { } } - applied := updater.UpdateUpstreamServers(deployment, conf) - g.Expect(applied).To(Equal(test.configApplied)) + updater.UpdateUpstreamServers(deployment, conf) expActions := make([]*pb.NGINXPlusAction, 0) if test.buildUpstreams { @@ -221,8 +231,10 @@ func TestUpdateUpstreamServers(t *testing.T) { if !test.plus { g.Expect(deployment.GetNGINXPlusActions()).To(BeNil()) - } else { + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(0)) + } else if test.buildUpstreams { g.Expect(deployment.GetNGINXPlusActions()).To(Equal(expActions)) + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(2)) } if test.expErr { @@ -243,6 +255,83 @@ func TestUpdateUpstreamServers(t *testing.T) { } } +func TestUpdateUpstreamServers_NoChange(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + fakeBroadcaster := &broadcastfakes.FakeBroadcaster{} + + updater := NewNginxUpdater(logr.Discard(), fake.NewFakeClient(), &status.Queue{}, nil, true) + updater.retryTimeout = 0 + + deployment := &Deployment{ + broadcaster: fakeBroadcaster, + podStatuses: make(map[string]error), + } + + conf := dataplane.Configuration{ + Upstreams: []dataplane.Upstream{ + { + Name: "test-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "1.2.3.4", + Port: 8080, + }, + }, + }, + }, + StreamUpstreams: []dataplane.Upstream{ + { + Name: "test-stream-upstream", + Endpoints: []resolver.Endpoint{ + { + Address: "5.6.7.8", + }, + }, + }, + }, + } + + initialActions := []*pb.NGINXPlusAction{ + { + Action: &pb.NGINXPlusAction_UpdateHttpUpstreamServers{ + UpdateHttpUpstreamServers: &pb.UpdateHTTPUpstreamServers{ + HttpUpstreamName: "test-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("1.2.3.4:8080"), + }, + }, + }, + }, + }, + }, + { + Action: &pb.NGINXPlusAction_UpdateStreamServers{ + UpdateStreamServers: &pb.UpdateStreamServers{ + UpstreamStreamName: "test-stream-upstream", + Servers: []*structpb.Struct{ + { + Fields: map[string]*structpb.Value{ + "server": structpb.NewStringValue("5.6.7.8"), + }, + }, + }, + }, + }, + }, + } + deployment.SetNGINXPlusActions(initialActions) + + // Call UpdateUpstreamServers with the same configuration + updater.UpdateUpstreamServers(deployment, conf) + + // Verify that no new actions were sent + g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(0)) +} + func TestGetPortAndIPFormat(t *testing.T) { t.Parallel() diff --git a/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go index 6c3165e5b6..f69009ce04 100644 --- a/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go +++ b/internal/mode/static/nginx/agent/agentfakes/fake_nginx_updater.go @@ -9,57 +9,39 @@ 
import ( ) type FakeNginxUpdater struct { - UpdateConfigStub func(*agent.Deployment, []agent.File) bool + UpdateConfigStub func(*agent.Deployment, []agent.File) updateConfigMutex sync.RWMutex updateConfigArgsForCall []struct { arg1 *agent.Deployment arg2 []agent.File } - updateConfigReturns struct { - result1 bool - } - updateConfigReturnsOnCall map[int]struct { - result1 bool - } - UpdateUpstreamServersStub func(*agent.Deployment, dataplane.Configuration) bool + UpdateUpstreamServersStub func(*agent.Deployment, dataplane.Configuration) updateUpstreamServersMutex sync.RWMutex updateUpstreamServersArgsForCall []struct { arg1 *agent.Deployment arg2 dataplane.Configuration } - updateUpstreamServersReturns struct { - result1 bool - } - updateUpstreamServersReturnsOnCall map[int]struct { - result1 bool - } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *FakeNginxUpdater) UpdateConfig(arg1 *agent.Deployment, arg2 []agent.File) bool { +func (fake *FakeNginxUpdater) UpdateConfig(arg1 *agent.Deployment, arg2 []agent.File) { var arg2Copy []agent.File if arg2 != nil { arg2Copy = make([]agent.File, len(arg2)) copy(arg2Copy, arg2) } fake.updateConfigMutex.Lock() - ret, specificReturn := fake.updateConfigReturnsOnCall[len(fake.updateConfigArgsForCall)] fake.updateConfigArgsForCall = append(fake.updateConfigArgsForCall, struct { arg1 *agent.Deployment arg2 []agent.File }{arg1, arg2Copy}) stub := fake.UpdateConfigStub - fakeReturns := fake.updateConfigReturns fake.recordInvocation("UpdateConfig", []interface{}{arg1, arg2Copy}) fake.updateConfigMutex.Unlock() if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 + fake.UpdateConfigStub(arg1, arg2) } - return fakeReturns.result1 } func (fake *FakeNginxUpdater) UpdateConfigCallCount() int { @@ -68,7 +50,7 @@ func (fake *FakeNginxUpdater) UpdateConfigCallCount() int { return len(fake.updateConfigArgsForCall) } -func (fake *FakeNginxUpdater) UpdateConfigCalls(stub func(*agent.Deployment, []agent.File) bool) { +func (fake *FakeNginxUpdater) UpdateConfigCalls(stub func(*agent.Deployment, []agent.File)) { fake.updateConfigMutex.Lock() defer fake.updateConfigMutex.Unlock() fake.UpdateConfigStub = stub @@ -81,47 +63,18 @@ func (fake *FakeNginxUpdater) UpdateConfigArgsForCall(i int) (*agent.Deployment, return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeNginxUpdater) UpdateConfigReturns(result1 bool) { - fake.updateConfigMutex.Lock() - defer fake.updateConfigMutex.Unlock() - fake.UpdateConfigStub = nil - fake.updateConfigReturns = struct { - result1 bool - }{result1} -} - -func (fake *FakeNginxUpdater) UpdateConfigReturnsOnCall(i int, result1 bool) { - fake.updateConfigMutex.Lock() - defer fake.updateConfigMutex.Unlock() - fake.UpdateConfigStub = nil - if fake.updateConfigReturnsOnCall == nil { - fake.updateConfigReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.updateConfigReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *FakeNginxUpdater) UpdateUpstreamServers(arg1 *agent.Deployment, arg2 dataplane.Configuration) bool { +func (fake *FakeNginxUpdater) UpdateUpstreamServers(arg1 *agent.Deployment, arg2 dataplane.Configuration) { fake.updateUpstreamServersMutex.Lock() - ret, specificReturn := fake.updateUpstreamServersReturnsOnCall[len(fake.updateUpstreamServersArgsForCall)] fake.updateUpstreamServersArgsForCall = append(fake.updateUpstreamServersArgsForCall, struct { arg1 *agent.Deployment arg2 dataplane.Configuration }{arg1, arg2}) stub := 
fake.UpdateUpstreamServersStub - fakeReturns := fake.updateUpstreamServersReturns fake.recordInvocation("UpdateUpstreamServers", []interface{}{arg1, arg2}) fake.updateUpstreamServersMutex.Unlock() if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 + fake.UpdateUpstreamServersStub(arg1, arg2) } - return fakeReturns.result1 } func (fake *FakeNginxUpdater) UpdateUpstreamServersCallCount() int { @@ -130,7 +83,7 @@ func (fake *FakeNginxUpdater) UpdateUpstreamServersCallCount() int { return len(fake.updateUpstreamServersArgsForCall) } -func (fake *FakeNginxUpdater) UpdateUpstreamServersCalls(stub func(*agent.Deployment, dataplane.Configuration) bool) { +func (fake *FakeNginxUpdater) UpdateUpstreamServersCalls(stub func(*agent.Deployment, dataplane.Configuration)) { fake.updateUpstreamServersMutex.Lock() defer fake.updateUpstreamServersMutex.Unlock() fake.UpdateUpstreamServersStub = stub @@ -143,29 +96,6 @@ func (fake *FakeNginxUpdater) UpdateUpstreamServersArgsForCall(i int) (*agent.De return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeNginxUpdater) UpdateUpstreamServersReturns(result1 bool) { - fake.updateUpstreamServersMutex.Lock() - defer fake.updateUpstreamServersMutex.Unlock() - fake.UpdateUpstreamServersStub = nil - fake.updateUpstreamServersReturns = struct { - result1 bool - }{result1} -} - -func (fake *FakeNginxUpdater) UpdateUpstreamServersReturnsOnCall(i int, result1 bool) { - fake.updateUpstreamServersMutex.Lock() - defer fake.updateUpstreamServersMutex.Unlock() - fake.UpdateUpstreamServersStub = nil - if fake.updateUpstreamServersReturnsOnCall == nil { - fake.updateUpstreamServersReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.updateUpstreamServersReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - func (fake *FakeNginxUpdater) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() diff --git a/internal/mode/static/nginx/agent/deployment.go b/internal/mode/static/nginx/agent/deployment.go index 5da82c7fd9..3aa8d80b6b 100644 --- a/internal/mode/static/nginx/agent/deployment.go +++ b/internal/mode/static/nginx/agent/deployment.go @@ -173,7 +173,7 @@ func (d *Deployment) GetFile(name, hash string) []byte { // SetFiles updates the nginx files and fileOverviews for the deployment and returns the message to send. // The deployment FileLock MUST already be locked before calling this function. 
-func (d *Deployment) SetFiles(files []File) broadcast.NginxAgentMessage { +func (d *Deployment) SetFiles(files []File) *broadcast.NginxAgentMessage { d.files = files fileOverviews := make([]*pb.File, 0, len(files)) @@ -194,10 +194,16 @@ func (d *Deployment) SetFiles(files []File) broadcast.NginxAgentMessage { }) } - d.configVersion = filesHelper.GenerateConfigVersion(fileOverviews) + newConfigVersion := filesHelper.GenerateConfigVersion(fileOverviews) + if d.configVersion == newConfigVersion { + // files have not changed, nothing to send + return nil + } + + d.configVersion = newConfigVersion d.fileOverviews = fileOverviews - return broadcast.NginxAgentMessage{ + return &broadcast.NginxAgentMessage{ Type: broadcast.ConfigApplyRequest, FileOverviews: fileOverviews, ConfigVersion: d.configVersion, diff --git a/internal/mode/static/nginx/agent/deployment_test.go b/internal/mode/static/nginx/agent/deployment_test.go index 3c6dc4c859..57d9510588 100644 --- a/internal/mode/static/nginx/agent/deployment_test.go +++ b/internal/mode/static/nginx/agent/deployment_test.go @@ -57,6 +57,13 @@ func TestSetAndGetFiles(t *testing.T) { g.Expect(deployment.GetFile("invalid", "12345")).To(BeNil()) g.Expect(deployment.GetFile("test.conf", "invalid")).To(BeNil()) + + // Set the same files again + msg = deployment.SetFiles(files) + g.Expect(msg).To(BeNil()) + + newFileOverviews, _ := deployment.GetFileOverviews() + g.Expect(newFileOverviews).To(Equal(fileOverviews)) } func TestSetNGINXPlusActions(t *testing.T) { diff --git a/internal/mode/static/provisioner/templates.go b/internal/mode/static/provisioner/templates.go index e58ee3abec..326cac7478 100644 --- a/internal/mode/static/provisioner/templates.go +++ b/internal/mode/static/provisioner/templates.go @@ -61,15 +61,15 @@ log: {{- if .EnableMetrics }} collector: receivers: - host_metrics: - collection_interval: 1m0s - initial_delay: 1s - scrapers: - cpu: {} - memory: {} - disk: {} - network: {} - filesystem: {} + host_metrics: + collection_interval: 1m0s + initial_delay: 1s + scrapers: + cpu: {} + memory: {} + disk: {} + network: {} + filesystem: {} processors: batch: {} exporters: diff --git a/internal/mode/static/state/change_processor.go b/internal/mode/static/state/change_processor.go index 1cd72f7612..1d136383b8 100644 --- a/internal/mode/static/state/change_processor.go +++ b/internal/mode/static/state/change_processor.go @@ -28,19 +28,6 @@ import ( //go:generate go tool counterfeiter -generate -// ChangeType is the type of change that occurred based on a k8s object event. -type ChangeType int - -const ( - // NoChange means that nothing changed. - NoChange ChangeType = iota - // EndpointsOnlyChange means that only the endpoints changed. - // If using NGINX Plus, this update can be done using the API without a reload. - EndpointsOnlyChange - // ClusterStateChange means that something other than endpoints changed. This requires an NGINX reload. - ClusterStateChange -) - //counterfeiter:generate . ChangeProcessor // ChangeProcessor processes the changes to resources and produces a graph-like representation @@ -55,8 +42,8 @@ type ChangeProcessor interface { // this ChangeProcessor was created for. CaptureDeleteChange(resourceType ngftypes.ObjectType, nsname types.NamespacedName) // Process produces a graph-like representation of GatewayAPI resources. - // If no changes were captured, the changed return argument will be NoChange and graph will be empty. 
- Process() (changeType ChangeType, graphCfg *graph.Graph) + // If no changes were captured, the graph will be empty. + Process() (graphCfg *graph.Graph) // GetLatestGraph returns the latest Graph. GetLatestGraph() *graph.Graph } @@ -88,7 +75,7 @@ type ChangeProcessorImpl struct { // updater acts upon the cluster state. updater Updater // getAndResetClusterStateChanged tells if and how the cluster state has changed. - getAndResetClusterStateChanged func() ChangeType + getAndResetClusterStateChanged func() bool cfg ChangeProcessorConfig lock sync.Mutex @@ -268,13 +255,12 @@ func (c *ChangeProcessorImpl) CaptureDeleteChange(resourceType ngftypes.ObjectTy c.updater.Delete(resourceType, nsname) } -func (c *ChangeProcessorImpl) Process() (ChangeType, *graph.Graph) { +func (c *ChangeProcessorImpl) Process() *graph.Graph { c.lock.Lock() defer c.lock.Unlock() - changeType := c.getAndResetClusterStateChanged() - if changeType == NoChange { - return NoChange, nil + if !c.getAndResetClusterStateChanged() { + return nil } c.latestGraph = graph.BuildGraph( @@ -285,7 +271,7 @@ func (c *ChangeProcessorImpl) Process() (ChangeType, *graph.Graph) { c.cfg.Validators, ) - return changeType, c.latestGraph + return c.latestGraph } func (c *ChangeProcessorImpl) GetLatestGraph() *graph.Graph { diff --git a/internal/mode/static/state/change_processor_test.go b/internal/mode/static/state/change_processor_test.go index 76055298f0..4797710fa3 100644 --- a/internal/mode/static/state/change_processor_test.go +++ b/internal/mode/static/state/change_processor_test.go @@ -400,6 +400,26 @@ var _ = Describe("ChangeProcessor", func() { processor state.ChangeProcessor ) + testUpsertTriggersChange := func(obj client.Object) { + processor.CaptureUpsertChange(obj) + Expect(processor.Process()).ToNot(BeNil()) + } + + testUpsertDoesNotTriggerChange := func(obj client.Object) { + processor.CaptureUpsertChange(obj) + Expect(processor.Process()).To(BeNil()) + } + + testDeleteTriggersChange := func(obj client.Object, nsname types.NamespacedName) { + processor.CaptureDeleteChange(obj, nsname) + Expect(processor.Process()).ToNot(BeNil()) + } + + testDeleteDoesNotTriggerChange := func(obj client.Object, nsname types.NamespacedName) { + processor.CaptureDeleteChange(obj, nsname) + Expect(processor.Process()).To(BeNil()) + } + BeforeEach(OncePerOrdered, func() { processor = state.NewChangeProcessorImpl(state.ChangeProcessorConfig{ GatewayCtlrName: controllerName, @@ -432,8 +452,7 @@ var _ = Describe("ChangeProcessor", func() { ) processAndValidateGraph := func(expGraph *graph.Graph) { - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graphCfg := processor.Process() Expect(helpers.Diff(expGraph, graphCfg)).To(BeEmpty()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) } @@ -1208,8 +1227,7 @@ var _ = Describe("ChangeProcessor", func() { }) When("no upsert has occurred", func() { It("returns nil graph", func() { - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(processor.GetLatestGraph()).To(BeNil()) }) @@ -1248,8 +1266,7 @@ var _ = Describe("ChangeProcessor", func() { It("returns nil graph", func() { processor.CaptureUpsertChange(diffNsTLSSecret) - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(&graph.Graph{}, 
processor.GetLatestGraph())).To(BeEmpty()) }) @@ -1562,8 +1579,7 @@ var _ = Describe("ChangeProcessor", func() { gatewayclass.SupportedVersion, ) - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) }) @@ -1680,8 +1696,7 @@ var _ = Describe("ChangeProcessor", func() { CertBundle: diffNsTLSCert, } - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) }) @@ -1695,8 +1710,7 @@ var _ = Describe("ChangeProcessor", func() { CertBundle: diffNsTLSCert, } - changed, graphCfg := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + graphCfg := processor.Process() Expect(graphCfg).To(BeNil()) Expect(helpers.Diff(expGraph, processor.GetLatestGraph())).To(BeEmpty()) }) @@ -2411,58 +2425,43 @@ var _ = Describe("ChangeProcessor", func() { gw = createGateway("gw", createHTTPListener()) processor.CaptureUpsertChange(gc) processor.CaptureUpsertChange(gw) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + gr := processor.Process() + Expect(gr).ToNot(BeNil()) }) - testProcessChangedVal := func(expChanged state.ChangeType) { - changed, _ := processor.Process() - Expect(changed).To(Equal(expChanged)) - } - - testUpsertTriggersChange := func(obj client.Object, expChanged state.ChangeType) { - processor.CaptureUpsertChange(obj) - testProcessChangedVal(expChanged) - } - - testDeleteTriggersChange := func(obj client.Object, nsname types.NamespacedName, expChanged state.ChangeType) { - processor.CaptureDeleteChange(obj, nsname) - testProcessChangedVal(expChanged) - } - When("hr1 is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1, state.ClusterStateChange) + testUpsertTriggersChange(hr1) }) }) When("a hr1 service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1svc, state.ClusterStateChange) + testUpsertTriggersChange(hr1svc) }) }) When("a backendTLSPolicy is added for referenced service", func() { It("should trigger a change", func() { - testUpsertTriggersChange(btls, state.ClusterStateChange) + testUpsertTriggersChange(btls) }) }) When("an hr1 endpoint slice is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice1, state.EndpointsOnlyChange) + testUpsertTriggersChange(hr1slice1) }) }) When("an hr1 service is updated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1svc, state.ClusterStateChange) + testUpsertTriggersChange(hr1svc) }) }) When("another hr1 endpoint slice is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice2, state.EndpointsOnlyChange) + testUpsertTriggersChange(hr1slice2) }) }) When("an endpoint slice with a missing svc name label is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(missingSvcNameSlice, state.NoChange) + testUpsertDoesNotTriggerChange(missingSvcNameSlice) }) }) When("an hr1 endpoint slice is deleted", func() { @@ -2470,7 +2469,6 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1slice1, types.NamespacedName{Namespace: hr1slice1.Namespace, Name: hr1slice1.Name}, - state.EndpointsOnlyChange, ) }) }) @@ -2479,13 +2477,12 @@ var _ = 
Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1slice2, types.NamespacedName{Namespace: hr1slice2.Namespace, Name: hr1slice2.Name}, - state.EndpointsOnlyChange, ) }) }) When("the second hr1 endpoint slice is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr1slice2, state.EndpointsOnlyChange) + testUpsertTriggersChange(hr1slice2) }) }) When("hr1 is deleted", func() { @@ -2493,41 +2490,38 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr1, types.NamespacedName{Namespace: hr1.Namespace, Name: hr1.Name}, - state.ClusterStateChange, ) }) }) When("hr1 service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( hr1svc, types.NamespacedName{Namespace: hr1svc.Namespace, Name: hr1svc.Name}, - state.NoChange, ) }) }) When("the second hr1 endpoint slice is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( hr1slice2, types.NamespacedName{Namespace: hr1slice2.Namespace, Name: hr1slice2.Name}, - state.NoChange, ) }) }) When("hr2 is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr2, state.ClusterStateChange) + testUpsertTriggersChange(hr2) }) }) When("a hr3, that shares a backend service with hr2, is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hr3, state.ClusterStateChange) + testUpsertTriggersChange(hr3) }) }) When("sharedSvc, a service referenced by both hr2 and hr3, is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(sharedSvc, state.ClusterStateChange) + testUpsertTriggersChange(sharedSvc) }) }) When("hr2 is deleted", func() { @@ -2535,7 +2529,6 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr2, types.NamespacedName{Namespace: hr2.Namespace, Name: hr2.Name}, - state.ClusterStateChange, ) }) }) @@ -2544,13 +2537,12 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( sharedSvc, types.NamespacedName{Namespace: sharedSvc.Namespace, Name: sharedSvc.Name}, - state.ClusterStateChange, ) }) }) When("sharedSvc is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(sharedSvc, state.ClusterStateChange) + testUpsertTriggersChange(sharedSvc) }) }) When("hr3 is deleted", func() { @@ -2558,62 +2550,59 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( hr3, types.NamespacedName{Namespace: hr3.Namespace, Name: hr3.Name}, - state.ClusterStateChange, ) }) }) When("sharedSvc is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( sharedSvc, types.NamespacedName{Namespace: sharedSvc.Namespace, Name: sharedSvc.Name}, - state.NoChange, ) }) }) When("a service that is not referenced by any route is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(notRefSvc, state.NoChange) + testUpsertDoesNotTriggerChange(notRefSvc) }) }) When("a route with an invalid backend ref type is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hrInvalidBackendRef, state.ClusterStateChange) + testUpsertTriggersChange(hrInvalidBackendRef) }) }) When("a service with a namespace name that matches invalid backend ref is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(invalidSvc, state.NoChange) + 
testUpsertDoesNotTriggerChange(invalidSvc) }) }) When("an endpoint slice that is not owned by a referenced service is added", func() { It("should not trigger a change", func() { - testUpsertTriggersChange(noRefSlice, state.NoChange) + testUpsertDoesNotTriggerChange(noRefSlice) }) }) When("an endpoint slice that is not owned by a referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( noRefSlice, types.NamespacedName{Namespace: noRefSlice.Namespace, Name: noRefSlice.Name}, - state.NoChange, ) }) }) Context("processing a route with multiple rules and three unique backend services", func() { When("route is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(hrMultipleRules, state.ClusterStateChange) + testUpsertTriggersChange(hrMultipleRules) }) }) When("first referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc1, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc1) }) }) When("second referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc2, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc2) }) }) When("first referenced service is deleted", func() { @@ -2621,23 +2610,22 @@ var _ = Describe("ChangeProcessor", func() { testDeleteTriggersChange( bazSvc1, types.NamespacedName{Namespace: bazSvc1.Namespace, Name: bazSvc1.Name}, - state.ClusterStateChange, ) }) }) When("first referenced service is recreated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc1, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc1) }) }) When("third referenced service is added", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc3, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc3) }) }) When("third referenced service is updated", func() { It("should trigger a change", func() { - testUpsertTriggersChange(bazSvc3, state.ClusterStateChange) + testUpsertTriggersChange(bazSvc3) }) }) When("route is deleted", func() { @@ -2648,34 +2636,30 @@ var _ = Describe("ChangeProcessor", func() { Namespace: hrMultipleRules.Namespace, Name: hrMultipleRules.Name, }, - state.ClusterStateChange, ) }) }) When("first referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( bazSvc1, types.NamespacedName{Namespace: bazSvc1.Namespace, Name: bazSvc1.Name}, - state.NoChange, ) }) }) When("second referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( bazSvc2, types.NamespacedName{Namespace: bazSvc2.Namespace, Name: bazSvc2.Name}, - state.NoChange, ) }) }) When("final referenced service is deleted", func() { It("should not trigger a change", func() { - testDeleteTriggersChange( + testDeleteDoesNotTriggerChange( bazSvc3, types.NamespacedName{Namespace: bazSvc3.Namespace, Name: bazSvc3.Name}, - state.NoChange, ) }) }) @@ -2748,44 +2732,31 @@ var _ = Describe("ChangeProcessor", func() { When("a namespace is created that is not linked to a listener", func() { It("does not trigger an update", func() { - processor.CaptureUpsertChange(nsNoLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + testUpsertDoesNotTriggerChange(nsNoLabels) }) }) When("a namespace is created that is linked to a listener", func() { 
It("triggers an update", func() { - processor.CaptureUpsertChange(ns) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(ns) }) }) When("a namespace is deleted that is not linked to a listener", func() { It("does not trigger an update", func() { - processor.CaptureDeleteChange(nsNoLabels, types.NamespacedName{Name: "no-labels"}) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + testDeleteDoesNotTriggerChange(nsNoLabels, types.NamespacedName{Name: "no-labels"}) }) }) When("a namespace is deleted that is linked to a listener", func() { It("triggers an update", func() { - processor.CaptureDeleteChange(ns, types.NamespacedName{Name: "ns"}) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testDeleteTriggersChange(ns, types.NamespacedName{Name: "ns"}) }) }) When("a namespace that is not linked to a listener has its labels changed to match a listener", func() { It("triggers an update", func() { - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) - + testUpsertDoesNotTriggerChange(nsDifferentLabels) nsDifferentLabels.Labels = map[string]string{ "app": "allowed", } - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(nsDifferentLabels) }) }) When( @@ -2795,9 +2766,7 @@ var _ = Describe("ChangeProcessor", func() { nsDifferentLabels.Labels = map[string]string{ "oranges": "bananas", } - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(nsDifferentLabels) }) }, ) @@ -2808,9 +2777,7 @@ var _ = Describe("ChangeProcessor", func() { "oranges": "bananas", } gwChangedLabel.Generation++ - processor.CaptureUpsertChange(gwChangedLabel) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(gwChangedLabel) // After changing the gateway's labels and generation, the processor should be marked to update // the nginx configuration and build a new graph. When processor.Process() gets called, @@ -2819,29 +2786,20 @@ var _ = Describe("ChangeProcessor", func() { // the new labels on the gateway, it would not trigger a change as the namespace would no longer // be in the updated referencedNamespaces and the labels no longer match the new labels on the // gateway. 
- processor.CaptureUpsertChange(ns) - changed, _ = processor.Process() - Expect(changed).To(Equal(state.NoChange)) - - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertDoesNotTriggerChange(ns) + testUpsertTriggersChange(nsDifferentLabels) }) }) When("a namespace that is not linked to a listener has its labels removed", func() { It("does not trigger an update", func() { ns.Labels = nil - processor.CaptureUpsertChange(ns) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + testUpsertDoesNotTriggerChange(ns) }) }) When("a namespace that is linked to a listener has its labels removed", func() { It("triggers an update when labels are removed", func() { nsDifferentLabels.Labels = nil - processor.CaptureUpsertChange(nsDifferentLabels) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + testUpsertTriggersChange(nsDifferentLabels) }) }) }) @@ -2883,23 +2841,23 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(np) processor.CaptureUpsertChange(paramGC) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.GatewayClass.NginxProxy.Source).To(Equal(np)) }) It("captures changes for an NginxProxy", func() { processor.CaptureUpsertChange(npUpdated) processor.CaptureUpsertChange(paramGC) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.GatewayClass.NginxProxy.Source).To(Equal(npUpdated)) }) It("handles deletes for an NginxProxy", func() { processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.GatewayClass.NginxProxy).To(BeNil()) }) }) @@ -2957,25 +2915,25 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(np) processor.CaptureUpsertChange(paramGW) - changed, graph := processor.Process() + graph := processor.Process() + Expect(graph).ToNot(BeNil()) gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] - Expect(changed).To(Equal(state.ClusterStateChange)) Expect(gw.NginxProxy.Source).To(Equal(np)) }) It("captures changes for an NginxProxy", func() { processor.CaptureUpsertChange(npUpdated) processor.CaptureUpsertChange(paramGW) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] Expect(gw.NginxProxy.Source).To(Equal(npUpdated)) }) It("handles deletes for an NginxProxy", func() { processor.CaptureDeleteChange(np, client.ObjectKeyFromObject(np)) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) gw := graph.Gateways[types.NamespacedName{Namespace: "test", Name: "param-gw"}] Expect(gw.NginxProxy).To(BeNil()) }) @@ -2995,8 +2953,8 @@ var _ = Describe("ChangeProcessor", func() { BeforeAll(func() { processor.CaptureUpsertChange(gc) - changed, newGraph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + newGraph := processor.Process() + 
Expect(newGraph).ToNot(BeNil()) Expect(newGraph.GatewayClass.Source).To(Equal(gc)) Expect(newGraph.NGFPolicies).To(BeEmpty()) @@ -3126,29 +3084,28 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(obs) processor.CaptureUpsertChange(usp) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) }) When("the resource the policy references is created", func() { It("populates the graph with the policy", func() { processor.CaptureUpsertChange(gw) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(HaveKey(cspKey)) Expect(graph.NGFPolicies[cspKey].Source).To(Equal(csp)) Expect(graph.NGFPolicies).ToNot(HaveKey(obsKey)) processor.CaptureUpsertChange(route) - changed, graph = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph = processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(HaveKey(obsKey)) Expect(graph.NGFPolicies[obsKey].Source).To(Equal(obs)) processor.CaptureUpsertChange(svc) - changed, graph = processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph = processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(HaveKey(uspKey)) Expect(graph.NGFPolicies[uspKey].Source).To(Equal(usp)) }) @@ -3159,8 +3116,8 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(obsUpdated) processor.CaptureUpsertChange(uspUpdated) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(HaveKey(cspKey)) Expect(graph.NGFPolicies[cspKey].Source).To(Equal(cspUpdated)) Expect(graph.NGFPolicies).To(HaveKey(obsKey)) @@ -3175,8 +3132,8 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureDeleteChange(&ngfAPIv1alpha2.ObservabilityPolicy{}, client.ObjectKeyFromObject(obs)) processor.CaptureDeleteChange(&ngfAPIv1alpha1.UpstreamSettingsPolicy{}, client.ObjectKeyFromObject(usp)) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.NGFPolicies).To(BeEmpty()) }) }) @@ -3224,8 +3181,8 @@ var _ = Describe("ChangeProcessor", func() { It("handles upserts for a SnippetsFilter", func() { processor.CaptureUpsertChange(sf) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) processedSf, exists := graph.SnippetsFilters[sfNsName] Expect(exists).To(BeTrue()) @@ -3235,8 +3192,8 @@ var _ = Describe("ChangeProcessor", func() { It("captures changes for a SnippetsFilter", func() { processor.CaptureUpsertChange(sfUpdated) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) processedSf, exists := graph.SnippetsFilters[sfNsName] Expect(exists).To(BeTrue()) @@ -3246,8 +3203,8 @@ var _ = Describe("ChangeProcessor", func() { It("handles deletes for a SnippetsFilter", func() { processor.CaptureDeleteChange(sfUpdated, sfNsName) - changed, graph := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + graph := processor.Process() + Expect(graph).ToNot(BeNil()) Expect(graph.SnippetsFilters).To(BeEmpty()) }) }) @@ -3582,7 
+3539,7 @@ var _ = Describe("ChangeProcessor", func() { } npUpdated = np.DeepCopy() }) - // Changing change - a change that makes processor.Process() report changed + // Changing change - a change that makes processor.Process() return a built graph // Non-changing change - a change that doesn't do that // Related resource - a K8s resource that is related to a configured Gateway API resource // Unrelated resource - a K8s resource that is not related to a configured Gateway API resource @@ -3590,7 +3547,7 @@ var _ = Describe("ChangeProcessor", func() { // Note: in these tests, we deliberately don't fully inspect the returned configuration and statuses // -- this is done in 'Normal cases of processing changes' Describe("Multiple Gateway API resource changes", Ordered, func() { - It("should report changed after multiple Upserts", func() { + It("should build graph after multiple Upserts", func() { processor.CaptureUpsertChange(gc) processor.CaptureUpsertChange(gw1) processor.CaptureUpsertChange(testNs) @@ -3601,11 +3558,10 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(cm) processor.CaptureUpsertChange(np) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) When("a upsert of updated resources is followed by an upsert of the same generation", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing changes processor.CaptureUpsertChange(gcUpdated) processor.CaptureUpsertChange(gw1Updated) @@ -3626,22 +3582,20 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(cmUpdated) processor.CaptureUpsertChange(npUpdated) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) - It("should report changed after upserting new resources", func() { + It("should build graph after upserting new resources", func() { // we can't have a second GatewayClass, so we don't add it processor.CaptureUpsertChange(gw2) processor.CaptureUpsertChange(hr2) processor.CaptureUpsertChange(gr2) processor.CaptureUpsertChange(rg2) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) When("resources are deleted followed by upserts with the same generations", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing changes processor.CaptureDeleteChange(&v1.GatewayClass{}, gcNsName) processor.CaptureDeleteChange(&v1.Gateway{}, gwNsName) @@ -3658,20 +3612,18 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(gr2) processor.CaptureUpsertChange(rg2) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) - It("should report changed after deleting resources", func() { + It("should build graph after deleting resources", func() { processor.CaptureDeleteChange(&v1.HTTPRoute{}, hr2NsName) processor.CaptureDeleteChange(&v1.HTTPRoute{}, gr2NsName) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) Describe("Deleting non-existing Gateway API resource", func() { - It("should not report changed after deleting non-existing", func() { + It("should not build graph after deleting non-existing", func() { processor.CaptureDeleteChange(&v1.GatewayClass{}, 
gcNsName) processor.CaptureDeleteChange(&v1.Gateway{}, gwNsName) processor.CaptureDeleteChange(&v1.HTTPRoute{}, hrNsName) @@ -3680,8 +3632,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureDeleteChange(&v1.HTTPRoute{}, gr2NsName) processor.CaptureDeleteChange(&v1beta1.ReferenceGrant{}, rgNsName) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) }) Describe("Multiple Kubernetes API resource changes", Ordered, func() { @@ -3695,31 +3646,28 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(secret) processor.CaptureUpsertChange(barSecret) processor.CaptureUpsertChange(cm) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) - It("should report changed after multiple Upserts of related resources", func() { + It("should build graph after multiple Upserts of related resources", func() { processor.CaptureUpsertChange(svc) processor.CaptureUpsertChange(slice) processor.CaptureUpsertChange(ns) processor.CaptureUpsertChange(secretUpdated) processor.CaptureUpsertChange(cmUpdated) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) - It("should report not changed after multiple Upserts of unrelated resources", func() { + It("should not build graph after multiple Upserts of unrelated resources", func() { processor.CaptureUpsertChange(unrelatedSvc) processor.CaptureUpsertChange(unrelatedSlice) processor.CaptureUpsertChange(unrelatedNS) processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) When("upserts of related resources are followed by upserts of unrelated resources", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing changes processor.CaptureUpsertChange(barSvc) processor.CaptureUpsertChange(barSlice) @@ -3734,12 +3682,11 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) When("deletes of related resources are followed by upserts of unrelated resources", func() { - It("should report changed", func() { + It("should build graph", func() { // these are changing changes processor.CaptureDeleteChange(&apiv1.Service{}, svcNsName) processor.CaptureDeleteChange(&discoveryV1.EndpointSlice{}, sliceNsName) @@ -3754,13 +3701,12 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) }) }) Describe("Multiple Kubernetes API and Gateway API resource changes", Ordered, func() { - It("should report changed after multiple Upserts of new and related resources", func() { + It("should build graph after multiple Upserts of new and related resources", func() { // new Gateway API resources processor.CaptureUpsertChange(gc) processor.CaptureUpsertChange(gw1) @@ -3777,10 +3723,9 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(secret) 
processor.CaptureUpsertChange(cm) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }) - It("should report not changed after multiple Upserts of unrelated resources", func() { + It("should not build graph after multiple Upserts of unrelated resources", func() { // unrelated Kubernetes API resources processor.CaptureUpsertChange(unrelatedSvc) processor.CaptureUpsertChange(unrelatedSlice) @@ -3788,10 +3733,9 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.NoChange)) + Expect(processor.Process()).To(BeNil()) }) - It("should report changed after upserting changed resources followed by upserting unrelated resources", + It("should build graph after upserting changed resources followed by upserting unrelated resources", func() { // these are changing changes processor.CaptureUpsertChange(gcUpdated) @@ -3808,8 +3752,7 @@ var _ = Describe("ChangeProcessor", func() { processor.CaptureUpsertChange(unrelatedSecret) processor.CaptureUpsertChange(unrelatedCM) - changed, _ := processor.Process() - Expect(changed).To(Equal(state.ClusterStateChange)) + Expect(processor.Process()).ToNot(BeNil()) }, ) }) diff --git a/internal/mode/static/state/dataplane/configuration.go b/internal/mode/static/state/dataplane/configuration.go index 0512876405..e0de92d14d 100644 --- a/internal/mode/static/state/dataplane/configuration.go +++ b/internal/mode/static/state/dataplane/configuration.go @@ -33,11 +33,10 @@ func BuildConfiguration( g *graph.Graph, gateway *graph.Gateway, serviceResolver resolver.ServiceResolver, - configVersion int, plus bool, ) Configuration { if g.GatewayClass == nil || !g.GatewayClass.Valid || gateway == nil { - config := GetDefaultConfiguration(g, configVersion, gateway) + config := GetDefaultConfiguration(g, gateway) if plus { config.NginxPlus = buildNginxPlus(gateway) } @@ -70,7 +69,6 @@ func BuildConfiguration( StreamUpstreams: buildStreamUpstreams(ctx, gateway, serviceResolver, baseHTTPConfig.IPFamily), BackendGroups: backendGroups, SSLKeyPairs: buildSSLKeyPairs(g.ReferencedSecrets, gateway.Listeners), - Version: configVersion, CertBundles: buildCertBundles( buildRefCertificateBundles(g.ReferencedSecrets, g.ReferencedCaCertConfigMaps), backendGroups, @@ -720,6 +718,12 @@ func buildUpstreams( for _, up := range uniqueUpstreams { upstreams = append(upstreams, up) } + + // Preserve order so that this doesn't trigger an unnecessary reload. 
+ sort.Slice(upstreams, func(i, j int) bool { + return upstreams[i].Name < upstreams[j].Name + }) + return upstreams } @@ -1131,9 +1135,8 @@ func buildNginxPlus(gateway *graph.Gateway) NginxPlus { return nginxPlusSettings } -func GetDefaultConfiguration(g *graph.Graph, configVersion int, gateway *graph.Gateway) Configuration { +func GetDefaultConfiguration(g *graph.Graph, gateway *graph.Gateway) Configuration { return Configuration{ - Version: configVersion, Logging: buildLogging(gateway), NginxPlus: NginxPlus{}, AuxiliarySecrets: buildAuxiliarySecrets(g.PlusSecrets), diff --git a/internal/mode/static/state/dataplane/configuration_test.go b/internal/mode/static/state/dataplane/configuration_test.go index 7f3a50795d..78e4869c59 100644 --- a/internal/mode/static/state/dataplane/configuration_test.go +++ b/internal/mode/static/state/dataplane/configuration_test.go @@ -2531,7 +2531,6 @@ func TestBuildConfiguration(t *testing.T) { test.graph, test.graph.Gateways[gatewayNsName], fakeResolver, - 1, false, ) @@ -2541,7 +2540,6 @@ func TestBuildConfiguration(t *testing.T) { g.Expect(result.SSLServers).To(ConsistOf(test.expConf.SSLServers)) g.Expect(result.TLSPassthroughServers).To(ConsistOf(test.expConf.TLSPassthroughServers)) g.Expect(result.SSLKeyPairs).To(Equal(test.expConf.SSLKeyPairs)) - g.Expect(result.Version).To(Equal(1)) g.Expect(result.CertBundles).To(Equal(test.expConf.CertBundles)) g.Expect(result.Telemetry).To(Equal(test.expConf.Telemetry)) g.Expect(result.BaseHTTPConfig).To(Equal(test.expConf.BaseHTTPConfig)) @@ -2648,7 +2646,6 @@ func TestBuildConfiguration_Plus(t *testing.T) { test.graph, test.graph.Gateways[gatewayNsName], fakeResolver, - 1, true, ) @@ -2658,7 +2655,6 @@ func TestBuildConfiguration_Plus(t *testing.T) { g.Expect(result.SSLServers).To(ConsistOf(test.expConf.SSLServers)) g.Expect(result.TLSPassthroughServers).To(ConsistOf(test.expConf.TLSPassthroughServers)) g.Expect(result.SSLKeyPairs).To(Equal(test.expConf.SSLKeyPairs)) - g.Expect(result.Version).To(Equal(1)) g.Expect(result.CertBundles).To(Equal(test.expConf.CertBundles)) g.Expect(result.Telemetry).To(Equal(test.expConf.Telemetry)) g.Expect(result.BaseHTTPConfig).To(Equal(test.expConf.BaseHTTPConfig)) diff --git a/internal/mode/static/state/dataplane/types.go b/internal/mode/static/state/dataplane/types.go index 55e5476f51..975edb4c33 100644 --- a/internal/mode/static/state/dataplane/types.go +++ b/internal/mode/static/state/dataplane/types.go @@ -54,8 +54,6 @@ type Configuration struct { NginxPlus NginxPlus // BaseHTTPConfig holds the configuration options at the http context. BaseHTTPConfig BaseHTTPConfig - // Version represents the version of the generated configuration. - Version int } // SSLKeyPairID is a unique identifier for a SSLKeyPair. 
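The sort added in buildUpstreams above matters because the upstreams are collected from a map, and Go randomizes map iteration order; without sorting, the same cluster state could produce differently ordered configuration (and a different generated config version) on every build, which would look like a change and trigger a needless reload. A minimal standalone sketch of that effect, using illustrative names only (configVersion and the map layout are assumptions for the example, not code from this patch):

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// configVersion hashes upstream names and addresses in a deterministic order.
// Without the sort, Go's randomized map iteration would produce a different hash
// on each build, which downstream consumers would treat as a config change.
func configVersion(upstreams map[string]string) string {
	names := make([]string, 0, len(upstreams))
	for name := range upstreams {
		names = append(names, name)
	}
	sort.Strings(names)

	h := sha256.New()
	for _, name := range names {
		h.Write([]byte(name))
		h.Write([]byte(upstreams[name]))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	ups := map[string]string{"backend-a": "10.0.0.1:80", "backend-b": "10.0.0.2:80"}
	// Repeated calls yield the same version because the ordering is deterministic.
	fmt.Println(configVersion(ups) == configVersion(ups)) // true
}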
diff --git a/internal/mode/static/state/statefakes/fake_change_processor.go b/internal/mode/static/state/statefakes/fake_change_processor.go index b3de756b60..c88a31ce01 100644 --- a/internal/mode/static/state/statefakes/fake_change_processor.go +++ b/internal/mode/static/state/statefakes/fake_change_processor.go @@ -33,17 +33,15 @@ type FakeChangeProcessor struct { getLatestGraphReturnsOnCall map[int]struct { result1 *graph.Graph } - ProcessStub func() (state.ChangeType, *graph.Graph) + ProcessStub func() *graph.Graph processMutex sync.RWMutex processArgsForCall []struct { } processReturns struct { - result1 state.ChangeType - result2 *graph.Graph + result1 *graph.Graph } processReturnsOnCall map[int]struct { - result1 state.ChangeType - result2 *graph.Graph + result1 *graph.Graph } invocations map[string][][]interface{} invocationsMutex sync.RWMutex @@ -167,7 +165,7 @@ func (fake *FakeChangeProcessor) GetLatestGraphReturnsOnCall(i int, result1 *gra }{result1} } -func (fake *FakeChangeProcessor) Process() (state.ChangeType, *graph.Graph) { +func (fake *FakeChangeProcessor) Process() *graph.Graph { fake.processMutex.Lock() ret, specificReturn := fake.processReturnsOnCall[len(fake.processArgsForCall)] fake.processArgsForCall = append(fake.processArgsForCall, struct { @@ -180,9 +178,9 @@ func (fake *FakeChangeProcessor) Process() (state.ChangeType, *graph.Graph) { return stub() } if specificReturn { - return ret.result1, ret.result2 + return ret.result1 } - return fakeReturns.result1, fakeReturns.result2 + return fakeReturns.result1 } func (fake *FakeChangeProcessor) ProcessCallCount() int { @@ -191,36 +189,33 @@ func (fake *FakeChangeProcessor) ProcessCallCount() int { return len(fake.processArgsForCall) } -func (fake *FakeChangeProcessor) ProcessCalls(stub func() (state.ChangeType, *graph.Graph)) { +func (fake *FakeChangeProcessor) ProcessCalls(stub func() *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = stub } -func (fake *FakeChangeProcessor) ProcessReturns(result1 state.ChangeType, result2 *graph.Graph) { +func (fake *FakeChangeProcessor) ProcessReturns(result1 *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = nil fake.processReturns = struct { - result1 state.ChangeType - result2 *graph.Graph - }{result1, result2} + result1 *graph.Graph + }{result1} } -func (fake *FakeChangeProcessor) ProcessReturnsOnCall(i int, result1 state.ChangeType, result2 *graph.Graph) { +func (fake *FakeChangeProcessor) ProcessReturnsOnCall(i int, result1 *graph.Graph) { fake.processMutex.Lock() defer fake.processMutex.Unlock() fake.ProcessStub = nil if fake.processReturnsOnCall == nil { fake.processReturnsOnCall = make(map[int]struct { - result1 state.ChangeType - result2 *graph.Graph + result1 *graph.Graph }) } fake.processReturnsOnCall[i] = struct { - result1 state.ChangeType - result2 *graph.Graph - }{result1, result2} + result1 *graph.Graph + }{result1} } func (fake *FakeChangeProcessor) Invocations() map[string][][]interface{} { diff --git a/internal/mode/static/state/store.go b/internal/mode/static/state/store.go index 58bf28216a..910f257c90 100644 --- a/internal/mode/static/state/store.go +++ b/internal/mode/static/state/store.go @@ -3,7 +3,6 @@ package state import ( "fmt" - discoveryV1 "k8s.io/api/discovery/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -188,7 +187,7 @@ type changeTrackingUpdater struct { extractGVK kinds.MustExtractGVK 
supportedGVKs gvkList - changeType ChangeType + changed bool } func newChangeTrackingUpdater( @@ -221,7 +220,6 @@ func newChangeTrackingUpdater( extractGVK: extractGVK, supportedGVKs: supportedGVKs, stateChangedPredicates: stateChangedPredicates, - changeType: NoChange, } } @@ -255,7 +253,7 @@ func (s *changeTrackingUpdater) Upsert(obj client.Object) { changingUpsert := s.upsert(obj) - s.setChangeType(obj, changingUpsert) + s.changed = s.changed || changingUpsert } func (s *changeTrackingUpdater) delete(objType ngftypes.ObjectType, nsname types.NamespacedName) (changed bool) { @@ -282,28 +280,13 @@ func (s *changeTrackingUpdater) Delete(objType ngftypes.ObjectType, nsname types changingDelete := s.delete(objType, nsname) - s.setChangeType(objType, changingDelete) + s.changed = s.changed || changingDelete } -// getAndResetChangedStatus returns the type of change that occurred based on the previous updates (Upserts/Deletes). -// It also resets the changed status to NoChange. -func (s *changeTrackingUpdater) getAndResetChangedStatus() ChangeType { - changeType := s.changeType - s.changeType = NoChange - return changeType -} - -// setChangeType determines and sets the type of change that occurred. -// - if no change occurred on this object, then keep the changeType as-is (could've been set by another object event) -// - if changeType is already a ClusterStateChange, then we don't need to update the value -// - otherwise, if we are processing an Endpoint update, then this is an EndpointsOnlyChange changeType -// - otherwise, this is a different object, and is a ClusterStateChange changeType. -func (s *changeTrackingUpdater) setChangeType(obj client.Object, changed bool) { - if changed && s.changeType != ClusterStateChange { - if _, ok := obj.(*discoveryV1.EndpointSlice); ok { - s.changeType = EndpointsOnlyChange - } else { - s.changeType = ClusterStateChange - } - } +// getAndResetChangedStatus returns if a change occurred based on the previous updates (Upserts/Deletes). +// It also resets the changed status to false. +func (s *changeTrackingUpdater) getAndResetChangedStatus() bool { + changed := s.changed + s.changed = false + return changed } diff --git a/internal/mode/static/state/store_test.go b/internal/mode/static/state/store_test.go deleted file mode 100644 index 54e60264fa..0000000000 --- a/internal/mode/static/state/store_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package state - -import ( - "testing" - - . "github.com/onsi/gomega" - discoveryV1 "k8s.io/api/discovery/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - v1 "sigs.k8s.io/gateway-api/apis/v1" -) - -//nolint:paralleltest,tparallel // Order matters for these tests. -func TestSetChangeType(t *testing.T) { - t.Parallel() - ctu := newChangeTrackingUpdater(nil, nil) - - // Order matters for these cases. 
- tests := []struct { - obj client.Object - name string - exp ChangeType - changed bool - }{ - { - name: "no change", - exp: NoChange, - }, - { - name: "endpoint object", - obj: &discoveryV1.EndpointSlice{}, - changed: true, - exp: EndpointsOnlyChange, - }, - { - name: "non-endpoint object", - obj: &v1.HTTPRoute{}, - changed: true, - exp: ClusterStateChange, - }, - { - name: "changeType was previously set to ClusterStateChange", - obj: &discoveryV1.EndpointSlice{}, - changed: true, - exp: ClusterStateChange, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - ctu.setChangeType(test.obj, test.changed) - g.Expect(ctu.changeType).To(Equal(test.exp)) - }) - } -} diff --git a/internal/mode/static/telemetry/collector.go b/internal/mode/static/telemetry/collector.go index cfa3614df7..8277515f56 100644 --- a/internal/mode/static/telemetry/collector.go +++ b/internal/mode/static/telemetry/collector.go @@ -34,7 +34,7 @@ type GraphGetter interface { // ConfigurationGetter gets the latest Configuration. type ConfigurationGetter interface { - GetLatestConfiguration() *dataplane.Configuration + GetLatestConfiguration() []*dataplane.Configuration } // Data is telemetry data. @@ -192,7 +192,7 @@ func collectGraphResourceCount( configurationGetter ConfigurationGetter, ) NGFResourceCounts { ngfResourceCounts := NGFResourceCounts{} - cfg := configurationGetter.GetLatestConfiguration() + configs := configurationGetter.GetLatestConfiguration() ngfResourceCounts.GatewayClassCount = int64(len(g.IgnoredGatewayClasses)) if g.GatewayClass != nil { @@ -209,7 +209,7 @@ func collectGraphResourceCount( ngfResourceCounts.SecretCount = int64(len(g.ReferencedSecrets)) ngfResourceCounts.ServiceCount = int64(len(g.ReferencedServices)) - if cfg != nil { + for _, cfg := range configs { for _, upstream := range cfg.Upstreams { if upstream.ErrorMsg == "" { ngfResourceCounts.EndpointCount += int64(len(upstream.Endpoints)) diff --git a/internal/mode/static/telemetry/collector_test.go b/internal/mode/static/telemetry/collector_test.go index 4620d186ce..ce75e47c02 100644 --- a/internal/mode/static/telemetry/collector_test.go +++ b/internal/mode/static/telemetry/collector_test.go @@ -183,7 +183,7 @@ var _ = Describe("Collector", Ordered, func() { fakeConfigurationGetter = &telemetryfakes.FakeConfigurationGetter{} fakeGraphGetter.GetLatestGraphReturns(&graph.Graph{}) - fakeConfigurationGetter.GetLatestConfigurationReturns(&dataplane.Configuration{}) + fakeConfigurationGetter.GetLatestConfigurationReturns(nil) dataCollector = telemetry.NewDataCollectorImpl(telemetry.DataCollectorConfig{ K8sClientReader: k8sClientReader, @@ -368,31 +368,47 @@ var _ = Describe("Collector", Ordered, func() { }, } - config := &dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "upstream1", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - Port: 80, - }, { - Address: "endpoint2", - Port: 80, - }, { - Address: "endpoint3", - Port: 80, + configs := []*dataplane.Configuration{ + { + Upstreams: []dataplane.Upstream{ + { + Name: "upstream1", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, { + Address: "endpoint2", + Port: 80, + }, { + Address: "endpoint3", + Port: 80, + }, + }, + }, + { + Name: "upstream2", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, }, }, }, - { - Name: "upstream2", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - 
Port: 80, + }, + { + Upstreams: []dataplane.Upstream{ + { + Name: "upstream3", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint4", + Port: 80, + }, }, }, }, @@ -400,7 +416,7 @@ var _ = Describe("Collector", Ordered, func() { } fakeGraphGetter.GetLatestGraphReturns(graph) - fakeConfigurationGetter.GetLatestConfigurationReturns(config) + fakeConfigurationGetter.GetLatestConfigurationReturns(configs) expData.ClusterNodeCount = 3 expData.NGFResourceCounts = telemetry.NGFResourceCounts{ @@ -410,7 +426,7 @@ var _ = Describe("Collector", Ordered, func() { TLSRouteCount: 3, SecretCount: 3, ServiceCount: 3, - EndpointCount: 4, + EndpointCount: 5, GRPCRouteCount: 2, BackendTLSPolicyCount: 3, GatewayAttachedClientSettingsPolicyCount: 1, @@ -569,7 +585,7 @@ var _ = Describe("Collector", Ordered, func() { Describe("NGF resource count collector", func() { var ( graph1 *graph.Graph - config1, invalidUpstreamsConfig *dataplane.Configuration + config1, invalidUpstreamsConfig []*dataplane.Configuration ) BeforeAll(func() { @@ -626,43 +642,47 @@ var _ = Describe("Collector", Ordered, func() { }, } - config1 = &dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "upstream1", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - Port: 80, + config1 = []*dataplane.Configuration{ + { + Upstreams: []dataplane.Upstream{ + { + Name: "upstream1", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, }, }, }, }, } - invalidUpstreamsConfig = &dataplane.Configuration{ - Upstreams: []dataplane.Upstream{ - { - Name: "invalidUpstream", - ErrorMsg: "there is an error here", - Endpoints: []resolver.Endpoint{ - { - Address: "endpoint1", - Port: 80, - }, { - Address: "endpoint2", - Port: 80, - }, { - Address: "endpoint3", - Port: 80, + invalidUpstreamsConfig = []*dataplane.Configuration{ + { + Upstreams: []dataplane.Upstream{ + { + Name: "invalidUpstream", + ErrorMsg: "there is an error here", + Endpoints: []resolver.Endpoint{ + { + Address: "endpoint1", + Port: 80, + }, { + Address: "endpoint2", + Port: 80, + }, { + Address: "endpoint3", + Port: 80, + }, }, }, - }, - { - Name: "emptyUpstream", - ErrorMsg: "", - Endpoints: []resolver.Endpoint{}, + { + Name: "emptyUpstream", + ErrorMsg: "", + Endpoints: []resolver.Endpoint{}, + }, }, }, } @@ -671,7 +691,7 @@ var _ = Describe("Collector", Ordered, func() { When("collecting NGF resource counts", func() { It("collects correct data for graph with no resources", func(ctx SpecContext) { fakeGraphGetter.GetLatestGraphReturns(&graph.Graph{}) - fakeConfigurationGetter.GetLatestConfigurationReturns(&dataplane.Configuration{}) + fakeConfigurationGetter.GetLatestConfigurationReturns(nil) expData.NGFResourceCounts = telemetry.NGFResourceCounts{} @@ -721,7 +741,7 @@ var _ = Describe("Collector", Ordered, func() { When("it encounters an error while collecting data", func() { BeforeEach(func() { fakeGraphGetter.GetLatestGraphReturns(&graph.Graph{}) - fakeConfigurationGetter.GetLatestConfigurationReturns(&dataplane.Configuration{}) + fakeConfigurationGetter.GetLatestConfigurationReturns(nil) }) It("should error on nil latest graph", func(ctx SpecContext) { expectedError := errors.New("failed to collect telemetry data: latest graph cannot be nil") diff --git a/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go b/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go index a56fce8f7b..8650078dc7 100644 --- 
a/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go +++ b/internal/mode/static/telemetry/telemetryfakes/fake_configuration_getter.go @@ -9,21 +9,21 @@ import ( ) type FakeConfigurationGetter struct { - GetLatestConfigurationStub func() *dataplane.Configuration + GetLatestConfigurationStub func() []*dataplane.Configuration getLatestConfigurationMutex sync.RWMutex getLatestConfigurationArgsForCall []struct { } getLatestConfigurationReturns struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration } getLatestConfigurationReturnsOnCall map[int]struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *FakeConfigurationGetter) GetLatestConfiguration() *dataplane.Configuration { +func (fake *FakeConfigurationGetter) GetLatestConfiguration() []*dataplane.Configuration { fake.getLatestConfigurationMutex.Lock() ret, specificReturn := fake.getLatestConfigurationReturnsOnCall[len(fake.getLatestConfigurationArgsForCall)] fake.getLatestConfigurationArgsForCall = append(fake.getLatestConfigurationArgsForCall, struct { @@ -47,32 +47,32 @@ func (fake *FakeConfigurationGetter) GetLatestConfigurationCallCount() int { return len(fake.getLatestConfigurationArgsForCall) } -func (fake *FakeConfigurationGetter) GetLatestConfigurationCalls(stub func() *dataplane.Configuration) { +func (fake *FakeConfigurationGetter) GetLatestConfigurationCalls(stub func() []*dataplane.Configuration) { fake.getLatestConfigurationMutex.Lock() defer fake.getLatestConfigurationMutex.Unlock() fake.GetLatestConfigurationStub = stub } -func (fake *FakeConfigurationGetter) GetLatestConfigurationReturns(result1 *dataplane.Configuration) { +func (fake *FakeConfigurationGetter) GetLatestConfigurationReturns(result1 []*dataplane.Configuration) { fake.getLatestConfigurationMutex.Lock() defer fake.getLatestConfigurationMutex.Unlock() fake.GetLatestConfigurationStub = nil fake.getLatestConfigurationReturns = struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration }{result1} } -func (fake *FakeConfigurationGetter) GetLatestConfigurationReturnsOnCall(i int, result1 *dataplane.Configuration) { +func (fake *FakeConfigurationGetter) GetLatestConfigurationReturnsOnCall(i int, result1 []*dataplane.Configuration) { fake.getLatestConfigurationMutex.Lock() defer fake.getLatestConfigurationMutex.Unlock() fake.GetLatestConfigurationStub = nil if fake.getLatestConfigurationReturnsOnCall == nil { fake.getLatestConfigurationReturnsOnCall = make(map[int]struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration }) } fake.getLatestConfigurationReturnsOnCall[i] = struct { - result1 *dataplane.Configuration + result1 []*dataplane.Configuration }{result1} } diff --git a/tests/suite/client_settings_test.go b/tests/suite/client_settings_test.go index 7a77c0dea9..1f6293e6b5 100644 --- a/tests/suite/client_settings_test.go +++ b/tests/suite/client_settings_test.go @@ -104,6 +104,29 @@ var _ = Describe("ClientSettingsPolicy", Ordered, Label("functional", "cspolicy" } }) + Context("verify working traffic", func() { + It("should return a 200 response for HTTPRoutes", func() { + baseCoffeeURL := baseURL + "/coffee" + baseTeaURL := baseURL + "/tea" + + Eventually( + func() error { + return expectRequestToSucceed(baseCoffeeURL, address, "URI: /coffee") + }). + WithTimeout(timeoutConfig.RequestTimeout). + WithPolling(500 * time.Millisecond). 
+ Should(Succeed()) + + Eventually( + func() error { + return expectRequestToSucceed(baseTeaURL, address, "URI: /tea") + }). + WithTimeout(timeoutConfig.RequestTimeout). + WithPolling(500 * time.Millisecond). + Should(Succeed()) + }) + }) + Context("nginx config", func() { var conf *framework.Payload filePrefix := fmt.Sprintf("/etc/nginx/includes/ClientSettingsPolicy_%s", namespace) From 9f6cab3b9784a3f1e0bf31f0657067159033d874 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Wed, 30 Apr 2025 11:59:58 -0600 Subject: [PATCH 26/32] CP/DP Split: Remove prometheus logger (#3349) The prometheus logger is no longer needed since we don't collect nginx metrics in the control plane anymore. Also updated agent dependencies to fix the broken build. --- go.mod | 15 ++-- go.sum | 80 +++++++++---------- internal/mode/static/log_level_setters.go | 40 ---------- .../mode/static/log_level_setters_test.go | 16 ---- internal/mode/static/manager.go | 7 +- tests/go.mod | 9 +-- tests/go.sum | 20 ++--- tests/suite/nginxgateway_test.go | 5 +- 8 files changed, 58 insertions(+), 134 deletions(-) diff --git a/go.mod b/go.mod index 29b388c153..561df41494 100644 --- a/go.mod +++ b/go.mod @@ -3,17 +3,15 @@ module github.com/nginx/nginx-gateway-fabric go 1.24.2 require ( - github.com/fsnotify/fsnotify v1.8.0 - github.com/go-kit/log v0.2.1 + github.com/fsnotify/fsnotify v1.9.0 github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 - github.com/nginx/agent/v3 v3.0.0-20250120091728-0f0c0e2478aa + github.com/nginx/agent/v3 v3.0.0-20250429163223-735f50381a9e github.com/nginx/telemetry-exporter v0.1.4 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.60.1 + github.com/prometheus/client_golang v1.22.0 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 go.opentelemetry.io/otel v1.35.0 @@ -32,7 +30,7 @@ require ( ) require ( - buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1 // indirect + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20250130201111-63bb56e20495.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -40,7 +38,6 @@ require ( github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -57,7 +54,6 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -65,7 +61,8 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/procfs v0.15.1 // indirect 
+ github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.16.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect diff --git a/go.sum b/go.sum index 52014ba372..c49cce28ac 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1 h1:2IGhRovxlsOIQgx2ekZWo4wTPAYpck41+18ICxs37is= -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240401165935-b983156c5e99.1/go.mod h1:Tgn5bgL220vkFOI0KPStlcClPeOJzAv4uT+V8JXGUnw= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20250130201111-63bb56e20495.1 h1:4erM3WLgEG/HIBrpBDmRbs1puhd7p0z7kNXDuhHthwM= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20250130201111-63bb56e20495.1/go.mod h1:novQBstnxcGpfKf8qGRATqn1anQKwMJIbH5Q581jibU= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -25,12 +25,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0= +github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= +github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= @@ -39,14 +41,10 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/felixge/httpsnoop v1.0.4 
h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -64,16 +62,16 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY= +github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= @@ -96,8 +94,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 
h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -106,8 +104,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= +github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= @@ -135,8 +133,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nginx/agent/v3 v3.0.0-20250120091728-0f0c0e2478aa h1:PvNHtYSv/glSxDkovCHJsDlNFHkvzoH2wAr6WtSNYcM= -github.com/nginx/agent/v3 v3.0.0-20250120091728-0f0c0e2478aa/go.mod h1:HDi/Je5AKCe5by/hWs2jbzUqi3BN4K32hMD2/hWN5G8= +github.com/nginx/agent/v3 v3.0.0-20250429163223-735f50381a9e h1:Cw/fGXymS9ytwusxE7TaySDovKH+yQuWRI0vLJ4rJxU= +github.com/nginx/agent/v3 v3.0.0-20250429163223-735f50381a9e/go.mod h1:O/31aKtii/mpiZmFGMcTNDoLtKzwTyTXOBMSRkMaPvs= github.com/nginx/telemetry-exporter v0.1.4 h1:3ikgKlyz/O57oaBLkxCInMjr74AhGTKr9rHdRAkkl/w= github.com/nginx/telemetry-exporter v0.1.4/go.mod h1:bl6qmsxgk4a9D0X8R5E3sUNXN2iECPEK1JNbRLhN5C4= github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1 h1:5VVK38bnELMDWnwfF6dSv57ResXh9AUzeDa72ENj94o= @@ -147,8 +145,8 @@ github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec 
v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -160,14 +158,14 @@ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= +github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -177,10 +175,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= -github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= -github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE= +github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= @@ -201,8 +197,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= -github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= +github.com/testcontainers/testcontainers-go v0.36.0 h1:YpffyLuHtdp5EUsI5mT4sRw8GZhO/5ozyDT1xWGXt00= +github.com/testcontainers/testcontainers-go v0.36.0/go.mod h1:yk73GVJ0KUZIHUtFna6MO7QS144qYpoY8lEEtU9Hed0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -215,8 +211,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= @@ -227,8 +223,8 @@ go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/ go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= @@ -246,8 +242,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= -golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= -golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= @@ -296,8 +292,6 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -309,8 +303,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= diff --git a/internal/mode/static/log_level_setters.go b/internal/mode/static/log_level_setters.go index 072c760e44..765cb80e0f 100644 --- a/internal/mode/static/log_level_setters.go +++ b/internal/mode/static/log_level_setters.go @@ -3,8 +3,6 @@ package static import ( "errors" - "github.com/go-kit/log" - "github.com/prometheus/common/promlog" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -63,41 +61,3 @@ func (z zapLogLevelSetter) SetLevel(level string) error { func (z zapLogLevelSetter) Enabled(level zapcore.Level) bool { return z.atomicLevel.Enabled(level) } - -// leveledPrometheusLogger is a leveled prometheus logger. -// This interface is required because the promlog.NewDynamic returns an unexported type *logger. 
-type leveledPrometheusLogger interface { - log.Logger - SetLevel(level *promlog.AllowedLevel) -} - -type promLogLevelSetter struct { - logger leveledPrometheusLogger -} - -func newPromLogLevelSetter(logger leveledPrometheusLogger) promLogLevelSetter { - return promLogLevelSetter{logger: logger} -} - -func newLeveledPrometheusLogger() (leveledPrometheusLogger, error) { - logFormat := &promlog.AllowedFormat{} - - if err := logFormat.Set("json"); err != nil { - return nil, err - } - - logConfig := &promlog.Config{Format: logFormat} - logger := promlog.NewDynamic(logConfig) - - return logger, nil -} - -func (p promLogLevelSetter) SetLevel(level string) error { - al := &promlog.AllowedLevel{} - if err := al.Set(level); err != nil { - return err - } - - p.logger.SetLevel(al) - return nil -} diff --git a/internal/mode/static/log_level_setters_test.go b/internal/mode/static/log_level_setters_test.go index b9dce5ae71..844b5a8f91 100644 --- a/internal/mode/static/log_level_setters_test.go +++ b/internal/mode/static/log_level_setters_test.go @@ -58,19 +58,3 @@ func TestZapLogLevelSetter_SetLevel(t *testing.T) { g.Expect(zapSetter.SetLevel("invalid")).ToNot(Succeed()) } - -func TestPromLogLevelSetter_SetLevel(t *testing.T) { - t.Parallel() - g := NewWithT(t) - - logger, err := newLeveledPrometheusLogger() - g.Expect(err).ToNot(HaveOccurred()) - - promSetter := newPromLogLevelSetter(logger) - - g.Expect(promSetter.SetLevel("error")).To(Succeed()) - g.Expect(promSetter.SetLevel("info")).To(Succeed()) - g.Expect(promSetter.SetLevel("debug")).To(Succeed()) - - g.Expect(promSetter.SetLevel("invalid")).ToNot(Succeed()) -} diff --git a/internal/mode/static/manager.go b/internal/mode/static/manager.go index 0c78a8f10d..1af84ebe23 100644 --- a/internal/mode/static/manager.go +++ b/internal/mode/static/manager.go @@ -107,12 +107,7 @@ func StartManager(cfg config.Config) error { recorderName := fmt.Sprintf("nginx-gateway-fabric-%s", cfg.GatewayClassName) recorder := mgr.GetEventRecorderFor(recorderName) - promLogger, err := newLeveledPrometheusLogger() - if err != nil { - return fmt.Errorf("error creating leveled prometheus logger: %w", err) - } - - logLevelSetter := newMultiLogLevelSetter(newZapLogLevelSetter(cfg.AtomicLevel), newPromLogLevelSetter(promLogger)) + logLevelSetter := newMultiLogLevelSetter(newZapLogLevelSetter(cfg.AtomicLevel)) ctx := ctlr.SetupSignalHandler() diff --git a/tests/go.mod b/tests/go.mod index cf265e2fae..fd2c7a72ea 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -8,8 +8,8 @@ require ( github.com/nginx/nginx-gateway-fabric v0.0.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.60.1 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/common v0.62.0 github.com/tsenart/vegeta/v12 v12.12.0 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 @@ -26,7 +26,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -45,7 +45,6 @@ require ( github.com/influxdata/tdigest v0.0.1 // indirect 
github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/miekg/dns v1.1.65 // indirect github.com/moby/spdystream v0.5.0 // indirect @@ -56,7 +55,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/procfs v0.16.0 // indirect github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/testify v1.10.0 // indirect diff --git a/tests/go.sum b/tests/go.sum index f32304f508..82c4ed9185 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -18,8 +18,8 @@ github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -67,8 +67,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -103,14 +103,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang 
v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= +github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 h1:18kd+8ZUlt/ARXhljq+14TwAoKa61q6dX8jtwOf6DH8= diff --git a/tests/suite/nginxgateway_test.go b/tests/suite/nginxgateway_test.go index a2d44e3a77..90e1edb76c 100644 --- a/tests/suite/nginxgateway_test.go +++ b/tests/suite/nginxgateway_test.go @@ -243,10 +243,7 @@ var _ = Describe("NginxGateway", Ordered, Label("functional", "nginxGateway"), f return false } - return strings.Contains( - logs, - "\"current\":\"debug\",\"msg\":\"Log level changed\",\"prev\":\"info\"", - ) + return strings.Contains(logs, "\"level\":\"debug\"") }).WithTimeout(timeoutConfig.GetTimeout). WithPolling(500 * time.Millisecond). Should(BeTrue()) From 87f44ff4ff763276132cf7b8dc235268b72bf66e Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Wed, 30 Apr 2025 16:01:33 -0600 Subject: [PATCH 27/32] CP/DP Split: Support configuring NodePorts (#3343) Problem: Now that the control plane provisions the NGINX Service, users can't set specific NodePorts values. Solution: Allow users to specify NodePorts in the helm chart (globally) and in the NginxProxy resource. 
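For illustration, a minimal sketch of the two configuration paths this patch adds
(port values taken from the values.yaml example in this patch; the NginxProxy
resource name below is hypothetical, and the field path follows the ServiceSpec
changes introduced here). Globally, via the helm chart:

  nginx:
    service:
      nodePorts:
        - port: 30025        # NodePort to expose (default Kubernetes range 30000-32767)
          listenerPort: 80   # must match a Gateway listener port, otherwise ignored

Or via the NginxProxy resource:

  apiVersion: gateway.nginx.org/v1alpha2
  kind: NginxProxy
  metadata:
    name: example-nginx-proxy   # illustrative name
  spec:
    kubernetes:
      service:
        nodePorts:
          - port: 30025
            listenerPort: 80

Each entry must map to a Gateway listener port; entries that do not match a
listener port are ignored.
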
--- apis/v1alpha2/nginxproxy_types.go | 22 ++ apis/v1alpha2/zz_generated.deepcopy.go | 20 ++ charts/nginx-gateway-fabric/README.md | 15 +- .../templates/_helpers.tpl | 13 ++ .../templates/nginxproxy.yaml | 4 +- .../nginx-gateway-fabric/values.schema.json | 61 ++++- charts/nginx-gateway-fabric/values.yaml | 42 +++- .../bases/gateway.nginx.org_nginxproxies.yaml | 30 +++ deploy/crds.yaml | 30 +++ internal/mode/static/handler.go | 20 +- internal/mode/static/handler_test.go | 32 ++- internal/mode/static/provisioner/handler.go | 57 ++++- .../mode/static/provisioner/handler_test.go | 33 ++- internal/mode/static/provisioner/objects.go | 9 + .../mode/static/provisioner/objects_test.go | 21 +- .../mode/static/provisioner/provisioner.go | 15 +- internal/mode/static/provisioner/store.go | 74 +++++- .../mode/static/provisioner/store_test.go | 218 ++++++++++++++++++ 18 files changed, 659 insertions(+), 57 deletions(-) diff --git a/apis/v1alpha2/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go index 7c716824fa..13013d81cf 100644 --- a/apis/v1alpha2/nginxproxy_types.go +++ b/apis/v1alpha2/nginxproxy_types.go @@ -533,6 +533,13 @@ type ServiceSpec struct { // // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` + + // NodePorts are the list of NodePorts to expose on the NGINX data plane service. + // Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. + // The default NodePort range enforced by Kubernetes is 30000-32767. + // + // +optional + NodePorts []NodePort `json:"nodePorts,omitempty"` } // ServiceType describes ingress method for the Service. @@ -569,3 +576,18 @@ const ( // (dropping the traffic if there are no local endpoints). ExternalTrafficPolicyLocal ExternalTrafficPolicy = ExternalTrafficPolicy(corev1.ServiceExternalTrafficPolicyLocal) ) + +// NodePort creates a port on each node on which the NGINX data plane service is exposed. The NodePort MUST +// map to a Gateway listener port, otherwise it will be ignored. If not specified, Kubernetes allocates a NodePort +// automatically if required. The default NodePort range enforced by Kubernetes is 30000-32767. +type NodePort struct { + // Port is the NodePort to expose. + // kubebuilder:validation:Minimum=1 + // kubebuilder:validation:Maximum=65535 + Port int32 `json:"port"` + + // ListenerPort is the Gateway listener port that this NodePort maps to. + // kubebuilder:validation:Minimum=1 + // kubebuilder:validation:Maximum=65535 + ListenerPort int32 `json:"listenerPort"` +} diff --git a/apis/v1alpha2/zz_generated.deepcopy.go b/apis/v1alpha2/zz_generated.deepcopy.go index 60bf2cd9cd..bd6d81bca2 100644 --- a/apis/v1alpha2/zz_generated.deepcopy.go +++ b/apis/v1alpha2/zz_generated.deepcopy.go @@ -328,6 +328,21 @@ func (in *NginxProxySpec) DeepCopy() *NginxProxySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePort) DeepCopyInto(out *NodePort) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePort. +func (in *NodePort) DeepCopy() *NodePort { + if in == nil { + return nil + } + out := new(NodePort) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ObservabilityPolicy) DeepCopyInto(out *ObservabilityPolicy) { *out = *in @@ -547,6 +562,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.NodePorts != nil { + in, out := &in.NodePorts, &out.NodePorts + *out = make([]NodePort, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index f2141de4d2..8ed532ea95 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -258,19 +258,24 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | `certGenerator.overwrite` | Overwrite existing TLS Secrets on startup. | bool | `false` | | `certGenerator.serverTLSSecretName` | The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane to securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"server-tls"` | | `clusterDomain` | The DNS cluster domain of your Kubernetes cluster. | string | `"cluster.local"` | -| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"config":{},"container":{},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","plus":false,"pod":{},"replicas":1,"service":{"externalTrafficPolicy":"Local","type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` | -| `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. | object | `{}` | -| `nginx.container` | The container configuration for the NGINX container. | object | `{}` | +| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"config":{},"container":{},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","plus":false,"pod":{},"replicas":1,"service":{"annotations":{},"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` | +| `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | +| `nginx.container` | The container configuration for the NGINX container. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | | `nginx.debug` | Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. | bool | `false` | | `nginx.image.repository` | The NGINX image to use. 
| string | `"ghcr.io/nginx/nginx-gateway-fabric/nginx"` | | `nginx.imagePullSecret` | The name of the secret containing docker registry credentials. Secret must exist in the same namespace as the helm release. The control plane will copy this secret into any namespace where NGINX is deployed. | string | `""` | | `nginx.imagePullSecrets` | A list of secret names containing docker registry credentials. Secrets must exist in the same namespace as the helm release. The control plane will copy these secrets into any namespace where NGINX is deployed. | list | `[]` | | `nginx.kind` | The kind of NGINX deployment. | string | `"deployment"` | | `nginx.plus` | Is NGINX Plus image being used. | bool | `false` | -| `nginx.pod` | The pod configuration for the NGINX data plane pod. | object | `{}` | +| `nginx.pod` | The pod configuration for the NGINX data plane pod. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | | `nginx.replicas` | The number of replicas of the NGINX Deployment. | int | `1` | -| `nginx.service` | The service configuration for the NGINX data plane. | object | `{"externalTrafficPolicy":"Local","type":"LoadBalancer"}` | +| `nginx.service` | The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{"annotations":{},"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"}` | +| `nginx.service.annotations` | The annotations of the NGINX data plane service. | object | `{}` | | `nginx.service.externalTrafficPolicy` | The externalTrafficPolicy of the service. The value Local preserves the client source IP. | string | `"Local"` | +| `nginx.service.loadBalancerClass` | LoadBalancerClass is the class of the load balancer implementation this Service belongs to. Requires nginx.service.type set to LoadBalancer. | string | `""` | +| `nginx.service.loadBalancerIP` | The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. | string | `""` | +| `nginx.service.loadBalancerSourceRanges` | The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer. | list | `[]` | +| `nginx.service.nodePorts` | A list of NodePorts to expose on the NGINX data plane service. Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. The default NodePort range enforced by Kubernetes is 30000-32767. | list | `[]` | | `nginx.service.type` | The type of service to create for the NGINX data plane. | string | `"LoadBalancer"` | | `nginx.usage.caSecretName` | The name of the Secret containing the NGINX Instance Manager CA certificate. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` | | `nginx.usage.clientSSLSecretName` | The name of the Secret containing the client certificate and key for authenticating with NGINX Instance Manager. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). 
| string | `""` | diff --git a/charts/nginx-gateway-fabric/templates/_helpers.tpl b/charts/nginx-gateway-fabric/templates/_helpers.tpl index 2a137d5fbd..01155eb707 100644 --- a/charts/nginx-gateway-fabric/templates/_helpers.tpl +++ b/charts/nginx-gateway-fabric/templates/_helpers.tpl @@ -91,3 +91,16 @@ Expand leader election lock name. {{- printf "%s-%s" (include "nginx-gateway.fullname" .) "leader-election" -}} {{- end -}} {{- end -}} + +{{/* +Filters out empty fields from a struct. +*/}} +{{- define "filterEmptyFields" -}} +{{- $result := dict }} +{{- range $key, $value := . }} + {{- if and (not (empty $value)) (not (and (kindIs "slice" $value) (eq (len $value) 0))) }} + {{- $result = merge $result (dict $key $value) }} + {{- end }} +{{- end }} +{{- $result | toYaml }} +{{- end }} diff --git a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml index f77630fe95..56e4de6943 100644 --- a/charts/nginx-gateway-fabric/templates/nginxproxy.yaml +++ b/charts/nginx-gateway-fabric/templates/nginxproxy.yaml @@ -29,5 +29,7 @@ spec: {{- end }} {{- if .Values.nginx.service }} service: - {{- toYaml .Values.nginx.service | nindent 6 }} + {{- with .Values.nginx.service }} + {{- include "filterEmptyFields" . | nindent 6 }} + {{- end }} {{- end }} diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index ae9d507656..806512fce8 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -53,7 +53,7 @@ "description": "The nginx section contains the configuration for all NGINX data plane deployments\ninstalled by the NGINX Gateway Fabric control plane.", "properties": { "config": { - "description": "The configuration for the data plane that is contained in the NginxProxy resource.", + "description": "The configuration for the data plane that is contained in the NginxProxy resource. This is applied globally to all Gateways\nmanaged by this instance of NGINX Gateway Fabric.", "properties": { "disableHTTP2": { "description": "DisableHTTP2 defines if http2 should be disabled for all servers.", @@ -266,7 +266,7 @@ "type": "object" }, "container": { - "description": "The container configuration for the NGINX container.", + "description": "The container configuration for the NGINX container. This is applied globally to all Gateways managed by this\ninstance of NGINX Gateway Fabric.", "required": [], "title": "container", "type": "object" @@ -341,7 +341,7 @@ "type": "boolean" }, "pod": { - "description": "The pod configuration for the NGINX data plane pod.", + "description": "The pod configuration for the NGINX data plane pod. This is applied globally to all Gateways managed by this\ninstance of NGINX Gateway Fabric.", "required": [], "title": "pod", "type": "object" @@ -354,8 +354,14 @@ "type": "integer" }, "service": { - "description": "The service configuration for the NGINX data plane.", + "description": "The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this\ninstance of NGINX Gateway Fabric.", "properties": { + "annotations": { + "description": "The annotations of the NGINX data plane service.", + "required": [], + "title": "annotations", + "type": "object" + }, "externalTrafficPolicy": { "default": "Local", "description": "The externalTrafficPolicy of the service. 
The value Local preserves the client source IP.", @@ -366,6 +372,53 @@ "required": [], "title": "externalTrafficPolicy" }, + "loadBalancerClass": { + "default": "", + "description": "LoadBalancerClass is the class of the load balancer implementation this Service belongs to.\nRequires nginx.service.type set to LoadBalancer.", + "required": [], + "title": "loadBalancerClass", + "type": "string" + }, + "loadBalancerIP": { + "default": "", + "description": "The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer.", + "required": [], + "title": "loadBalancerIP", + "type": "string" + }, + "loadBalancerSourceRanges": { + "description": "The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer.", + "items": { + "required": [] + }, + "required": [], + "title": "loadBalancerSourceRanges", + "type": "array" + }, + "nodePorts": { + "description": "A list of NodePorts to expose on the NGINX data plane service. Each NodePort MUST map to a Gateway listener port,\notherwise it will be ignored. The default NodePort range enforced by Kubernetes is 30000-32767.", + "items": { + "properties": { + "listenerPort": { + "maximum": 65535, + "minimum": 1, + "required": [], + "type": "integer" + }, + "port": { + "maximum": 65535, + "minimum": 1, + "required": [], + "type": "integer" + } + }, + "required": [], + "type": "object" + }, + "required": [], + "title": "nodePorts", + "type": "array" + }, "type": { "default": "LoadBalancer", "description": "The type of service to create for the NGINX data plane.", diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index c41ea7bb9c..077faeff80 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -367,10 +367,12 @@ nginx: # value: # type: string # @schema - # -- The configuration for the data plane that is contained in the NginxProxy resource. + # -- The configuration for the data plane that is contained in the NginxProxy resource. This is applied globally to all Gateways + # managed by this instance of NGINX Gateway Fabric. config: {} - # -- The pod configuration for the NGINX data plane pod. + # -- The pod configuration for the NGINX data plane pod. This is applied globally to all Gateways managed by this + # instance of NGINX Gateway Fabric. pod: {} # -- The termination grace period of the NGINX data plane pod. # terminationGracePeriodSeconds: 30 @@ -391,7 +393,8 @@ nginx: # nginx.container.extraVolumeMounts mount additional volumes to the container. # extraVolumes: [] - # -- The container configuration for the NGINX container. + # -- The container configuration for the NGINX container. This is applied globally to all Gateways managed by this + # instance of NGINX Gateway Fabric. container: {} # -- The resource requirements of the NGINX container. # resources: {} @@ -402,7 +405,8 @@ nginx: # -- extraVolumeMounts are the additional volume mounts for the NGINX container. # extraVolumeMounts: [] - # -- The service configuration for the NGINX data plane. + # -- The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this + # instance of NGINX Gateway Fabric. service: # @schema # enum: @@ -422,17 +426,39 @@ nginx: externalTrafficPolicy: Local # -- The annotations of the NGINX data plane service. - # annotations: {} + annotations: {} # -- The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. 
- # loadBalancerIP: "" + loadBalancerIP: "" # -- LoadBalancerClass is the class of the load balancer implementation this Service belongs to. # Requires nginx.service.type set to LoadBalancer. - # loadBalancerClass: "" + loadBalancerClass: "" # -- The IP ranges (CIDR) that are allowed to access the load balancer. Requires nginx.service.type set to LoadBalancer. - # loadBalancerSourceRanges: [] + loadBalancerSourceRanges: [] + + # @schema + # type: array + # items: + # type: object + # properties: + # port: + # type: integer + # required: true + # minimum: 1 + # maximum: 65535 + # listenerPort: + # type: integer + # required: true + # minimum: 1 + # maximum: 65535 + # @schema + # -- A list of NodePorts to expose on the NGINX data plane service. Each NodePort MUST map to a Gateway listener port, + # otherwise it will be ignored. The default NodePort range enforced by Kubernetes is 30000-32767. + nodePorts: [] + # - port: 30025 + # listenerPort: 80 # -- Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. debug: false diff --git a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml index 0e28520896..a74ea35b32 100644 --- a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml +++ b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml @@ -3498,6 +3498,36 @@ spec: items: type: string type: array + nodePorts: + description: |- + NodePorts are the list of NodePorts to expose on the NGINX data plane service. + Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. + The default NodePort range enforced by Kubernetes is 30000-32767. + items: + description: |- + NodePort creates a port on each node on which the NGINX data plane service is exposed. The NodePort MUST + map to a Gateway listener port, otherwise it will be ignored. If not specified, Kubernetes allocates a NodePort + automatically if required. The default NodePort range enforced by Kubernetes is 30000-32767. + properties: + listenerPort: + description: |- + ListenerPort is the Gateway listener port that this NodePort maps to. + kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + port: + description: |- + Port is the NodePort to expose. + kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + required: + - listenerPort + - port + type: object + type: array type: default: LoadBalancer description: ServiceType describes ingress method for the diff --git a/deploy/crds.yaml b/deploy/crds.yaml index 56cd27eacc..abb653ccc4 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -4083,6 +4083,36 @@ spec: items: type: string type: array + nodePorts: + description: |- + NodePorts are the list of NodePorts to expose on the NGINX data plane service. + Each NodePort MUST map to a Gateway listener port, otherwise it will be ignored. + The default NodePort range enforced by Kubernetes is 30000-32767. + items: + description: |- + NodePort creates a port on each node on which the NGINX data plane service is exposed. The NodePort MUST + map to a Gateway listener port, otherwise it will be ignored. If not specified, Kubernetes allocates a NodePort + automatically if required. The default NodePort range enforced by Kubernetes is 30000-32767. + properties: + listenerPort: + description: |- + ListenerPort is the Gateway listener port that this NodePort maps to. 
+ kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + port: + description: |- + Port is the NodePort to expose. + kubebuilder:validation:Minimum=1 + kubebuilder:validation:Maximum=65535 + format: int32 + type: integer + required: + - listenerPort + - port + type: object + type: array type: default: LoadBalancer description: ServiceType describes ingress method for the diff --git a/internal/mode/static/handler.go b/internal/mode/static/handler.go index 3114bea746..2e2fe6e9c2 100644 --- a/internal/mode/static/handler.go +++ b/internal/mode/static/handler.go @@ -12,6 +12,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" @@ -472,8 +473,23 @@ func getGatewayAddresses( if svc == nil { svcName := controller.CreateNginxResourceName(gateway.Source.GetName(), gatewayClassName) key := types.NamespacedName{Name: svcName, Namespace: gateway.Source.GetNamespace()} - if err := k8sClient.Get(ctx, key, &gwSvc); err != nil { - return nil, fmt.Errorf("error finding Service for Gateway: %w", err) + + pollCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if err := wait.PollUntilContextCancel( + pollCtx, + 500*time.Millisecond, + true, /* poll immediately */ + func(ctx context.Context) (bool, error) { + if err := k8sClient.Get(ctx, key, &gwSvc); err != nil { + return false, nil //nolint:nilerr // need to retry without returning error + } + + return true, nil + }, + ); err != nil { + return nil, fmt.Errorf("error finding Service %s for Gateway: %w", svcName, err) } } else { gwSvc = *svc diff --git a/internal/mode/static/handler_test.go b/internal/mode/static/handler_test.go index df5ee9d70a..59ed8d8d2a 100644 --- a/internal/mode/static/handler_test.go +++ b/internal/mode/static/handler_test.go @@ -3,6 +3,7 @@ package static import ( "context" "errors" + "time" "github.com/go-logr/logr" pb "github.com/nginx/agent/v3/api/grpc/mpi/v1" @@ -87,8 +88,13 @@ var _ = Describe("eventHandler", func() { baseGraph = &graph.Graph{ Gateways: map[types.NamespacedName]*graph.Gateway{ {Namespace: "test", Name: "gateway"}: { - Valid: true, - Source: &gatewayv1.Gateway{}, + Valid: true, + Source: &gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "test", + }, + }, DeploymentName: types.NamespacedName{ Namespace: "test", Name: controller.CreateNginxResourceName("gateway", "nginx"), @@ -107,9 +113,19 @@ var _ = Describe("eventHandler", func() { fakeStatusUpdater = &statusfakes.FakeGroupUpdater{} fakeEventRecorder = record.NewFakeRecorder(1) zapLogLevelSetter = newZapLogLevelSetter(zap.NewAtomicLevel()) - fakeK8sClient = fake.NewFakeClient() queue = status.NewQueue() + gatewaySvc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "gateway-nginx", + }, + Spec: v1.ServiceSpec{ + ClusterIP: "1.2.3.4", + }, + } + fakeK8sClient = fake.NewFakeClient(gatewaySvc) + handler = newEventHandlerImpl(eventHandlerConfig{ ctx: ctx, k8sClient: fakeK8sClient, @@ -520,16 +536,20 @@ var _ = Describe("getGatewayAddresses", func() { It("gets gateway addresses from a Service", func() { fakeClient := fake.NewFakeClient() - // no Service exists yet, should get error and Pod Address + // no Service exists yet, should get error and no Address gateway := &graph.Gateway{ Source: 
&gatewayv1.Gateway{ ObjectMeta: metav1.ObjectMeta{ Name: "gateway", - Namespace: "test-ns", + Namespace: "test", }, }, } - addrs, err := getGatewayAddresses(context.Background(), fakeClient, nil, gateway, "nginx") + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + addrs, err := getGatewayAddresses(ctx, fakeClient, nil, gateway, "nginx") Expect(err).To(HaveOccurred()) Expect(addrs).To(BeNil()) diff --git a/internal/mode/static/provisioner/handler.go b/internal/mode/static/provisioner/handler.go index ef6ba76b82..ee7813fb96 100644 --- a/internal/mode/static/provisioner/handler.go +++ b/internal/mode/static/provisioner/handler.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "reflect" "strings" "github.com/go-logr/logr" @@ -66,7 +67,7 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, gatewayName := objLabels.Get(controller.GatewayLabel) gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} - if err := h.updateOrDeleteResources(ctx, obj, gatewayNSName); err != nil { + if err := h.updateOrDeleteResources(ctx, logger, obj, gatewayNSName); err != nil { logger.Error(err, "error handling resource update") } } @@ -76,7 +77,7 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, gatewayName := objLabels.Get(controller.GatewayLabel) gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} - if err := h.updateOrDeleteResources(ctx, obj, gatewayNSName); err != nil { + if err := h.updateOrDeleteResources(ctx, logger, obj, gatewayNSName); err != nil { logger.Error(err, "error handling resource update") } @@ -93,12 +94,12 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, gatewayName := objLabels.Get(controller.GatewayLabel) gatewayNSName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: gatewayName} - if err := h.updateOrDeleteResources(ctx, obj, gatewayNSName); err != nil { + if err := h.updateOrDeleteResources(ctx, logger, obj, gatewayNSName); err != nil { logger.Error(err, "error handling resource update") } } else if h.provisioner.isUserSecret(obj.GetName()) { - if err := h.provisionResourcesForAllGateways(ctx); err != nil { - logger.Error(err, "error updating resources") + if err := h.provisionResourceForAllGateways(ctx, logger, obj); err != nil { + logger.Error(err, "error updating resource") } } default: @@ -144,6 +145,7 @@ func (h *eventHandler) HandleEventBatch(ctx context.Context, logger logr.Logger, // - are updated to the proper state in case a user makes a change directly to the resource. 
func (h *eventHandler) updateOrDeleteResources( ctx context.Context, + logger logr.Logger, obj client.Object, gatewayNSName types.NamespacedName, ) error { @@ -160,26 +162,55 @@ func (h *eventHandler) updateOrDeleteResources( return nil } + if h.store.getResourceVersionForObject(gatewayNSName, obj) == obj.GetResourceVersion() { + return nil + } + h.store.registerResourceInGatewayConfig(gatewayNSName, obj) - if err := h.provisionResources(ctx, gatewayNSName); err != nil { + if err := h.provisionResource(ctx, logger, gatewayNSName, obj); err != nil { return fmt.Errorf("error updating nginx resource: %w", err) } return nil } -func (h *eventHandler) provisionResources( +func (h *eventHandler) provisionResource( ctx context.Context, + logger logr.Logger, gatewayNSName types.NamespacedName, + obj client.Object, ) error { resources := h.store.getNginxResourcesForGateway(gatewayNSName) if resources != nil && resources.Gateway != nil { resourceName := controller.CreateNginxResourceName(gatewayNSName.Name, h.gcName) + + objects, err := h.provisioner.buildNginxResourceObjects( + resourceName, + resources.Gateway.Source, + resources.Gateway.EffectiveNginxProxy, + ) + if err != nil { + logger.Error(err, "error building some nginx resources") + } + + // only provision the object that was updated + var objectToProvision client.Object + for _, object := range objects { + if strings.HasSuffix(object.GetName(), obj.GetName()) && reflect.TypeOf(object) == reflect.TypeOf(obj) { + objectToProvision = object + break + } + } + + if objectToProvision == nil { + return nil + } + if err := h.provisioner.provisionNginx( ctx, resourceName, resources.Gateway.Source, - resources.Gateway.EffectiveNginxProxy, + []client.Object{objectToProvision}, ); err != nil { return fmt.Errorf("error updating nginx resource: %w", err) } @@ -204,13 +235,17 @@ func (h *eventHandler) reprovisionResources(ctx context.Context, event *events.D return nil } -// provisionResourcesForAllGateways is called when a resource is updated that needs to be applied +// provisionResourceForAllGateways is called when a resource is updated that needs to be applied // to all Gateway deployments. For example, NGINX Plus secrets. 
-func (h *eventHandler) provisionResourcesForAllGateways(ctx context.Context) error { +func (h *eventHandler) provisionResourceForAllGateways( + ctx context.Context, + logger logr.Logger, + obj client.Object, +) error { var allErrs []error gateways := h.store.getGateways() for gateway := range gateways { - if err := h.provisionResources(ctx, gateway); err != nil { + if err := h.provisionResource(ctx, logger, gateway, obj); err != nil { allErrs = append(allErrs, err) } } diff --git a/internal/mode/static/provisioner/handler_test.go b/internal/mode/static/provisioner/handler_test.go index 9fd0dcc8d1..1f44a27de0 100644 --- a/internal/mode/static/provisioner/handler_test.go +++ b/internal/mode/static/provisioner/handler_test.go @@ -48,25 +48,28 @@ func TestHandleEventBatch_Upsert(t *testing.T) { deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: "gw-nginx", - Namespace: "default", - Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, + Name: "gw-nginx", + Namespace: "default", + ResourceVersion: "1", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, }, } service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "gw-nginx", - Namespace: "default", - Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "test-gateway"}, + Name: "gw-nginx", + Namespace: "default", + ResourceVersion: "1", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, }, } jwtSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "gw-nginx-" + jwtTestSecretName, - Namespace: "default", - Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "test-gateway"}, + Name: "gw-nginx-" + jwtTestSecretName, + Namespace: "default", + ResourceVersion: "1", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, }, Data: map[string][]byte{ "data": []byte("oldData"), @@ -86,8 +89,10 @@ func TestHandleEventBatch_Upsert(t *testing.T) { dockerSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "gw-nginx-" + dockerTestSecretName, - Namespace: "default", + Name: "gw-nginx-" + dockerTestSecretName, + Namespace: "default", + ResourceVersion: "1", + Labels: map[string]string{"app": "nginx", controller.GatewayLabel: "gw"}, }, Data: map[string][]byte{ "data": []byte("oldDockerData"), @@ -130,6 +135,7 @@ func TestHandleEventBatch_Upsert(t *testing.T) { handler.HandleEventBatch(ctx, logger, batch) g.Expect(provisioner.cfg.StatusQueue.Dequeue(ctx)).ToNot(BeNil()) + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(service), &corev1.Service{})).To(Succeed()) // Test handling provisioned Secret upsertEvent = &events.UpsertEvent{Resource: jwtSecret} @@ -155,6 +161,10 @@ func TestHandleEventBatch_Upsert(t *testing.T) { g.Expect(secret.Data["data"]).To(Equal([]byte("newData"))) // Test handling user Docker Secret + upsertEvent = &events.UpsertEvent{Resource: dockerSecret} + batch = events.EventBatch{upsertEvent} + handler.HandleEventBatch(ctx, logger, batch) + g.Expect(fakeClient.Get(ctx, client.ObjectKeyFromObject(dockerSecret), secret)).To(Succeed()) g.Expect(secret.Data).To(HaveKey("data")) g.Expect(secret.Data["data"]).To(Equal([]byte("oldDockerData"))) @@ -181,6 +191,7 @@ func TestHandleEventBatch_Upsert(t *testing.T) { // do the same thing but when provisioner is not leader. 
// non-leader should not delete resources, but instead track them + deployment.ResourceVersion = "" g.Expect(fakeClient.Create(ctx, deployment)).To(Succeed()) provisioner.leader = false diff --git a/internal/mode/static/provisioner/objects.go b/internal/mode/static/provisioner/objects.go index 93ee801fef..57d6933b2c 100644 --- a/internal/mode/static/provisioner/objects.go +++ b/internal/mode/static/provisioner/objects.go @@ -438,6 +438,15 @@ func buildNginxService( Port: port, TargetPort: intstr.FromInt32(port), } + + if serviceType != corev1.ServiceTypeClusterIP { + for _, nodePort := range serviceCfg.NodePorts { + if nodePort.ListenerPort == port { + servicePort.NodePort = nodePort.Port + } + } + } + servicePorts = append(servicePorts, servicePort) } diff --git a/internal/mode/static/provisioner/objects_test.go b/internal/mode/static/provisioner/objects_test.go index aa3a8dd747..9225e8a0ce 100644 --- a/internal/mode/static/provisioner/objects_test.go +++ b/internal/mode/static/provisioner/objects_test.go @@ -92,7 +92,25 @@ func TestBuildNginxResourceObjects(t *testing.T) { } resourceName := "gw-nginx" - objects, err := provisioner.buildNginxResourceObjects(resourceName, gateway, &graph.EffectiveNginxProxy{}) + objects, err := provisioner.buildNginxResourceObjects( + resourceName, + gateway, + &graph.EffectiveNginxProxy{ + Kubernetes: &ngfAPIv1alpha2.KubernetesSpec{ + Service: &ngfAPIv1alpha2.ServiceSpec{ + NodePorts: []ngfAPIv1alpha2.NodePort{ + { + Port: 30000, + ListenerPort: 80, + }, + { // ignored + Port: 31000, + ListenerPort: 789, + }, + }, + }, + }, + }) g.Expect(err).ToNot(HaveOccurred()) g.Expect(objects).To(HaveLen(6)) @@ -150,6 +168,7 @@ func TestBuildNginxResourceObjects(t *testing.T) { Port: 80, Name: "port-80", TargetPort: intstr.FromInt(80), + NodePort: 30000, }, { Port: 8888, diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go index 3151d56c4f..42bd761ba2 100644 --- a/internal/mode/static/provisioner/provisioner.go +++ b/internal/mode/static/provisioner/provisioner.go @@ -185,17 +185,12 @@ func (p *NginxProvisioner) provisionNginx( ctx context.Context, resourceName string, gateway *gatewayv1.Gateway, - nProxyCfg *graph.EffectiveNginxProxy, + objects []client.Object, ) error { if !p.isLeader() { return nil } - objects, err := p.buildNginxResourceObjects(resourceName, gateway, nProxyCfg) - if err != nil { - p.cfg.Logger.Error(err, "error provisioning some nginx resources") - } - p.cfg.Logger.Info( "Creating/Updating nginx resources", "namespace", gateway.GetNamespace(), @@ -261,6 +256,7 @@ func (p *NginxProvisioner) provisionNginx( "namespace", gateway.GetNamespace(), "name", resourceName, ) + p.store.registerResourceInGatewayConfig(client.ObjectKeyFromObject(gateway), obj) } // if agent configmap was updated, then we'll need to restart the deployment @@ -430,7 +426,12 @@ func (p *NginxProvisioner) RegisterGateway( } if gateway.Valid { - if err := p.provisionNginx(ctx, resourceName, gateway.Source, gateway.EffectiveNginxProxy); err != nil { + objects, err := p.buildNginxResourceObjects(resourceName, gateway.Source, gateway.EffectiveNginxProxy) + if err != nil { + p.cfg.Logger.Error(err, "error building some nginx resources") + } + + if err := p.provisionNginx(ctx, resourceName, gateway.Source, objects); err != nil { return fmt.Errorf("error provisioning nginx resources: %w", err) } } else { diff --git a/internal/mode/static/provisioner/store.go b/internal/mode/static/provisioner/store.go index 0af617852c..a487d29fb9 
100644 --- a/internal/mode/static/provisioner/store.go +++ b/internal/mode/static/provisioner/store.go @@ -107,7 +107,7 @@ func (s *store) getGateways() map[types.NamespacedName]*gatewayv1.Gateway { // If the object being updated is the Gateway, check if anything that we care about changed. This ensures that // we don't attempt to update nginx resources when the main event handler triggers this call with an unrelated event // (like a Route update) that shouldn't result in nginx resource changes. -func (s *store) registerResourceInGatewayConfig(gatewayNSName types.NamespacedName, object interface{}) bool { +func (s *store) registerResourceInGatewayConfig(gatewayNSName types.NamespacedName, object any) bool { s.lock.Lock() defer s.lock.Unlock() @@ -348,3 +348,75 @@ func secretResourceMatches(resources *NginxResources, nsName types.NamespacedNam func resourceMatches(objMeta metav1.ObjectMeta, nsName types.NamespacedName) bool { return objMeta.GetName() == nsName.Name && objMeta.GetNamespace() == nsName.Namespace } + +func (s *store) getResourceVersionForObject(gatewayNSName types.NamespacedName, object client.Object) string { + s.lock.RLock() + defer s.lock.RUnlock() + + resources, exists := s.nginxResources[gatewayNSName] + if !exists { + return "" + } + + switch obj := object.(type) { + case *appsv1.Deployment: + if resources.Deployment.GetName() == obj.GetName() { + return resources.Deployment.GetResourceVersion() + } + case *corev1.Service: + if resources.Service.GetName() == obj.GetName() { + return resources.Service.GetResourceVersion() + } + case *corev1.ServiceAccount: + if resources.ServiceAccount.GetName() == obj.GetName() { + return resources.ServiceAccount.GetResourceVersion() + } + case *rbacv1.Role: + if resources.Role.GetName() == obj.GetName() { + return resources.Role.GetResourceVersion() + } + case *rbacv1.RoleBinding: + if resources.RoleBinding.GetName() == obj.GetName() { + return resources.RoleBinding.GetResourceVersion() + } + case *corev1.ConfigMap: + return getResourceVersionForConfigMap(resources, obj) + case *corev1.Secret: + return getResourceVersionForSecret(resources, obj) + } + + return "" +} + +func getResourceVersionForConfigMap(resources *NginxResources, configmap *corev1.ConfigMap) string { + if resources.BootstrapConfigMap.GetName() == configmap.GetName() { + return resources.BootstrapConfigMap.GetResourceVersion() + } + if resources.AgentConfigMap.GetName() == configmap.GetName() { + return resources.AgentConfigMap.GetResourceVersion() + } + + return "" +} + +func getResourceVersionForSecret(resources *NginxResources, secret *corev1.Secret) string { + if resources.AgentTLSSecret.GetName() == secret.GetName() { + return resources.AgentTLSSecret.GetResourceVersion() + } + for _, dockerSecret := range resources.DockerSecrets { + if dockerSecret.GetName() == secret.GetName() { + return dockerSecret.GetResourceVersion() + } + } + if resources.PlusJWTSecret.GetName() == secret.GetName() { + return resources.PlusJWTSecret.GetResourceVersion() + } + if resources.PlusClientSSLSecret.GetName() == secret.GetName() { + return resources.PlusClientSSLSecret.GetResourceVersion() + } + if resources.PlusCASecret.GetName() == secret.GetName() { + return resources.PlusCASecret.GetResourceVersion() + } + + return "" +} diff --git a/internal/mode/static/provisioner/store_test.go b/internal/mode/static/provisioner/store_test.go index 59e7da1207..bc52728631 100644 --- a/internal/mode/static/provisioner/store_test.go +++ b/internal/mode/static/provisioner/store_test.go @@ -618,3 
+618,221 @@ func TestGatewayExistsForResource(t *testing.T) { }) } } + +func TestGetResourceVersionForObject(t *testing.T) { + t.Parallel() + + store := newStore(nil, "", "", "", "") + nsName := types.NamespacedName{Name: "test-gateway", Namespace: "default"} + store.nginxResources[nsName] = &NginxResources{ + Deployment: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + ResourceVersion: "1", + }, + Service: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + ResourceVersion: "2", + }, + ServiceAccount: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "default", + ResourceVersion: "3", + }, + Role: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + ResourceVersion: "4", + }, + RoleBinding: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + ResourceVersion: "5", + }, + BootstrapConfigMap: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + ResourceVersion: "6", + }, + AgentConfigMap: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + ResourceVersion: "7", + }, + AgentTLSSecret: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + ResourceVersion: "8", + }, + PlusJWTSecret: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: "default", + ResourceVersion: "9", + }, + PlusCASecret: metav1.ObjectMeta{ + Name: "test-ca-secret", + Namespace: "default", + ResourceVersion: "10", + }, + PlusClientSSLSecret: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + ResourceVersion: "11", + }, + DockerSecrets: []metav1.ObjectMeta{ + { + Name: "test-docker-secret", + Namespace: "default", + ResourceVersion: "12", + }, + }, + } + + tests := []struct { + name string + object client.Object + expectedResult string + }{ + { + name: "Deployment resource version", + object: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + }, + expectedResult: "1", + }, + { + name: "Service resource version", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + expectedResult: "2", + }, + { + name: "ServiceAccount resource version", + object: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "default", + }, + }, + expectedResult: "3", + }, + { + name: "Role resource version", + object: &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "default", + }, + }, + expectedResult: "4", + }, + { + name: "RoleBinding resource version", + object: &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "default", + }, + }, + expectedResult: "5", + }, + { + name: "Bootstrap ConfigMap resource version", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bootstrap-configmap", + Namespace: "default", + }, + }, + expectedResult: "6", + }, + { + name: "Agent ConfigMap resource version", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-configmap", + Namespace: "default", + }, + }, + expectedResult: "7", + }, + { + name: "Agent TLS Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-agent-tls-secret", + Namespace: "default", + }, + }, + expectedResult: "8", + }, + { + name: "JWT Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-jwt-secret", + Namespace: 
"default", + }, + }, + expectedResult: "9", + }, + { + name: "CA Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ca-secret", + Namespace: "default", + }, + }, + expectedResult: "10", + }, + { + name: "Client SSL Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-client-ssl-secret", + Namespace: "default", + }, + }, + expectedResult: "11", + }, + { + name: "Docker Secret resource version", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-docker-secret", + Namespace: "default", + }, + }, + expectedResult: "12", + }, + { + name: "Non-existent resource", + object: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent-service", + Namespace: "default", + }, + }, + expectedResult: "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + result := store.getResourceVersionForObject(nsName, test.object) + g.Expect(result).To(Equal(test.expectedResult)) + }) + } +} From 700af68d2ff9b21aaad6675c5617e4b966b176ed Mon Sep 17 00:00:00 2001 From: bjee19 <139261241+bjee19@users.noreply.github.com> Date: Wed, 30 Apr 2025 15:37:02 -0700 Subject: [PATCH 28/32] CP/DP Split: Update documentation on accessing nginx container (#3338) Update documentation on accessing nginx container Problem: With our incoming changes to our control data plane split, the nginx container will no longer be in the NGF Pod. Thus all documentation on accessing the nginx container (logs, sending traffic, config...) need to be updated. Solution: Updated the documentation. Mainly, when sending traffic in the examples, the host and IP of the NGINX Service are recorded after the Gateway is deployed. Most of these changes are in our examples. --- docs/developer/testing.md | 6 +- examples/cafe-example/README.md | 21 +- examples/cross-namespace-routing/README.md | 21 +- examples/grpc-routing/README.md | 51 +++-- examples/https-termination/README.md | 215 +-------------------- examples/traffic-splitting/README.md | 21 +- 6 files changed, 65 insertions(+), 270 deletions(-) diff --git a/docs/developer/testing.md b/docs/developer/testing.md index 7804930ff3..daae07ee6a 100644 --- a/docs/developer/testing.md +++ b/docs/developer/testing.md @@ -69,19 +69,19 @@ Follow the steps below for manual testing: - Logs of the `nginx-gateway` container. Look out for unexpected error logs or panics. ```shell - kubectl logs -n nginx-gateway -l app=nginx-gateway + kubectl -n nginx-gateway logs -c nginx-gateway ``` - Logs of the `nginx` container. Look for unexpected error logs and verify the access logs are correct. ```shell - kubectl logs -n nginx-gateway -l app=nginx + kubectl -n logs -c nginx ``` - The generated nginx config. Make sure it's correct. ```shell - kubectl exec -it -n nginx-gateway -c nginx -- nginx -T + kubectl exec -it -n -c nginx -- nginx -T ``` - The statuses of the Gateway API Resources. Make sure they look correct. diff --git a/examples/cafe-example/README.md b/examples/cafe-example/README.md index e0254de815..d10f871e05 100644 --- a/examples/cafe-example/README.md +++ b/examples/cafe-example/README.md @@ -9,18 +9,6 @@ to route traffic to that application using HTTPRoute resources. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. 
Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Cafe Application 1. Create the coffee and the tea Deployments and Services: @@ -49,6 +37,15 @@ to route traffic to that application using HTTPRoute resources. kubectl apply -f gateway.yaml ``` + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + 1. Create the HTTPRoute resources: ```shell diff --git a/examples/cross-namespace-routing/README.md b/examples/cross-namespace-routing/README.md index 3e774cff46..9e98908a63 100644 --- a/examples/cross-namespace-routing/README.md +++ b/examples/cross-namespace-routing/README.md @@ -9,18 +9,6 @@ in a different namespace from our HTTPRoutes. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Cafe Application 1. Create the cafe namespace and cafe application: @@ -49,6 +37,15 @@ in a different namespace from our HTTPRoutes. kubectl apply -f gateway.yaml ``` + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + 1. Create the HTTPRoute resources: ```shell diff --git a/examples/grpc-routing/README.md b/examples/grpc-routing/README.md index d58ac43fd1..3f294978ce 100644 --- a/examples/grpc-routing/README.md +++ b/examples/grpc-routing/README.md @@ -9,18 +9,6 @@ to route traffic to that application using GRPCRoute resources. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Helloworld Application 1. Create the two helloworld Deployments and Services: @@ -60,7 +48,16 @@ There are 3 options to configure gRPC routing. To access the application and tes kubectl apply -f exact-method.yaml ``` -2. Test the Application: + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + +1. Test the Application: ```shell grpcurl -plaintext -proto grpc.proto -authority bar.com -d '{"name": "exact"}' ${GW_IP}:${GW_PORT} helloworld.Greeter/SayHello @@ -72,7 +69,7 @@ There are 3 options to configure gRPC routing. To access the application and tes } ``` -3. Clean up the Gateway and GRPCRoute resources: +1. Clean up the Gateway and GRPCRoute resources: ```shell kubectl delete -f exact-method.yaml @@ -86,7 +83,16 @@ There are 3 options to configure gRPC routing. To access the application and tes kubectl apply -f hostname.yaml ``` -2. Test the Application: + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. 
+ + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + +1. Test the Application: ```shell grpcurl -plaintext -proto grpc.proto -authority bar.com -d '{"name": "bar server"}' ${GW_IP}:${GW_PORT} helloworld.Greeter/SayHello @@ -132,7 +138,7 @@ There are 3 options to configure gRPC routing. To access the application and tes 2024/04/29 09:29:46 Received: foo bar server ``` -3. Clean up the Gateway and GRPCRoute resources: +1. Clean up the Gateway and GRPCRoute resources: ```shell kubectl delete -f hostname.yaml @@ -146,7 +152,16 @@ There are 3 options to configure gRPC routing. To access the application and tes kubectl apply -f headers.yaml ``` -2. Test the Application: + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + +1. Test the Application: ```shell grpcurl -plaintext -proto grpc.proto -authority bar.com -d '{"name": "version one"}' -H 'version: one' ${GW_IP}:${GW_PORT} helloworld.Greeter/SayHello @@ -230,7 +245,7 @@ There are 3 options to configure gRPC routing. To access the application and tes 2024/04/29 09:33:26 Received: version two orange ``` -3. Clean up the Gateway and GRPCRoute resources: +1. Clean up the Gateway and GRPCRoute resources: ```shell kubectl delete -f headers.yaml diff --git a/examples/https-termination/README.md b/examples/https-termination/README.md index 7d811babe1..8e7245e467 100644 --- a/examples/https-termination/README.md +++ b/examples/https-termination/README.md @@ -1,214 +1,3 @@ -# HTTPS Termination Example +# HTTPS Termination -In this example, we expand on the simple [cafe-example](../cafe-example) by adding HTTPS termination to our routes and -an HTTPS redirect from port 80 to 443. We will also show how you can use a ReferenceGrant to permit your Gateway to -reference a Secret in a different Namespace. - -## Running the Example - -## 1. Deploy NGINX Gateway Fabric - -1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. - -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the ports of NGINX Gateway Fabric: - - ```text - GW_HTTP_PORT= - GW_HTTPS_PORT= - ``` - -## 2. Deploy the Cafe Application - -1. Create the coffee and the tea Deployments and Services: - - ```shell - kubectl apply -f cafe.yaml - ``` - -1. Check that the Pods are running in the `default` namespace: - - ```shell - kubectl -n default get pods - ``` - - ```text - NAME READY STATUS RESTARTS AGE - coffee-6f4b79b975-2sb28 1/1 Running 0 12s - tea-6fb46d899f-fm7zr 1/1 Running 0 12s - ``` - -## 3. Configure HTTPS Termination and Routing - -1. Create the Namespace `certificate` and a Secret with a TLS certificate and key: - - ```shell - kubectl apply -f certificate-ns-and-cafe-secret.yaml - ``` - - The TLS certificate and key in this Secret are used to terminate the TLS connections for the cafe application. - > **Important**: This certificate and key are for demo purposes only. - -1. Create the ReferenceGrant: - - ```shell - kubectl apply -f reference-grant.yaml - ``` - - This ReferenceGrant allows all Gateways in the `default` namespace to reference the `cafe-secret` Secret in - the `certificate` Namespace. - -1. 
Create the Gateway resource: - - ```shell - kubectl apply -f gateway.yaml - ``` - - This [Gateway](./gateway.yaml) configures: - - `http` listener for HTTP traffic - - `https` listener for HTTPS traffic. It terminates TLS connections using the `cafe-secret` we created in step 1. - -1. Create the HTTPRoute resources: - - ```shell - kubectl apply -f cafe-routes.yaml - ``` - - To configure HTTPS termination for our cafe application, we will bind our `coffee` and `tea` HTTPRoutes to - the `https` listener in [cafe-routes.yaml](./cafe-routes.yaml) using - the [`parentReference`](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.ParentReference) - field: - - ```yaml - parentRefs: - - name: gateway - sectionName: https - ``` - - To configure an HTTPS redirect from port 80 to 443, we will bind the special `cafe-tls-redirect` HTTPRoute with - a [`HTTPRequestRedirectFilter`](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.HTTPRequestRedirectFilter) - to the `http` listener: - - ```yaml - parentRefs: - - name: gateway - sectionName: http - ``` - -## 4. Test the Application - -To access the application, we will use `curl` to send requests to the `coffee` and `tea` Services. First, we will access -the application over HTTP to test that the HTTPS redirect works. Then we will use HTTPS. - -### 4.1 Test HTTPS Redirect - -To test that NGINX sends an HTTPS redirect, we will send requests to the `coffee` and `tea` Services on HTTP port. We -will use curl's `--include` option to print the response headers (we are interested in the `Location` header). - -To get a redirect for coffee: - -```shell -curl --resolve cafe.example.com:$GW_HTTP_PORT:$GW_IP http://cafe.example.com:$GW_HTTP_PORT/coffee --include -``` - -```text -HTTP/1.1 302 Moved Temporarily -... -Location: https://cafe.example.com/coffee -... -``` - -To get a redirect for tea: - -```shell -curl --resolve cafe.example.com:$GW_HTTP_PORT:$GW_IP http://cafe.example.com:$GW_HTTP_PORT/tea --include -``` - -```text -HTTP/1.1 302 Moved Temporarily -... -Location: https://cafe.example.com/tea -... -``` - -### 4.2 Access Coffee and Tea - -Now we will access the application over HTTPS. Since our certificate is self-signed, we will use curl's `--insecure` -option to turn off certificate verification. - -To get coffee: - -```shell -curl --resolve cafe.example.com:$GW_HTTPS_PORT:$GW_IP https://cafe.example.com:$GW_HTTPS_PORT/coffee --insecure -``` - -```text -Server address: 10.12.0.18:80 -Server name: coffee-7586895968-r26zn -``` - -To get tea: - -```shell -curl --resolve cafe.example.com:$GW_HTTPS_PORT:$GW_IP https://cafe.example.com:$GW_HTTPS_PORT/tea --insecure -``` - -```text -Server address: 10.12.0.19:80 -Server name: tea-7cd44fcb4d-xfw2x -``` - -### 4.3 Remove the ReferenceGrant - -To restrict access to the `cafe-secret` in the `certificate` Namespace, we can delete the ReferenceGrant we created in -Step 3: - -```shell -kubectl delete -f reference-grant.yaml -``` - -Now, if we try to access the application over HTTPS, we will get a connection refused error: - -```shell -curl --resolve cafe.example.com:$GW_HTTPS_PORT:$GW_IP https://cafe.example.com:$GW_HTTPS_PORT/coffee --insecure -vvv -``` - -```text -... 
-curl: (7) Failed to connect to cafe.example.com port 443 after 0 ms: Connection refused -``` - - -You can also check the conditions of the Gateway `https` Listener to verify the that the reference is not permitted: - -```shell - kubectl describe gateway gateway -``` - -```text - Name: https - Conditions: - Last Transition Time: 2023-06-26T20:23:56Z - Message: Certificate ref to secret certificate/cafe-secret not permitted by any ReferenceGrant - Observed Generation: 1 - Reason: RefNotPermitted - Status: False - Type: Accepted - Last Transition Time: 2023-06-26T20:23:56Z - Message: Certificate ref to secret certificate/cafe-secret not permitted by any ReferenceGrant - Observed Generation: 1 - Reason: RefNotPermitted - Status: False - Type: ResolvedRefs - Last Transition Time: 2023-06-26T20:23:56Z - Message: Certificate ref to secret certificate/cafe-secret not permitted by any ReferenceGrant - Observed Generation: 1 - Reason: Invalid - Status: False - Type: Programmed -``` +This directory contains the YAML files used in the [HTTPS Termination](https://docs.nginx.com/nginx-gateway-fabric/how-to/traffic-management/https-termination/) guide. diff --git a/examples/traffic-splitting/README.md b/examples/traffic-splitting/README.md index 0479722cff..d3a07061b5 100644 --- a/examples/traffic-splitting/README.md +++ b/examples/traffic-splitting/README.md @@ -11,18 +11,6 @@ and `coffee-v2`. 1. Follow the [installation instructions](https://docs.nginx.com/nginx-gateway-fabric/installation/) to deploy NGINX Gateway Fabric. -1. Save the public IP address of NGINX Gateway Fabric into a shell variable: - - ```text - GW_IP=XXX.YYY.ZZZ.III - ``` - -1. Save the port of NGINX Gateway Fabric: - - ```text - GW_PORT= - ``` - ## 2. Deploy the Coffee Application 1. Create the Cafe Deployments and Services: @@ -51,6 +39,15 @@ and `coffee-v2`. kubectl apply -f gateway.yaml ``` + After creating the Gateway resource, NGINX Gateway Fabric will provision an NGINX Pod and Service fronting it to route traffic. + + Save the public IP address and port of the NGINX Service into shell variables: + + ```text + GW_IP=XXX.YYY.ZZZ.III + GW_PORT= + ``` + 1. Create the HTTPRoute resources: ```shell From ba084952d0b676a68f415390e4c040e29e2d33f1 Mon Sep 17 00:00:00 2001 From: bjee19 <139261241+bjee19@users.noreply.github.com> Date: Thu, 1 May 2025 16:07:04 -0700 Subject: [PATCH 29/32] CP/DP Split: Collect telemetry for cp dp split (#3352) Collect telemetry for number of data plane pods, control plane pods, and nginx proxy resources attached to a gateway. Problem: With the control data plane split, there are a few telemetry metrics which need to be updated or added. Solution: Update/Add telemetry metrics. --- internal/mode/static/telemetry/collector.go | 44 ++++++++++- .../mode/static/telemetry/collector_test.go | 79 ++++++++++++++++--- internal/mode/static/telemetry/data.avdl | 10 ++- .../telemetry/data_attributes_generated.go | 3 +- internal/mode/static/telemetry/data_test.go | 12 ++- .../ngfresourcecounts_attributes_generated.go | 1 + tests/suite/telemetry_test.go | 4 +- 7 files changed, 131 insertions(+), 22 deletions(-) diff --git a/internal/mode/static/telemetry/collector.go b/internal/mode/static/telemetry/collector.go index 8277515f56..66a476c66b 100644 --- a/internal/mode/static/telemetry/collector.go +++ b/internal/mode/static/telemetry/collector.go @@ -60,8 +60,10 @@ type Data struct { // then lastly by directive string. SnippetsFiltersDirectivesCount []int64 NGFResourceCounts // embedding is required by the generator. 
- // NGFReplicaCount is the number of replicas of the NGF Pod. - NGFReplicaCount int64 + // NginxPodCount is the total number of Nginx data plane Pods. + NginxPodCount int64 + // ControlPlanePodCount is the total number of NGF control plane Pods. + ControlPlanePodCount int64 } // NGFResourceCounts stores the counts of all relevant resources that NGF processes and generates configuration from. @@ -99,6 +101,8 @@ type NGFResourceCounts struct { SnippetsFilterCount int64 // UpstreamSettingsPolicyCount is the number of UpstreamSettingsPolicies. UpstreamSettingsPolicyCount int64 + // GatewayAttachedNpCount is the total number of NginxProxy resources that are attached to a Gateway. + GatewayAttachedNpCount int64 } // DataCollectorConfig holds configuration parameters for DataCollectorImpl. @@ -164,6 +168,8 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { snippetsFiltersDirectives, snippetsFiltersDirectivesCount := collectSnippetsFilterDirectives(g) + nginxPodCount := getNginxPodCount(g) + data := Data{ Data: tel.Data{ ProjectName: "NGF", @@ -179,9 +185,10 @@ func (c DataCollectorImpl) Collect(ctx context.Context) (Data, error) { ImageSource: c.cfg.ImageSource, FlagNames: c.cfg.Flags.Names, FlagValues: c.cfg.Flags.Values, - NGFReplicaCount: int64(replicaCount), SnippetsFiltersDirectives: snippetsFiltersDirectives, SnippetsFiltersDirectivesCount: snippetsFiltersDirectivesCount, + NginxPodCount: nginxPodCount, + ControlPlanePodCount: int64(replicaCount), } return data, nil @@ -241,6 +248,18 @@ func collectGraphResourceCount( ngfResourceCounts.NginxProxyCount = int64(len(g.ReferencedNginxProxies)) ngfResourceCounts.SnippetsFilterCount = int64(len(g.SnippetsFilters)) + var gatewayAttachedNPCount int64 + if g.GatewayClass != nil && g.GatewayClass.NginxProxy != nil { + gatewayClassNP := g.GatewayClass.NginxProxy + for _, np := range g.ReferencedNginxProxies { + if np != gatewayClassNP { + gatewayAttachedNPCount++ + } + } + } + + ngfResourceCounts.GatewayAttachedNpCount = gatewayAttachedNPCount + return ngfResourceCounts } @@ -495,3 +514,22 @@ func parseDirectiveContextMapIntoLists(directiveContextMap map[sfDirectiveContex return directiveContextList, countList } + +func getNginxPodCount(g *graph.Graph) int64 { + var count int64 + for _, gateway := range g.Gateways { + replicas := int64(1) + + np := gateway.EffectiveNginxProxy + if np != nil && + np.Kubernetes != nil && + np.Kubernetes.Deployment != nil && + np.Kubernetes.Deployment.Replicas != nil { + replicas = int64(*np.Kubernetes.Deployment.Replicas) + } + + count += replicas + } + + return count +} diff --git a/internal/mode/static/telemetry/collector_test.go b/internal/mode/static/telemetry/collector_test.go index ce75e47c02..c8a17286dd 100644 --- a/internal/mode/static/telemetry/collector_test.go +++ b/internal/mode/static/telemetry/collector_test.go @@ -18,6 +18,8 @@ import ( gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" ngfAPI "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1" + "github.com/nginx/nginx-gateway-fabric/apis/v1alpha2" + "github.com/nginx/nginx-gateway-fabric/internal/framework/helpers" "github.com/nginx/nginx-gateway-fabric/internal/framework/kinds" "github.com/nginx/nginx-gateway-fabric/internal/framework/kubernetes/kubernetesfakes" "github.com/nginx/nginx-gateway-fabric/internal/mode/static/config" @@ -170,7 +172,7 @@ var _ = Describe("Collector", Ordered, func() { ClusterNodeCount: 1, }, NGFResourceCounts: telemetry.NGFResourceCounts{}, - NGFReplicaCount: 1, + 
ControlPlanePodCount: 1, ImageSource: "local", FlagNames: flags.Names, FlagValues: flags.Values, @@ -262,6 +264,24 @@ var _ = Describe("Collector", Ordered, func() { k8sClientReader.ListCalls(createListCallsFunc(nodes)) + k8sClientReader.GetCalls(mergeGetCallsWithBase(createGetCallsFunc( + &appsv1.ReplicaSet{ + Spec: appsv1.ReplicaSetSpec{ + Replicas: helpers.GetPointer(int32(2)), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "replica", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "Deployment1", + UID: "test-uid-replicaSet", + }, + }, + }, + }, + ))) + secret1 := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "secret1"}} secret2 := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "secret2"}} nilsecret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "nilsecret"}} @@ -270,11 +290,33 @@ var _ = Describe("Collector", Ordered, func() { svc2 := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc2"}} nilsvc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "nilsvc"}} + gcNP := graph.NginxProxy{ + Source: nil, + ErrMsgs: nil, + Valid: false, + } + graph := &graph.Graph{ - GatewayClass: &graph.GatewayClass{}, + GatewayClass: &graph.GatewayClass{NginxProxy: &gcNP}, Gateways: map[types.NamespacedName]*graph.Gateway{ - {Name: "gateway1"}: {}, - {Name: "gateway2"}: {}, + {Name: "gateway1"}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &v1alpha2.KubernetesSpec{ + Deployment: &v1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(1)), + }, + }, + }, + }, + {Name: "gateway2"}: { + EffectiveNginxProxy: &graph.EffectiveNginxProxy{ + Kubernetes: &v1alpha2.KubernetesSpec{ + Deployment: &v1alpha2.DeploymentSpec{ + Replicas: helpers.GetPointer(int32(3)), + }, + }, + }, + }, {Name: "gateway3"}: {}, }, IgnoredGatewayClasses: map[types.NamespacedName]*gatewayv1.GatewayClass{ @@ -335,9 +377,11 @@ var _ = Describe("Collector", Ordered, func() { }: {}, }, ReferencedNginxProxies: map[types.NamespacedName]*graph.NginxProxy{ - {Namespace: "test", Name: "NginxProxy-1"}: {}, - {Namespace: "test", Name: "NginxProxy-2"}: {}, - }, SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ + {Namespace: "test", Name: "NginxProxy-1"}: &gcNP, + {Namespace: "test", Name: "NginxProxy-2"}: {Valid: true}, + {Namespace: "test", Name: "NginxProxy-3"}: {Valid: true}, + }, + SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ {Namespace: "test", Name: "sf-1"}: { Snippets: map[ngfAPI.NginxContext]string{ ngfAPI.NginxContextMain: "worker_priority 0;", @@ -432,9 +476,10 @@ var _ = Describe("Collector", Ordered, func() { GatewayAttachedClientSettingsPolicyCount: 1, RouteAttachedClientSettingsPolicyCount: 2, ObservabilityPolicyCount: 1, - NginxProxyCount: 2, + NginxProxyCount: 3, SnippetsFilterCount: 3, UpstreamSettingsPolicyCount: 1, + GatewayAttachedNpCount: 2, } expData.ClusterVersion = "1.29.2" expData.ClusterPlatform = "kind" @@ -462,6 +507,11 @@ var _ = Describe("Collector", Ordered, func() { 1, } + // one gateway with one replica + one gateway with three replicas + one gateway with replica field + // empty + expData.NginxPodCount = int64(5) + expData.ControlPlanePodCount = int64(2) + data, err := dataCollector.Collect(ctx) Expect(err).ToNot(HaveOccurred()) @@ -593,7 +643,7 @@ var _ = Describe("Collector", Ordered, func() { svc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1"}} graph1 = &graph.Graph{ - GatewayClass: &graph.GatewayClass{}, + GatewayClass: &graph.GatewayClass{NginxProxy: &graph.NginxProxy{Valid: true}}, Gateways: 
map[types.NamespacedName]*graph.Gateway{ {Name: "gateway1"}: {}, }, @@ -634,12 +684,14 @@ var _ = Describe("Collector", Ordered, func() { }: {}, }, ReferencedNginxProxies: map[types.NamespacedName]*graph.NginxProxy{ - {Namespace: "test", Name: "NginxProxy-1"}: {}, - {Namespace: "test", Name: "NginxProxy-2"}: {}, + {Namespace: "test", Name: "NginxProxy-1"}: {Valid: true}, }, SnippetsFilters: map[types.NamespacedName]*graph.SnippetsFilter{ {Namespace: "test", Name: "sf-1"}: {}, }, + BackendTLSPolicies: map[types.NamespacedName]*graph.BackendTLSPolicy{ + {Namespace: "test", Name: "BackendTLSPolicy-1"}: {}, + }, } config1 = []*dataplane.Configuration{ @@ -716,10 +768,13 @@ var _ = Describe("Collector", Ordered, func() { GatewayAttachedClientSettingsPolicyCount: 1, RouteAttachedClientSettingsPolicyCount: 1, ObservabilityPolicyCount: 1, - NginxProxyCount: 2, + NginxProxyCount: 1, SnippetsFilterCount: 1, UpstreamSettingsPolicyCount: 1, + GatewayAttachedNpCount: 1, + BackendTLSPolicyCount: 1, } + expData.NginxPodCount = 1 data, err := dataCollector.Collect(ctx) diff --git a/internal/mode/static/telemetry/data.avdl b/internal/mode/static/telemetry/data.avdl index 6909878866..95d99f316b 100644 --- a/internal/mode/static/telemetry/data.avdl +++ b/internal/mode/static/telemetry/data.avdl @@ -102,8 +102,14 @@ attached at the Gateway level. */ /** UpstreamSettingsPolicyCount is the number of UpstreamSettingsPolicies. */ long? UpstreamSettingsPolicyCount = null; - /** NGFReplicaCount is the number of replicas of the NGF Pod. */ - long? NGFReplicaCount = null; + /** GatewayAttachedNpCount is the total number of NginxProxy resources that are attached to a Gateway. */ + long? GatewayAttachedNpCount = null; + + /** NginxPodCount is the total number of Nginx data plane Pods. */ + long? NginxPodCount = null; + + /** ControlPlanePodCount is the total number of NGF control plane Pods. */ + long? ControlPlanePodCount = null; } } diff --git a/internal/mode/static/telemetry/data_attributes_generated.go b/internal/mode/static/telemetry/data_attributes_generated.go index 553925b0fd..afbd8dfb1f 100644 --- a/internal/mode/static/telemetry/data_attributes_generated.go +++ b/internal/mode/static/telemetry/data_attributes_generated.go @@ -20,7 +20,8 @@ func (d *Data) Attributes() []attribute.KeyValue { attrs = append(attrs, attribute.StringSlice("SnippetsFiltersDirectives", d.SnippetsFiltersDirectives)) attrs = append(attrs, attribute.Int64Slice("SnippetsFiltersDirectivesCount", d.SnippetsFiltersDirectivesCount)) attrs = append(attrs, d.NGFResourceCounts.Attributes()...) 
- attrs = append(attrs, attribute.Int64("NGFReplicaCount", d.NGFReplicaCount)) + attrs = append(attrs, attribute.Int64("NginxPodCount", d.NginxPodCount)) + attrs = append(attrs, attribute.Int64("ControlPlanePodCount", d.ControlPlanePodCount)) return attrs } diff --git a/internal/mode/static/telemetry/data_test.go b/internal/mode/static/telemetry/data_test.go index d2dfe9516b..867424e145 100644 --- a/internal/mode/static/telemetry/data_test.go +++ b/internal/mode/static/telemetry/data_test.go @@ -40,10 +40,12 @@ func TestDataAttributes(t *testing.T) { NginxProxyCount: 12, SnippetsFilterCount: 13, UpstreamSettingsPolicyCount: 14, + GatewayAttachedNpCount: 15, }, - NGFReplicaCount: 3, SnippetsFiltersDirectives: []string{"main-three-count", "http-two-count", "server-one-count"}, SnippetsFiltersDirectivesCount: []int64{3, 2, 1}, + NginxPodCount: 3, + ControlPlanePodCount: 3, } expected := []attribute.KeyValue{ @@ -79,7 +81,9 @@ func TestDataAttributes(t *testing.T) { attribute.Int64("NginxProxyCount", 12), attribute.Int64("SnippetsFilterCount", 13), attribute.Int64("UpstreamSettingsPolicyCount", 14), - attribute.Int64("NGFReplicaCount", 3), + attribute.Int64("GatewayAttachedNpCount", 15), + attribute.Int64("NginxPodCount", 3), + attribute.Int64("ControlPlanePodCount", 3), } result := data.Attributes() @@ -122,7 +126,9 @@ func TestDataAttributesWithEmptyData(t *testing.T) { attribute.Int64("NginxProxyCount", 0), attribute.Int64("SnippetsFilterCount", 0), attribute.Int64("UpstreamSettingsPolicyCount", 0), - attribute.Int64("NGFReplicaCount", 0), + attribute.Int64("GatewayAttachedNpCount", 0), + attribute.Int64("NginxPodCount", 0), + attribute.Int64("ControlPlanePodCount", 0), } result := data.Attributes() diff --git a/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go b/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go index baddcd174d..3073f15eb4 100644 --- a/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go +++ b/internal/mode/static/telemetry/ngfresourcecounts_attributes_generated.go @@ -27,6 +27,7 @@ func (d *NGFResourceCounts) Attributes() []attribute.KeyValue { attrs = append(attrs, attribute.Int64("NginxProxyCount", d.NginxProxyCount)) attrs = append(attrs, attribute.Int64("SnippetsFilterCount", d.SnippetsFilterCount)) attrs = append(attrs, attribute.Int64("UpstreamSettingsPolicyCount", d.UpstreamSettingsPolicyCount)) + attrs = append(attrs, attribute.Int64("GatewayAttachedNpCount", d.GatewayAttachedNpCount)) return attrs } diff --git a/tests/suite/telemetry_test.go b/tests/suite/telemetry_test.go index c823cc590c..7c81b74342 100644 --- a/tests/suite/telemetry_test.go +++ b/tests/suite/telemetry_test.go @@ -92,7 +92,9 @@ var _ = Describe("Telemetry test with OTel collector", Label("telemetry"), func( "NginxProxyCount: Int(1)", "SnippetsFilterCount: Int(0)", "UpstreamSettingsPolicyCount: Int(0)", - "NGFReplicaCount: Int(1)", + "GatewayAttachedNpCount: Int(0)", + "NginxPodCount: Int(0)", + "ControlPlanePodCount: Int(1)", }, ) }) From b4228544e21a5b8844b96d3589c7fb3a2e1ac355 Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Tue, 6 May 2025 12:04:08 -0600 Subject: [PATCH 30/32] CP/DP Split: update a few more container references (#3359) --- .github/ISSUE_TEMPLATE/bug_report.md | 4 ++-- docs/developer/testing.md | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 551b7f5d79..d4fbfaf839 100644 --- 
a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -24,8 +24,8 @@ A clear and concise description of what you expected to happen. * Version of Kubernetes * Kubernetes platform (e.g. Mini-kube or GCP) * Details on how you expose the NGINX Gateway Fabric Pod (e.g. Service of type LoadBalancer or port-forward) -* Logs of NGINX container: `kubectl -n nginx-gateway logs -l app=nginx-gateway -c nginx` -* NGINX Configuration: `kubectl -n nginx-gateway exec -c nginx -- nginx -T` +* Logs of NGINX container: `kubectl -n logs deployments/` +* NGINX Configuration: `kubectl -n exec -it deployments/ -- nginx -T` **Additional context** Add any other context about the problem here. Any log files you want to share. diff --git a/docs/developer/testing.md b/docs/developer/testing.md index daae07ee6a..bc19f145cb 100644 --- a/docs/developer/testing.md +++ b/docs/developer/testing.md @@ -69,19 +69,19 @@ Follow the steps below for manual testing: - Logs of the `nginx-gateway` container. Look out for unexpected error logs or panics. ```shell - kubectl -n nginx-gateway logs -c nginx-gateway + kubectl -n nginx-gateway logs ``` - Logs of the `nginx` container. Look for unexpected error logs and verify the access logs are correct. ```shell - kubectl -n logs -c nginx + kubectl -n logs ``` - The generated nginx config. Make sure it's correct. ```shell - kubectl exec -it -n -c nginx -- nginx -T + kubectl exec -it -n -- nginx -T ``` - The statuses of the Gateway API Resources. Make sure they look correct. From 34094d5d134dafc9c713b245d0a58cd190fcbe7b Mon Sep 17 00:00:00 2001 From: Saylor Berman Date: Wed, 14 May 2025 09:50:18 -0600 Subject: [PATCH 31/32] CP/DP Split: fix label updates (#3370) Problem: Updating labels/annotations on the Gateway did not propagate to some resources. Solution: Ensure that labels/annotations are set when updating resources. 
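For reference, the update path this change relies on is controller-runtime's CreateOrUpdate with a mutate function that copies the desired metadata as well as the spec. A minimal sketch of that pattern, for illustration only: the helper and variable names below are hypothetical, and the real setters are in internal/mode/static/provisioner/setter.go in the diff that follows.

```go
package provisioner

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// applyDeployment is a hypothetical helper showing the shape of the fix:
// the mutate function copies labels and annotations from the desired object
// onto the live one (not just the spec), so metadata changes made on the
// Gateway also propagate when the Deployment already exists.
func applyDeployment(ctx context.Context, c client.Client, desired *appsv1.Deployment) error {
	live := &appsv1.Deployment{}
	live.Name = desired.Name
	live.Namespace = desired.Namespace

	_, err := controllerutil.CreateOrUpdate(ctx, c, live, func() error {
		live.Labels = desired.Labels
		live.Annotations = desired.Annotations
		live.Spec = desired.Spec
		return nil
	})
	return err
}
```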
--- build/Dockerfile.nginx | 2 +- build/Dockerfile.nginxplus | 2 +- go.mod | 2 +- go.sum | 4 +- .../mode/static/provisioner/provisioner.go | 2 +- internal/mode/static/provisioner/setter.go | 79 ++++++++++++++++--- internal/mode/static/provisioner/templates.go | 1 - .../static/state/conditions/conditions.go | 32 -------- 8 files changed, 73 insertions(+), 51 deletions(-) diff --git a/build/Dockerfile.nginx b/build/Dockerfile.nginx index b5033574da..3d5dc24241 100644 --- a/build/Dockerfile.nginx +++ b/build/Dockerfile.nginx @@ -12,7 +12,7 @@ WORKDIR /tmp RUN apk add --no-cache git make \ && git clone https://github.com/nginx/agent.git \ && cd agent \ - && git checkout v3 \ + && git checkout e745a3236e0f02a579461a5a435b3bcd410a686c \ && make build FROM nginx:1.28.0-alpine-otel diff --git a/build/Dockerfile.nginxplus b/build/Dockerfile.nginxplus index 42d0228e62..2c7d7452aa 100644 --- a/build/Dockerfile.nginxplus +++ b/build/Dockerfile.nginxplus @@ -11,7 +11,7 @@ WORKDIR /tmp RUN apk add --no-cache git make \ && git clone https://github.com/nginx/agent.git \ && cd agent \ - && git checkout v3 \ + && git checkout e745a3236e0f02a579461a5a435b3bcd410a686c \ && make build FROM alpine:3.21 diff --git a/go.mod b/go.mod index 561df41494..a3d470c3ec 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 - github.com/nginx/agent/v3 v3.0.0-20250429163223-735f50381a9e + github.com/nginx/agent/v3 v3.0.0-20250513105855-e745a3236e0f github.com/nginx/telemetry-exporter v0.1.4 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 diff --git a/go.sum b/go.sum index c49cce28ac..373a6764ea 100644 --- a/go.sum +++ b/go.sum @@ -133,8 +133,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nginx/agent/v3 v3.0.0-20250429163223-735f50381a9e h1:Cw/fGXymS9ytwusxE7TaySDovKH+yQuWRI0vLJ4rJxU= -github.com/nginx/agent/v3 v3.0.0-20250429163223-735f50381a9e/go.mod h1:O/31aKtii/mpiZmFGMcTNDoLtKzwTyTXOBMSRkMaPvs= +github.com/nginx/agent/v3 v3.0.0-20250513105855-e745a3236e0f h1:fSUAaR1AxmmbmGMRkvKGY2+LhuVpBp7tbBFLLgDMjNQ= +github.com/nginx/agent/v3 v3.0.0-20250513105855-e745a3236e0f/go.mod h1:O/31aKtii/mpiZmFGMcTNDoLtKzwTyTXOBMSRkMaPvs= github.com/nginx/telemetry-exporter v0.1.4 h1:3ikgKlyz/O57oaBLkxCInMjr74AhGTKr9rHdRAkkl/w= github.com/nginx/telemetry-exporter v0.1.4/go.mod h1:bl6qmsxgk4a9D0X8R5E3sUNXN2iECPEK1JNbRLhN5C4= github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1 h1:5VVK38bnELMDWnwfF6dSv57ResXh9AUzeDa72ENj94o= diff --git a/internal/mode/static/provisioner/provisioner.go b/internal/mode/static/provisioner/provisioner.go index 42bd761ba2..0b6548b416 100644 --- a/internal/mode/static/provisioner/provisioner.go +++ b/internal/mode/static/provisioner/provisioner.go @@ -260,7 +260,7 @@ func (p *NginxProvisioner) provisionNginx( } // if agent configmap was updated, then we'll need to restart the deployment - if agentConfigMapUpdated && !deploymentCreated { + if agentConfigMapUpdated && !deploymentCreated && deploymentObj != nil { updateCtx, cancel := 
context.WithTimeout(ctx, 30*time.Second) defer cancel() diff --git a/internal/mode/static/provisioner/setter.go b/internal/mode/static/provisioner/setter.go index 3d5c84e780..eff556e434 100644 --- a/internal/mode/static/provisioner/setter.go +++ b/internal/mode/static/provisioner/setter.go @@ -1,9 +1,12 @@ package provisioner import ( + "maps" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -12,54 +15,103 @@ import ( func objectSpecSetter(object client.Object) controllerutil.MutateFn { switch obj := object.(type) { case *appsv1.Deployment: - return deploymentSpecSetter(obj, obj.Spec) + return deploymentSpecSetter(obj, obj.Spec, obj.ObjectMeta) case *corev1.Service: - return serviceSpecSetter(obj, obj.Spec) + return serviceSpecSetter(obj, obj.Spec, obj.ObjectMeta) case *corev1.ServiceAccount: - return func() error { return nil } + return serviceAccountSpecSetter(obj, obj.ObjectMeta) case *corev1.ConfigMap: - return configMapSpecSetter(obj, obj.Data) + return configMapSpecSetter(obj, obj.Data, obj.ObjectMeta) case *corev1.Secret: - return secretSpecSetter(obj, obj.Data) + return secretSpecSetter(obj, obj.Data, obj.ObjectMeta) case *rbacv1.Role: - return roleSpecSetter(obj, obj.Rules) + return roleSpecSetter(obj, obj.Rules, obj.ObjectMeta) case *rbacv1.RoleBinding: - return roleBindingSpecSetter(obj, obj.RoleRef, obj.Subjects) + return roleBindingSpecSetter(obj, obj.RoleRef, obj.Subjects, obj.ObjectMeta) } return nil } -func deploymentSpecSetter(deployment *appsv1.Deployment, spec appsv1.DeploymentSpec) controllerutil.MutateFn { +func deploymentSpecSetter( + deployment *appsv1.Deployment, + spec appsv1.DeploymentSpec, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { return func() error { + deployment.Labels = objectMeta.Labels + deployment.Annotations = objectMeta.Annotations deployment.Spec = spec return nil } } -func serviceSpecSetter(service *corev1.Service, spec corev1.ServiceSpec) controllerutil.MutateFn { +func serviceSpecSetter( + service *corev1.Service, + spec corev1.ServiceSpec, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { return func() error { + service.Labels = objectMeta.Labels + service.Annotations = objectMeta.Annotations service.Spec = spec return nil } } -func configMapSpecSetter(configMap *corev1.ConfigMap, data map[string]string) controllerutil.MutateFn { +func serviceAccountSpecSetter( + serviceAccount *corev1.ServiceAccount, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { return func() error { + serviceAccount.Labels = objectMeta.Labels + serviceAccount.Annotations = objectMeta.Annotations + return nil + } +} + +func configMapSpecSetter( + configMap *corev1.ConfigMap, + data map[string]string, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { + return func() error { + // this check ensures we don't trigger an unnecessary update to the agent ConfigMap + // and trigger a Deployment restart + if maps.Equal(configMap.Labels, objectMeta.Labels) && + maps.Equal(configMap.Annotations, objectMeta.Annotations) && + maps.Equal(configMap.Data, data) { + return nil + } + + configMap.Labels = objectMeta.Labels + configMap.Annotations = objectMeta.Annotations configMap.Data = data return nil } } -func secretSpecSetter(secret *corev1.Secret, data map[string][]byte) controllerutil.MutateFn { +func secretSpecSetter( + secret *corev1.Secret, + 
data map[string][]byte, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { return func() error { + secret.Labels = objectMeta.Labels + secret.Annotations = objectMeta.Annotations secret.Data = data return nil } } -func roleSpecSetter(role *rbacv1.Role, rules []rbacv1.PolicyRule) controllerutil.MutateFn { +func roleSpecSetter( + role *rbacv1.Role, + rules []rbacv1.PolicyRule, + objectMeta metav1.ObjectMeta, +) controllerutil.MutateFn { return func() error { + role.Labels = objectMeta.Labels + role.Annotations = objectMeta.Annotations role.Rules = rules return nil } @@ -69,8 +121,11 @@ func roleBindingSpecSetter( roleBinding *rbacv1.RoleBinding, roleRef rbacv1.RoleRef, subjects []rbacv1.Subject, + objectMeta metav1.ObjectMeta, ) controllerutil.MutateFn { return func() error { + roleBinding.Labels = objectMeta.Labels + roleBinding.Annotations = objectMeta.Annotations roleBinding.RoleRef = roleRef roleBinding.Subjects = subjects return nil diff --git a/internal/mode/static/provisioner/templates.go b/internal/mode/static/provisioner/templates.go index 326cac7478..87a667ef87 100644 --- a/internal/mode/static/provisioner/templates.go +++ b/internal/mode/static/provisioner/templates.go @@ -45,7 +45,6 @@ allowed_directories: - /usr/share/nginx - /var/run/nginx features: -- connection - configuration - certificates {{- if .EnableMetrics }} diff --git a/internal/mode/static/state/conditions/conditions.go b/internal/mode/static/state/conditions/conditions.go index 9452ceb533..e864e12103 100644 --- a/internal/mode/static/state/conditions/conditions.go +++ b/internal/mode/static/state/conditions/conditions.go @@ -53,14 +53,6 @@ const ( // invalid. Used with ResolvedRefs (false). RouteReasonInvalidFilter v1.RouteConditionReason = "InvalidFilter" - // GatewayReasonGatewayConflict indicates there are multiple Gateway resources to choose from, - // and we ignored the resource in question and picked another Gateway as the winner. - // This reason is used with GatewayConditionAccepted (false). - GatewayReasonGatewayConflict v1.GatewayConditionReason = "GatewayConflict" - - // GatewayMessageGatewayConflict is a message that describes GatewayReasonGatewayConflict. - GatewayMessageGatewayConflict = "The resource is ignored due to a conflicting Gateway resource" - // GatewayReasonUnsupportedValue is used with GatewayConditionAccepted (false) when a value of a field in a Gateway // is invalid or not supported. GatewayReasonUnsupportedValue v1.GatewayConditionReason = "UnsupportedValue" @@ -574,19 +566,6 @@ func NewGatewayAccepted() conditions.Condition { } } -// NewGatewayConflict returns Conditions that indicate the Gateway has a conflict with another Gateway. -func NewGatewayConflict() []conditions.Condition { - return []conditions.Condition{ - { - Type: string(v1.GatewayConditionAccepted), - Status: metav1.ConditionFalse, - Reason: string(GatewayReasonGatewayConflict), - Message: GatewayMessageGatewayConflict, - }, - NewGatewayConflictNotProgrammed(), - } -} - // NewGatewayAcceptedListenersNotValid returns a Condition that indicates the Gateway is accepted, // but has at least one listener that is invalid. func NewGatewayAcceptedListenersNotValid() conditions.Condition { @@ -668,17 +647,6 @@ func NewGatewayNotProgrammedInvalid(msg string) conditions.Condition { } } -// NewGatewayConflictNotProgrammed returns a custom Programmed Condition that indicates the Gateway has a -// conflict with another Gateway. 
-func NewGatewayConflictNotProgrammed() conditions.Condition { - return conditions.Condition{ - Type: string(v1.GatewayConditionProgrammed), - Status: metav1.ConditionFalse, - Reason: string(GatewayReasonGatewayConflict), - Message: GatewayMessageGatewayConflict, - } -} - // NewNginxGatewayValid returns a Condition that indicates that the NginxGateway config is valid. func NewNginxGatewayValid() conditions.Condition { return conditions.Condition{ From a66267bb65ef4aa33c77a1ed1a7fe1df5266e76a Mon Sep 17 00:00:00 2001 From: bjee19 <139261241+bjee19@users.noreply.github.com> Date: Wed, 14 May 2025 09:38:21 -0700 Subject: [PATCH 32/32] Remove unused service annotations (#3362) * Remove unused service annotations * Remove files for aws-nlb that rely on service annotations --- apis/v1alpha2/nginxproxy_types.go | 5 - apis/v1alpha2/zz_generated.deepcopy.go | 7 - charts/nginx-gateway-fabric/README.md | 5 +- .../nginx-gateway-fabric/values.schema.json | 6 - charts/nginx-gateway-fabric/values.yaml | 3 - .../bases/gateway.nginx.org_nginxproxies.yaml | 5 - deploy/aws-nlb/deploy.yaml | 421 ------------------ deploy/crds.yaml | 5 - examples/helm/README.md | 1 - examples/helm/aws-nlb/values.yaml | 8 - 10 files changed, 2 insertions(+), 464 deletions(-) delete mode 100644 deploy/aws-nlb/deploy.yaml delete mode 100644 examples/helm/aws-nlb/values.yaml diff --git a/apis/v1alpha2/nginxproxy_types.go b/apis/v1alpha2/nginxproxy_types.go index 13013d81cf..99daf4cfa9 100644 --- a/apis/v1alpha2/nginxproxy_types.go +++ b/apis/v1alpha2/nginxproxy_types.go @@ -523,11 +523,6 @@ type ServiceSpec struct { // +optional LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` - // Annotations contain any Service-specific annotations. - // - // +optional - Annotations map[string]string `json:"annotations,omitempty"` - // LoadBalancerSourceRanges are the IP ranges (CIDR) that are allowed to access the load balancer. // Requires service type to be LoadBalancer. // diff --git a/apis/v1alpha2/zz_generated.deepcopy.go b/apis/v1alpha2/zz_generated.deepcopy.go index bd6d81bca2..c0ddf4ed6e 100644 --- a/apis/v1alpha2/zz_generated.deepcopy.go +++ b/apis/v1alpha2/zz_generated.deepcopy.go @@ -550,13 +550,6 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(string) **out = **in } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } if in.LoadBalancerSourceRanges != nil { in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges *out = make([]string, len(*in)) diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md index 8ed532ea95..7904cbaacc 100644 --- a/charts/nginx-gateway-fabric/README.md +++ b/charts/nginx-gateway-fabric/README.md @@ -258,7 +258,7 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | `certGenerator.overwrite` | Overwrite existing TLS Secrets on startup. | bool | `false` | | `certGenerator.serverTLSSecretName` | The name of the Secret containing TLS CA, certificate, and key for the NGINX Gateway Fabric control plane to securely communicate with the NGINX Agent. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"server-tls"` | | `clusterDomain` | The DNS cluster domain of your Kubernetes cluster. 
| string | `"cluster.local"` | -| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"config":{},"container":{},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","plus":false,"pod":{},"replicas":1,"service":{"annotations":{},"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` | +| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"config":{},"container":{},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","plus":false,"pod":{},"replicas":1,"service":{"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` | | `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | | `nginx.container` | The container configuration for the NGINX container. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | | `nginx.debug` | Enable debugging for NGINX. Uses the nginx-debug binary. The NGINX error log level should be set to debug in the NginxProxy resource. | bool | `false` | @@ -269,8 +269,7 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri | `nginx.plus` | Is NGINX Plus image being used. | bool | `false` | | `nginx.pod` | The pod configuration for the NGINX data plane pod. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` | | `nginx.replicas` | The number of replicas of the NGINX Deployment. | int | `1` | -| `nginx.service` | The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{"annotations":{},"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"}` | -| `nginx.service.annotations` | The annotations of the NGINX data plane service. | object | `{}` | +| `nginx.service` | The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"type":"LoadBalancer"}` | | `nginx.service.externalTrafficPolicy` | The externalTrafficPolicy of the service. The value Local preserves the client source IP. | string | `"Local"` | | `nginx.service.loadBalancerClass` | LoadBalancerClass is the class of the load balancer implementation this Service belongs to. Requires nginx.service.type set to LoadBalancer. 
| string | `""` | | `nginx.service.loadBalancerIP` | The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. | string | `""` | diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json index 806512fce8..67250a526f 100644 --- a/charts/nginx-gateway-fabric/values.schema.json +++ b/charts/nginx-gateway-fabric/values.schema.json @@ -356,12 +356,6 @@ "service": { "description": "The service configuration for the NGINX data plane. This is applied globally to all Gateways managed by this\ninstance of NGINX Gateway Fabric.", "properties": { - "annotations": { - "description": "The annotations of the NGINX data plane service.", - "required": [], - "title": "annotations", - "type": "object" - }, "externalTrafficPolicy": { "default": "Local", "description": "The externalTrafficPolicy of the service. The value Local preserves the client source IP.", diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml index 077faeff80..a42779b1c2 100644 --- a/charts/nginx-gateway-fabric/values.yaml +++ b/charts/nginx-gateway-fabric/values.yaml @@ -425,9 +425,6 @@ nginx: # -- The externalTrafficPolicy of the service. The value Local preserves the client source IP. externalTrafficPolicy: Local - # -- The annotations of the NGINX data plane service. - annotations: {} - # -- The static IP address for the load balancer. Requires nginx.service.type set to LoadBalancer. loadBalancerIP: "" diff --git a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml index a74ea35b32..2e961a2c10 100644 --- a/config/crd/bases/gateway.nginx.org_nginxproxies.yaml +++ b/config/crd/bases/gateway.nginx.org_nginxproxies.yaml @@ -3467,11 +3467,6 @@ spec: service: description: Service is the configuration for the NGINX Service. properties: - annotations: - additionalProperties: - type: string - description: Annotations contain any Service-specific annotations. 
- type: object externalTrafficPolicy: default: Local description: |- diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml deleted file mode 100644 index 3f82b66bce..0000000000 --- a/deploy/aws-nlb/deploy.yaml +++ /dev/null @@ -1,421 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-gateway ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway - namespace: nginx-gateway ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway-cert-generator - namespace: nginx-gateway ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway-cert-generator - namespace: nginx-gateway -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - update - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway -rules: -- apiGroups: - - "" - - apps - resources: - - secrets - - configmaps - - serviceaccounts - - services - - deployments - verbs: - - create - - update - - delete - - list - - get - - watch -- apiGroups: - - "" - resources: - - namespaces - - pods - verbs: - - get - - list - - watch -- apiGroups: - - apps - resources: - - replicasets - verbs: - - get - - list -- apiGroups: - - "" - resources: - - nodes - verbs: - - list -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - gateway.networking.k8s.io - resources: - - gatewayclasses - - gateways - - httproutes - - referencegrants - - grpcroutes - verbs: - - list - - watch -- apiGroups: - - gateway.networking.k8s.io - resources: - - httproutes/status - - gateways/status - - gatewayclasses/status - - grpcroutes/status - verbs: - - update -- apiGroups: - - gateway.nginx.org - resources: - - nginxgateways - verbs: - - get - - list - - watch -- apiGroups: - - gateway.nginx.org - resources: - - nginxproxies - - clientsettingspolicies - - observabilitypolicies - - upstreamsettingspolicies - verbs: - - list - - watch -- apiGroups: - - gateway.nginx.org - resources: - - nginxgateways/status - - clientsettingspolicies/status - - observabilitypolicies/status - - upstreamsettingspolicies/status - verbs: - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway-cert-generator - namespace: nginx-gateway -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-gateway-cert-generator -subjects: -- kind: ServiceAccount - name: nginx-gateway-cert-generator - namespace: 
nginx-gateway ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-gateway -subjects: -- kind: ServiceAccount - name: nginx-gateway - namespace: nginx-gateway ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway - namespace: nginx-gateway -spec: - ports: - - name: agent-grpc - port: 443 - protocol: TCP - targetPort: 8443 - selector: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - type: ClusterIP ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway - namespace: nginx-gateway -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - spec: - containers: - - args: - - controller - - --gateway-ctlr-name=gateway.nginx.org/nginx-gateway-controller - - --gatewayclass=nginx - - --config=nginx-gateway-config - - --service=nginx-gateway - - --agent-tls-secret=agent-tls - - --metrics-port=9113 - - --health-port=8081 - - --leader-election-lock-name=nginx-gateway-leader-election - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_UID - valueFrom: - fieldRef: - fieldPath: metadata.uid - - name: INSTANCE_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['app.kubernetes.io/instance'] - - name: IMAGE_NAME - value: ghcr.io/nginx/nginx-gateway-fabric:edge - image: ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: nginx-gateway - ports: - - containerPort: 8443 - name: agent-grpc - - containerPort: 9113 - name: metrics - - containerPort: 8081 - name: health - readinessProbe: - httpGet: - path: /readyz - port: health - initialDelaySeconds: 3 - periodSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /var/run/secrets/ngf - name: nginx-agent-tls - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway - terminationGracePeriodSeconds: 30 - volumes: - - name: nginx-agent-tls - secret: - secretName: server-tls ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway-cert-generator - namespace: nginx-gateway -spec: - template: - metadata: - annotations: null - spec: - containers: - - args: - - generate-certs - - --service=nginx-gateway - - --cluster-domain=cluster.local - - --server-tls-secret=server-tls - - --agent-tls-secret=agent-tls - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: 
ghcr.io/nginx/nginx-gateway-fabric:edge - imagePullPolicy: Always - name: cert-generator - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 1001 - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - restartPolicy: Never - securityContext: - fsGroup: 1001 - runAsNonRoot: true - serviceAccountName: nginx-gateway-cert-generator - ttlSecondsAfterFinished: 0 ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: GatewayClass -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx -spec: - controllerName: gateway.nginx.org/nginx-gateway-controller - parametersRef: - group: gateway.nginx.org - kind: NginxProxy - name: nginx-gateway-proxy-config - namespace: nginx-gateway ---- -apiVersion: gateway.nginx.org/v1alpha1 -kind: NginxGateway -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway-config - namespace: nginx-gateway -spec: - logging: - level: info ---- -apiVersion: gateway.nginx.org/v1alpha2 -kind: NginxProxy -metadata: - labels: - app.kubernetes.io/instance: nginx-gateway - app.kubernetes.io/name: nginx-gateway - app.kubernetes.io/version: edge - name: nginx-gateway-proxy-config - namespace: nginx-gateway -spec: - kubernetes: - deployment: - container: - image: - pullPolicy: Always - repository: ghcr.io/nginx/nginx-gateway-fabric/nginx - tag: edge - replicas: 1 - service: - annotations: - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip - service.beta.kubernetes.io/aws-load-balancer-type: external - externalTrafficPolicy: Local - type: LoadBalancer diff --git a/deploy/crds.yaml b/deploy/crds.yaml index abb653ccc4..3f37a7e7bb 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -4052,11 +4052,6 @@ spec: service: description: Service is the configuration for the NGINX Service. properties: - annotations: - additionalProperties: - type: string - description: Annotations contain any Service-specific annotations. - type: object externalTrafficPolicy: default: Local description: |- diff --git a/examples/helm/README.md b/examples/helm/README.md index 7d66f2ee4a..e6b6edfcc7 100644 --- a/examples/helm/README.md +++ b/examples/helm/README.md @@ -14,7 +14,6 @@ This directory contains examples of Helm charts that can be used to deploy NGINX The secret must be created in the same namespace as the NGINX Gateway Fabric deployment. - [Experimental](./experimental) - deploys NGINX Gateway Fabric with the Gateway API experimental features enabled and NGINX OSS as the data plane. - [Experimental with NGINX Plus](./experimental-nginx-plus) - deploys NGINX Gateway Fabric with the Gateway API experimental features enabled and NGINX Plus as the data plane. The image is pulled from the NGINX Plus Docker registry, and the `imagePullSecretName` is the name of the secret to use to pull the image. The secret must be created in the same namespace as the NGINX Gateway Fabric deployment. -- [AWS NLB](./aws-nlb) - deploys NGINX Gateway Fabric with NGINX OSS using a Service of type `LoadBalancer` to allocate an AWS Network Load Balancer (NLB). - [Azure](./azure) - deploys NGINX Gateway Fabric with NGINX OSS using a nodeSelector to deploy the gateway on Linux nodes in an Azure Kubernetes Service (AKS) cluster. 
- [NodePort](./nodeport) - deploys NGINX Gateway Fabric with NGINX OSS using a Service of type `NodePort` to expose the gateway on a specific port on each node. diff --git a/examples/helm/aws-nlb/values.yaml b/examples/helm/aws-nlb/values.yaml deleted file mode 100644 index 3034ca995f..0000000000 --- a/examples/helm/aws-nlb/values.yaml +++ /dev/null @@ -1,8 +0,0 @@ -nginxGateway: - name: nginx-gateway -nginx: - service: - type: LoadBalancer - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "external" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip"
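
For reference, a minimal sketch of the `nginx.service` Helm values that remain configurable once the unused `annotations` key is removed. This snippet is not part of the patch; the field names and their meanings are taken from the updated README table above, while the concrete values shown are illustrative assumptions only:

    # Illustrative values.yaml excerpt (assumed example, not from the patch).
    # Only the service fields still documented in the chart are shown.
    nginx:
      service:
        type: LoadBalancer                # service type for the NGINX data plane
        externalTrafficPolicy: Local      # Local preserves the client source IP
        loadBalancerClass: ""             # requires nginx.service.type: LoadBalancer
        loadBalancerIP: ""                # static IP; requires type: LoadBalancer
        loadBalancerSourceRanges: []      # CIDRs allowed to reach the load balancer
        nodePorts: []                     # fixed node ports, if desired

Under this change, cloud-specific behavior previously driven by `nginx.service.annotations` (as in the deleted aws-nlb example) is no longer configurable through that key, since the field was unused by the chart.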