diff --git a/.cloudcannon/initial-site-settings.json b/.cloudcannon/initial-site-settings.json deleted file mode 100644 index ec214b7e0..000000000 --- a/.cloudcannon/initial-site-settings.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "ssg": "hugo", - "mode": "hosted", - "build": { - "install_command": "[ -f package.json ] && npm i", - "build_command": "hugo --destination public --baseURL / --noTimes", - "output_path": "public", - "environment_variables": [ - { - "key": "HUGO_CACHEDIR", - "value": "/usr/local/__site/src/.hugo_cache/" - } - ], - "preserved_paths": "node_modules/,.hugo_cache/,resources/", - "preserve_output": false, - "include_git": true, - "manually_configure_urls": false, - "hugo_version": "0.134.3", - "ruby_version": "2.7.3", - "node_version": "18", - "deno_version": "1.40.2" - } -} diff --git a/.cloudcannon/postbuild b/.cloudcannon/postbuild deleted file mode 100755 index e336d37e0..000000000 --- a/.cloudcannon/postbuild +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -## Find and fix broken images and links caused by how the baseURL is set in the config.yml -find public -type f -name "*.html" -exec sed -i 's/="\/\//="\//g' {} \; - -## Add the CloudCannon 'editable' class to content divs -find public -type f -name "index.html" -exec sed -i 's/class="content/& editable/g' {} \; diff --git a/.cloudcannon/prebuild b/.cloudcannon/prebuild deleted file mode 100755 index cf037d189..000000000 --- a/.cloudcannon/prebuild +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -## check the version of the hugo theme currently being used -## if it is different from the one defined in the go.mod file -## then run hugo mod get -u to update the theme - -theme_version=$(grep -o 'v[0-9]\+\(\.[0-9]\+\)\{2\}' go.mod) - -current_theme_version=$(hugo mod graph | grep -o 'v[0-9]\+\(\.[0-9]\+\)\{2\}') - -printf "Theme version in go.mod is: %s \n" "$theme_version" - -printf "Theme version in use by Hugo is: %s \n" "$current_theme_version" - - -# if the theme version in go.mod is the same as the one in use by Hugo, run hugo mod clean to clear any outdated theme files from the cache -# if the theme version in go.mod is different from the one in use by Hugo, update it by running hugo mod get -u - -if [ "$theme_version" == "$current_theme_version" ]; then - printf "Theme version in go.mod is the same as the one in use by Hugo. Cleaning the cache. \n" - hugo mod clean ; -elif [ "$theme_version" != "$current_theme_version" ]; then - printf "Updating theme version to %s \n" "$current_theme_version" - hugo mod get -u ; -# if an error occurs, exit with a non-zero status code -else - printf "An error occurred: unable to retrieve the latest version of the Hugo theme.\n" - exit 1 -fi diff --git a/.cloudcannon/preinstall b/.cloudcannon/preinstall deleted file mode 100644 index ccd94ee41..000000000 --- a/.cloudcannon/preinstall +++ /dev/null @@ -1 +0,0 @@ -install-hugo 0.134.2 diff --git a/.cloudcannon/schemas/concept.md b/.cloudcannon/schemas/concept.md deleted file mode 100644 index 58412247a..000000000 --- a/.cloudcannon/schemas/concept.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: -# Remove or change to false to turn off the right-hand in-page ToC -toc: true -# Add a short description (150 chars or less) for the doc. Include keywords for SEO. -# The description text appears in search results and at the top of the doc. 
-description: "" -# Assign weights in increments of 100 -# Lower numbers appear higher in the document list -weight: -tags: [ "docs" ] ---- - -## Overview - -Briefly describe the goal of this document, that is, what the user will learn or accomplish by reading what follows. - -## Concept 1 - format as a noun phrase - -This is where you explain the concept. Provide information that will help the user understand what the element/feature is and how it fits into the overall product. - -Organize content in this section with H3 and H4 headings. - -## Concept 2 - format as a noun phrase - -## Concept 3 - format as a noun phrase - -## What's Next - -- Provide up to 5 links to related topics (optional). -- Format as a bulleted list. diff --git a/.cloudcannon/schemas/default.md b/.cloudcannon/schemas/default.md deleted file mode 100644 index 474baab89..000000000 --- a/.cloudcannon/schemas/default.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: -# Remove or set to false to turn off the right-hand in-page ToC -toc: true -# Add a short description (150 chars or less) for the doc. Include keywords for SEO. -# The description text appears in search results and at the top of the doc. -description: "" -# Assign weights in increments of 100 -# Lower numbers appear higher in the document list -weight: -tags: [ "docs" ] ---- - -## Overview - -Briefly describe the goal of this document, that is, what the user will learn or accomplish by reading what follows. - -Introduce and explain any new concepts the user may need to understand before proceeding. - -## Before You Begin - -To complete the instructions in this guide, you need the following: - -1. Provide any prerequisites here. -2. Format as a numbered or bulleted list as appropriate. -3. Keep the list entries grammatically parallel.1. Provide any prerequisites here. - -## Goal 1 - write as a verb phrase - -Add introductory text. Say what the user will be doing. - -To do xzy, take the following steps: - -1. This is where you provide the steps that the user must take to accomplish the goal. - - ```bash - code examples should be nested within the list - ``` - -2. Format as numbered lists. - - {{< call-out "note" >}}Add notes like this.{{< /call-out >}} - -3. If there is only one step, you don't need to format it as a numbered list. - -## Goal 2 - write as a verb phrase - -## Goal 3 - write as a verb phrase - -## Discussion - -Use the discussion section to expand on the information presented in the steps above. - -This section contains the "why" information. - -This information lives at the end of the document so that users who just want to follow the steps don't have to scroll through a wall of explanatory text to find them. - -## Verification - -Explain how the user can verify the steps completed successfully. - -## What's Next - -- Provide up to 5 links to related topics (optional). -- Format as a bulleted list. 
diff --git a/.cloudcannon/schemas/headless-collection.md b/.cloudcannon/schemas/headless-collection.md deleted file mode 100644 index 3d65eaa0f..000000000 --- a/.cloudcannon/schemas/headless-collection.md +++ /dev/null @@ -1,3 +0,0 @@ ---- -headless: true ---- \ No newline at end of file diff --git a/.cloudcannon/schemas/includes.md b/.cloudcannon/schemas/includes.md deleted file mode 100644 index 8b447a2cc..000000000 --- a/.cloudcannon/schemas/includes.md +++ /dev/null @@ -1,3 +0,0 @@ ---- -docs: "" ---- \ No newline at end of file diff --git a/.cloudcannon/schemas/nms/policy.md b/.cloudcannon/schemas/nms/policy.md deleted file mode 100644 index bd2d670c3..000000000 --- a/.cloudcannon/schemas/nms/policy.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: -# Remove or set to false to turn off the right-hand in-page ToC -toc: true -# Add a short description (150 chars or less) for the doc. Include keywords for SEO. -# The description text appears in search results and at the top of the doc. -description: "" -# Assign weights in increments of 100 -# Lower numbers appear higher in the document list -weight: -tags: [ "docs" ] ---- - -## Overview - - - ---- - -## About XYZ Policy - - - -#### Intended Audience - - - ---- - -## Workflow for Applying Policy - - - ---- - -## Policy Settings - - - -The following table lists the configurable settings and their default values for the policy. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values | Description | Required | Default | -|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| -| `users.id` | integer | A unique int >= 1 | Description for value. | Yes | System assigned | -| `users.name` | string | Example: `Jane Doe` | A short description of what the field is used for. | Yes | Add the default value | -| `user.age` | integer | 1–110 | Description for the value | Yes | | - -{{< /bootstrap-table >}} - - ---- - -## Adding XYZ Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out >}} - -To create an XYZ policy using the REST API, send an HTTP `POST` request to the Add-Endpoint-Name-Here endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|--------|---------------------| -| `POST` | `/path/to/endpoint` | - -{{}} - - -
-JSON request - -``` json -{ - "users": [ - { - "id": 1, - "name": "John Doe", - "age": 24 - }, - { - "id": 2, - "name": "Jane Doe", - "age": 28 - } - ] -} -``` - -
- -{{%/tab%}} - -{{%tab name="UI"%}} - -To create an XYZ policy using the web interface: - -1. Go to the FQDN for your NGINX Management Suite host in a web browser and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. Add other steps here -3. As a numbered list. - -{{%/tab%}} - -{{
}} - ---- - -## Verify the Policy - - - -Confirm that the policy has been set up and configured correctly by taking these steps: - -- Add steps to verify policy was applied successfully to the the management plane. - -Confirm the policy is being enforced: - -- Add steps to verify policy is being enforced on the data plane. What can users expect to see? - ---- - -## Troubleshooting - - - -For help resolving common issues when setting up and configuring the policy, follow the steps in this section. If you cannot find a solution to your specific issue, reach out to [NGINX Customer Support]({{< ref "support/contact-support.md" >}}) for assistance. - -### Issue 1 - -Add a description for the issue. Include any error messages users might see. - -Resolution/Workaround: - -1. Add steps here -2. As a -3. numbered list. - -### Issue 2 - -Add a description for the issue. Include any error messages users might see. - -Resolution/Workaround: - -1. Add steps here -2. As a -3. numbered list. diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f08087005..392b2ecac 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -26,12 +26,16 @@ content/nginx/nms/agent/* @nginx/nginx-agent content/nap-dos/* @nginx/dos-docs-approvers # F5 WAF for NGINX -content/nap-waf/* @nginx/nap-docs-approvers -data/nap-waf/* @nginx/nap-docs-approvers +content/waf/* @nginx/waf +content/includes/waf/* @nginx/waf # NGINXaaS for Azure -content/nginxaas-azure/* @nginx/n4a-docs-approvers -content/includes/nginxaas-azure/* @nginx/n4a-docs-approvers +content/nginxaas-azure/* @nginx/nginxaas-docs-approvers +content/includes/nginxaas-azure/* @nginx/nginxaas-docs-approvers + +# NGINXaaS for Google Cloud +content/nginxaas-google/* @nginx/nginxaas-docs-approvers +content/includes/nginxaas-google/* @nginx/nginxaas-docs-approvers # NGINX Gateway Fabric content/ngf/* @nginx/nginx-gateway-fabric diff --git a/.github/ISSUE_TEMPLATE/1-feature_request.md b/.github/ISSUE_TEMPLATE/1-feature_request.md deleted file mode 100644 index 8b2820beb..000000000 --- a/.github/ISSUE_TEMPLATE/1-feature_request.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "" -labels: "enhancement" -assignees: "" ---- - -### Is your feature request related to a problem? Please describe - -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -### Describe the solution you'd like - -A clear and concise description of what you want to happen. - -### Describe alternatives you've considered - -A clear and concise description of any alternative solutions or features you've considered. - -### Additional context - -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/1-idea_suggestion.yml b/.github/ISSUE_TEMPLATE/1-idea_suggestion.yml new file mode 100644 index 000000000..8efc08162 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1-idea_suggestion.yml @@ -0,0 +1,44 @@ +name: Documentation idea +description: Suggest an idea for improving documentation +title: "[Idea]: " +body: + - type: textarea + attributes: + label: What is your idea for improving documentation? + description: Describe your idea with as much information as possible. + validations: + required: true + - type: dropdown + attributes: + label: Which product or products does this request relate to? 
+ multiple: true + options: + - F5 DoS for NGINX + - F5 WAF for NGINX + - NGINX Agent + - NGINXaaS for Azure + - NGINXaaS for Google Cloud + - NGINX Gateway Fabric + - NGINX Ingress Controller + - NGINX Instance Manager + - NGINX One Console + - NGINX Plus + - Other + validations: + required: true + - type: textarea + attributes: + label: Is this idea related to a larger problem? + description: If you have identified multiple related issues, it might be a design pattern problem. + validations: + required: true + - type: textarea + attributes: + label: What alternative ways are there to implement your idea? + description: There are often multiple ways to do something - context is important. + validations: + required: true + - type: textarea + attributes: + label: Any additional information + description: Add any remaining detail for this idea not covered by the above questions. diff --git a/.github/ISSUE_TEMPLATE/2-bug_report.md b/.github/ISSUE_TEMPLATE/2-bug_report.md deleted file mode 100644 index 12a6c787b..000000000 --- a/.github/ISSUE_TEMPLATE/2-bug_report.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: "" -labels: "bug" -assignees: "" ---- - -### Describe the bug - -A clear and concise description of what the bug is. - -### To reproduce - -Steps to reproduce the behavior: - -1. Deploy this project using [...] -2. View output/logs/configuration on [...] -3. See error - -### Expected behavior - -A clear and concise description of what you expected to happen. - -### Your environment - -- Version/release of this project or specific commit - -- Target deployment platform - -### Additional context - -Add any other context about the problem here. - -### Sensitive Information - -Remember to redact any sensitive information such as authentication credentials or license keys. diff --git a/.github/ISSUE_TEMPLATE/2-bug_report.yml b/.github/ISSUE_TEMPLATE/2-bug_report.yml new file mode 100644 index 000000000..879472441 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2-bug_report.yml @@ -0,0 +1,50 @@ +name: Bug report +description: Report an issue with our documentation +title: "[Bug]: " +body: + - type: textarea + attributes: + label: Describe the bug you have identified + description: Explain the problem with as much detail as possible. + validations: + required: true + - type: dropdown + attributes: + label: Which product or products does this request relate to? + multiple: true + options: + - F5 DoS for NGINX + - F5 WAF for NGINX + - NGINX Agent + - NGINXaaS for Azure + - NGINXaaS for Google Cloud + - NGINX Gateway Fabric + - NGINX Ingress Controller + - NGINX Instance Manager + - NGINX One Console + - NGINX Plus + - Other + validations: + required: true + - type: textarea + attributes: + label: Steps to reproduce the bug + description: Describe where the issue occurs. + validations: + required: true + - type: textarea + attributes: + label: What is the expected or desired behaviour? + description: Describe what you expected to happen instead of the bug. + validations: + required: true + - type: textarea + attributes: + label: What environments or versions does this bug affect? + description: Describe the contexts in which this bug seems to occur. + validations: + required: true + - type: textarea + attributes: + label: Any additional information + description: Add any remaining detail for this bug not covered by the above questions.
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/3-general_request.yml b/.github/ISSUE_TEMPLATE/3-general_request.yml new file mode 100644 index 000000000..c0487e266 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/3-general_request.yml @@ -0,0 +1,56 @@ +name: General request +description: Make a general documentation request. +title: "[Request]: " +body: + - type: textarea + attributes: + label: What would you like the documentation team to work on? + description: Please describe your request with as much detail as possible. + validations: + required: true + - type: dropdown + attributes: + label: Which product or products does this request relate to? + multiple: true + options: + - F5 DoS for NGINX + - F5 WAF for NGINX + - NGINX Agent + - NGINXaaS for Azure + - NGINXaaS for Google Cloud + - NGINX Gateway Fabric + - NGINX Ingress Controller + - NGINX Instance Manager + - NGINX One Console + - NGINX Plus + - Other + validations: + required: true + - type: textarea + attributes: + label: Who is the person directly responsible for initiating this request? + description: Name the specific person that made this request. + validations: + required: true + - type: textarea + attributes: + label: Is this request part of a larger initiative or project? + description: If it is, name the project the request relates to. + validations: + required: true + - type: textarea + attributes: + label: Are there important constraints for this request? + description: If the constraints are particularly complex, you may wish to organise a meeting before filing a request. + validations: + required: true + - type: textarea + attributes: + label: Does this request have a due date? + description: You should name any specific date or milestones (Such as "Q3") you have in mind for this request, and any dependencies or blockers affecting it. + validations: + required: true + - type: textarea + attributes: + label: Any additional information + description: Add any remaining detail for this request not covered by the above questions. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/3-story_task.md b/.github/ISSUE_TEMPLATE/3-story_task.md deleted file mode 100644 index b2f7f117d..000000000 --- a/.github/ISSUE_TEMPLATE/3-story_task.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: Story task -about: This template is for a story or task, encompassing a single work item for completion -title: "" -labels: "documentation" -projects: ["nginxinc/32"] -assignees: "" ---- - -*Remove italicized directions as relevant to reduce noise in the issue description.* - -## Overview - -*Written as a user story*. - -**As a** , **I want** , **So I can** . - -## Description - -*Add the finer details of what this task involves and is trying to accomplish. A problem well defined is half-solved*. - -## Acceptance criteria - -*Add any exacting acceptance criteria for the task to be complete. 
Start with known hard requirements, since they may create blockers.* \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/4-epic_overview.md b/.github/ISSUE_TEMPLATE/4-epic_overview.md deleted file mode 100644 index 53e13124a..000000000 --- a/.github/ISSUE_TEMPLATE/4-epic_overview.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -name: Epic overview -about: This template is for planning an epic, which is a large body of effort involving multiple stories or tasks -title: "" -labels: "epic, documentation" -projects: ["nginxinc/32"] -assignees: "" ---- - -*Remove italicized directions as relevant to reduce noise in the issue description.* - -## Description - -*Write a high-level description of what the body of work for this epic includes.* - -## Goals - -*Describe the intent of the epic and what the intended impact is for this effort.* - -## User stories - -*Add a user story for relevant persona to this epic, who are the stakeholders*. - -**As a** , -**I want** , -**So I can** . - -**As a** , -**I want** , -**So I can** . - -## Tasks - -*Create a simple list of tasks necessary for this epic. Finer details should be kept to sub-issues/tasks/stories.* - -- Example task 1 -- Example task 1 -- Example task 1 -- Example task 1 -- Example task 1 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/4-story_task.yml b/.github/ISSUE_TEMPLATE/4-story_task.yml new file mode 100644 index 000000000..341f1ea89 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/4-story_task.yml @@ -0,0 +1,41 @@ +name: User story +description: This template is for a single story, encompassing a single work item for completion +title: "[Story]: " +body: + - type: textarea + attributes: + label: Overview + description: Use this section to write user stories. + placeholder: As a user, I want thing, So I can action. + validations: + required: true + - type: dropdown + attributes: + label: Which product or products does this request relate to? + multiple: true + options: + - F5 DoS for NGINX + - F5 WAF for NGINX + - NGINX Agent + - NGINXaaS for Azure + - NGINXaaS for Google Cloud + - NGINX Gateway Fabric + - NGINX Ingress Controller + - NGINX Instance Manager + - NGINX One Console + - NGINX Plus + - Other + validations: + required: true + - type: textarea + attributes: + label: Description + description: Add the finer details of what this task involves and is trying to accomplish. + validations: + required: true + - type: textarea + attributes: + label: Acceptance criteria + description: Acceptance criteria are written from a user perspective, and should map to the user stories. + validations: + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/5-content_plan.md b/.github/ISSUE_TEMPLATE/5-content_plan.md deleted file mode 100644 index d68f7d5c2..000000000 --- a/.github/ISSUE_TEMPLATE/5-content_plan.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -name: Content release plan -about: This template is for a content release plan, typically tied to a product release -title: " v#.# content release plan" -labels: "documentation" -projects: ["nginxinc/32"] -assignees: "" ---- - -*Remove italicized directions as relevant to reduce noise in the issue description.* - -## Overview - -- **Product name**: -- **Release date**: - -A content release plan establishes and tracks the documentation work for a product related to a release. - -Add tickets to this content release plan as sub-issues, and update it as you go along. - -## Description - -*Write a high-level summary of changes expected in this release*. 
- -## User stories - -**As a** technical writer, -**I want** a content release plan for my product, -**So I can** ensure correct content is released alongside the latest version of the product. - -**As a** product owner, -**I want** a content release plan for my product, -**So I can** ensure the product team includes documentation as part of changes to the product. - -## Tasks - -*Create a simple list of tasks necessary for this content plan. Finer details can be kept to sub-issues.* -*Each task item should have a 1:1 relationship with a documentation item, which could be an engineering issue.* - -- [ ] Update changelog/release notes -- [ ] Update version reference information (Such as technical specifications, version shortcodes) -- [ ] Update any other documentation impacted by changes in this release -- Additional task 1 -- Additional task 2 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/5-content_plan.yml b/.github/ISSUE_TEMPLATE/5-content_plan.yml new file mode 100644 index 000000000..a5a069e6a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/5-content_plan.yml @@ -0,0 +1,28 @@ +name: Content release plan +description: This template is for a content release plan, typically tied to a product release +title: " v#.# content release plan" +body: + - type: textarea + attributes: + label: Overview + description: A content release plan establishes and tracks the documentation work for a product related to a release. + validations: + required: true + - type: textarea + attributes: + label: Description + description: Write a high-level summary of changes expected in this release. + validations: + required: true + - type: textarea + attributes: + label: User stories + description: As a user, I want thing, So I can action. + validations: + required: true + - type: textarea + attributes: + label: Tasks + description: Create a simple list of tasks necessary for this content plan. Finer details can be kept to sub-issues. + validations: + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/6-epic_overview.yml b/.github/ISSUE_TEMPLATE/6-epic_overview.yml new file mode 100644 index 000000000..bf7aebb87 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/6-epic_overview.yml @@ -0,0 +1,28 @@ +name: Epic overview +description: This template is for planning an epic, which is a large body of effort involving multiple stories or tasks +title: " " +body: + - type: textarea + attributes: + label: Description + description: Write a high-level description of what the body of work for this epic includes. + validations: + required: true + - type: textarea + attributes: + label: Goals + description: Describe the intent of the epic and what the intended impact is for this effort. + validations: + required: true + - type: textarea + attributes: + label: User stories + description: As a user, I want thing, So I can action. + validations: + required: true + - type: textarea + attributes: + label: Tasks + description: Create a simple list of tasks necessary for this epic. Finer details should be kept to sub-issues/tasks/stories. 
+ validations: + required: true \ No newline at end of file diff --git a/.github/labeler.yml b/.github/labeler.yml index 2e8d1f8d7..d3684b539 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -23,6 +23,12 @@ product/amplify: - changed-files: - any-glob-to-any-file: 'content/amplify/**' +product/dos: + - changed-files: + - any-glob-to-any-file: + - 'content/nap-dos/**' + - 'content/includes/nap-dos/**' + product/controller: - changed-files: - any-glob-to-any-file: @@ -37,18 +43,6 @@ product/modsec-waf: - changed-files: - any-glob-to-any-file: 'content/modsec-waf/**' -product/nap-dos: - - changed-files: - - any-glob-to-any-file: - - 'content/nap-dos/**' - - 'content/includes/nap-dos/**' - -product/nap-waf: - - changed-files: - - any-glob-to-any-file: - - 'content/nap-waf/**' - - 'content/includes/nap-waf/**' - product/ngf: - changed-files: - any-glob-to-any-file: @@ -97,12 +91,19 @@ product/unit: - 'content/unit/**' - 'content/includes/unit/**' +product/waf: + - changed-files: + - any-glob-to-any-file: + - 'content/waf/**' + - 'content/includes/waf/**' + # Other labels process documentation: - changed-files: - any-glob-to-any-file: - 'templates/**' + - 'documentation/**' - '*.md' - 'LICENSE' @@ -111,7 +112,6 @@ tooling: - any-glob-to-any-file: - 'layouts/**' - '.github/**' - - '.cloudcannon/**' - 'styles/**' - 'layouts/**' - 'config/**' @@ -123,6 +123,8 @@ tooling: - '*.sh' - '*.js' - 'Makefile' - - '.vale.ini' - - '.gitignore' - '.gitattributes' + - '.gitignore' + - '.gitlint' + - '.markdownlint.yaml' + - '.vale.ini' diff --git a/.github/workflows/coveo.yml b/.github/workflows/coveo.yml index c8f849c8f..b58daaa77 100644 --- a/.github/workflows/coveo.yml +++ b/.github/workflows/coveo.yml @@ -8,6 +8,9 @@ jobs: generate-coveo-search-token: name: Generate Coveo Search Tokens runs-on: ubuntu-latest + permissions: + contents: read + actions: read # for 8398a7/action-slack strategy: matrix: include: @@ -25,6 +28,8 @@ jobs: run: sudo apt-get install jq - name: Generating token for ${{matrix.env_name}} ... 
+ id: generate-token + continue-on-error: true env: COVEO_API_KEY: ${{secrets[matrix.env_api_key]}} COVEO_SEARCH_HUB: "HUB_ES_Nginx_Docs_And_Org" @@ -61,18 +66,42 @@ jobs: echo "{\"token\": \"$SEARCH_TOKEN\", \"org_id\": \"${{matrix.env_coveo_org_id}}\"}" > coveo/search_token.json - name: Upload token for ${{matrix.env_name}} - uses: actions/upload-artifact@v4 + if: ${{ steps.generate-token.outcome == 'success' }} + uses: actions/upload-artifact@v5 with: name: ${{matrix.env_name}} path: "./" + - name: Send a notification if token generation step failed + if: ${{ steps.generate-token.outcome == 'failure' }} + uses: 8398a7/action-slack@77eaa4f1c608a7d68b38af4e3f739dcd8cba273e # v3.19.0 + with: + status: custom + custom_payload: | + { + username: 'Github', + mention: 'channel', + attachments: [{ + title: '[${{ github.event.repository.full_name }}] Coveo Token Generation Failed (${{matrix.env_name}} environment)', + color: 'danger', + fields: [{ + title: 'Pipeline URL', + value: '', + short: false + }] + }] + } + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DOCS_INCIDENT }} + + push-tokens-to-azure: - name: Push coveo search tokens to Azure + name: Batch push coveo search tokens to Azure runs-on: ubuntu-latest needs: generate-coveo-search-token steps: - name: Download Coveo search token - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 - name: View files run: ls -R diff --git a/.github/workflows/linkchecker-bad.yml b/.github/workflows/linkchecker-bad.yml deleted file mode 100644 index 9946fb96c..000000000 --- a/.github/workflows/linkchecker-bad.yml +++ /dev/null @@ -1,42 +0,0 @@ -# This is a TEMPORARY Action that is testing the docs with existing broken URLs -# Once issues have been resolved and the project docs are passing the jobs will be moved into QE LinkChecker - -name: QE LinkChecker Failing - -# This workflow runs on a schedule or can be run manually -on: - workflow_dispatch: - schedule: - # Runs every day at 8am UTC - - cron: '0 8 * * *' - -jobs: - nginx-plus: - runs-on: ubuntu-latest - steps: - # Install LinkChecker - - name: Install LinkChecker - run: sudo apt-get update && sudo apt-get install -y linkchecker - # Run LinkChecker on nginx-plus docs - - name: Run LinkChecker nginx-plus - run: linkchecker https://docs.nginx.com/nginx/ --no-warnings --check-extern --ignore-url ^https://consent.trustarc.com --ignore-url ^http://backend1.example.com --ignore-url ^http://www.example.com --ignore-url ^http://example.com --ignore-url ^https://my-nginx.example.com --ignore-url ^https://www.nginxroute53.com --ignore-url ^http://cafe --ignore-url ^http://192.168.1.23 --ignore-url ^https://company.com --ignore-url ^https://my-nginx-plus.example.com --ignore-url ^https://cognito-idp --ignore-url ^https:///www.okta.com - - nginx-oss: - runs-on: ubuntu-latest - steps: - # Install LinkChecker - - name: Install LinkChecker - run: sudo apt-get update && sudo apt-get install -y linkchecker - # Run LinkChecker on nginx-oss docs - - name: Run LinkChecker nginx-oss - run: linkchecker https://nginx.org/en/docs/ --no-warnings --check-extern - - nginx-agent: - runs-on: ubuntu-latest - steps: - # Install LinkChecker - - name: Install LinkChecker - run: sudo apt-get update && sudo apt-get install -y linkchecker - # Run LinkChecker on nginx-agent docs - - name: Run LinkChecker nginx-agent - run: linkchecker https://docs.nginx.com/nginx-agent/ --no-warnings --check-extern --ignore-url ^https://consent.trustarc.com --ignore-url ^http://localhost diff --git 
a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index 293a4bcc9..5e279976a 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -36,8 +36,9 @@ env: --ignore-url ^http://backend1.example.com --ignore-url ^http://example.com --ignore-url ^https://my-nginx.example.com --ignore-url ^https://www.nginxroute53.com --ignore-url ^http://cafe --ignore-url ^http://192.168.1.23 --ignore-url ^https://company.com --ignore-url ^https://my-nginx-plus.example.com --ignore-url ^https://cognito-idp --ignore-url ^https:///www.okta.com - --ignore-url ^http://www.maxmind.com --ignore-url ^https://www.maxmind.com --ignore-url ^https://www.opswat.com + --ignore-url ^http://www.maxmind.com --ignore-url ^https://www.maxmind.com --ignore-url ^https://www.opswat.com --ignore-url ^https://grsecurity.net --ignore-url ^https://support.pingidentity.com --ignore-url ^https://docs.pingidentity.com --ignore-url ^https://demo.example.com + --ignore-url ^https://my-deployment.my-region.nginxaas.net/connectivity --ignore-url ^https://\([a-zA-Z0-9-]+\).nginx.com/nginx-ingress-controller/css --ignore-url ^https://\([a-zA-Z0-9-]+\).nginx.com/nginxaas/azure/css --ignore-url ^https://\([a-zA-Z0-9-]+\).nginx.com/nginx-gateway-fabric/css @@ -52,10 +53,9 @@ jobs: - nginx-instance-manager - solutions - nginx-app-protect-dos - - nginx-app-protect-waf + - waf - nginx-ingress-controller - nginxaas/azure - - nginx-service-mesh - nginx-amplify - nginx-controller - nginx-waf diff --git a/.github/workflows/notification.yml b/.github/workflows/notification.yml index d0494d9d3..a01c09880 100644 --- a/.github/workflows/notification.yml +++ b/.github/workflows/notification.yml @@ -4,7 +4,6 @@ on: branches: [main] workflows: - "QE LinkChecker" - - "Check for Broken Links" - "UI validation on prod" types: [completed] diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 1abc40809..3130ef440 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -34,7 +34,7 @@ jobs: persist-credentials: false - name: Run analysis - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif @@ -48,7 +48,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF format to the repository Actions tab. - name: Upload artifact - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: SARIF file path: results.sarif @@ -56,6 +56,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: Upload SARIF results to code scanning - uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.29.5 + uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 with: sarif_file: results.sarif diff --git a/.github/workflows/playwright.yml b/.github/workflows/playwright.yml index 67d920cf8..209d8716f 100644 --- a/.github/workflows/playwright.yml +++ b/.github/workflows/playwright.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-node@v5 + - uses: actions/setup-node@v6 with: node-version: lts/* - name: Install dependencies @@ -18,13 +18,13 @@ jobs: run: npx playwright install --with-deps - name: Run Playwright tests run: npx playwright test --retries=2 - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 if: ${{ !cancelled() }} with: name: playwright-report path: tests/playwright-report/ retention-days: 30 - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 if: ${{ !cancelled() }} with: name: test-results diff --git a/.github/workflows/stale-branches.yml b/.github/workflows/stale-branches.yml new file mode 100644 index 000000000..cfbe0c295 --- /dev/null +++ b/.github/workflows/stale-branches.yml @@ -0,0 +1,41 @@ +name: Prune stale branches +on: + workflow_dispatch: + schedule: + - cron: '30 1 * * *' # run every day at 01:30 UTC + +jobs: + stale-branches: + runs-on: ubuntu-latest + env: + ORG: nginx + REPO: documentation + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + permissions: + contents: write + steps: + - name: Prune all stale branches + run: | + HAS_MORE="true" + + while [ "$HAS_MORE" == "true" ]; do + RESPONSE=$(curl -L -s \ + -X GET \ + -H "Accept: application/json" \ + -s "https://github.com/${{env.ORG}}/${{env.REPO}}/branches/stale") + + HAS_MORE=$(echo "$RESPONSE" | jq -r '.payload.has_more') + BRANCHES=$(echo "$RESPONSE" | jq -r '.payload.branches[].name') + + for BRANCH in $BRANCHES; do + echo "Deleting branch $BRANCH..." + DELETE_RESPONSE=$(curl -L -s \ + -X DELETE \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{env.GITHUB_TOKEN}}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/${{env.ORG}}/${{env.REPO}}/git/refs/heads/$BRANCH) + echo "Delete response for branch $BRANCH: $DELETE_RESPONSE" + done + done + diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index f10888ec3..64b58a600 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -18,7 +18,7 @@ jobs: pull-requests: write # for actions/stale to close stale PRs runs-on: ubuntu-latest steps: - - uses: actions/stale@v10.0.0 + - uses: actions/stale@v10.1.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'This issue is stale because it has been open for 90 days with no activity. Remove the stale label or add a comment to keep it open. If you do not take action, this will be closed in 10 days.' 
diff --git a/.gitlint b/.gitlint index c776e9dbb..03a77d3f7 100644 --- a/.gitlint +++ b/.gitlint @@ -3,7 +3,7 @@ ignore = body-is-missing contrib = contrib-title-conventional-commits [title-max-length] -line-length = 50 +line-length = 60 [title-min-length] min-length = 5 diff --git a/.markdownlint.yaml b/.markdownlint.yaml index 8ed0d233d..26a707400 100644 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -1,42 +1,40 @@ -default: true +# markdownlint enables all rules by default +# Setting default to false means that rules are +# instead enabled on a case-by-case basis +default: false -MD002: false +# Heading levels should only increment by one level at a time +MD001: true -MD003: false +# Detects if link brackets are in the wrong order: ()[] +MD011: true -MD004: - style: dash +# No multiple consecutive blank lines +MD012: true -MD009: false +# Headings should be surrounded by blank lines +MD022: true -MD010: false +# Headings must start at the beginning of a line +MD023: true -MD012: false +# No trailing punctuation in headings +MD026: true -MD013: - line_length: 5000 - heading_line_length: 60 - code_block_line_length: 80 - code_blocks: true - tables: false - headings: true - headers: true - strict: false - stern: false +# Code blocks should be surrounded by blank lines +MD031: true -MD022: false +# Lists should be surrounded by blank lines +MD032: true -MD024: - siblings_only: true +# Emphasis should not be used instead of headings +MD036: true -MD029: false +# No spaces on the outside of link text +MD039: true -MD033: false +# All images should have alt text +MD045: true -MD034: false - -MD041: false - -MD046: false - -MD051: false \ No newline at end of file +# Tables should be surrounded by blank lines +MD058: true \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d48d49e18..8f91396aa 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,9 +4,13 @@ default_install_hook_types: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 + rev: v6.0.0 hooks: - id: no-commit-to-branch +- repo: https://github.com/DavidAnson/markdownlint-cli2 + rev: v0.18.1 + hooks: + - id: markdownlint-cli2 - repo: https://github.com/jorisroovers/gitlint rev: v0.19.1 hooks: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3988f3345..3141d7b41 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -34,14 +34,14 @@ If you'd like to discuss something NGINX-related that doesn't involve documentat ## Submit a pull request -Before making documentation changes, you should view the [documentation style guide](/documentation/style-guide.md) and [Managing content with Hugo](/documentation/writing-hugo.md). +Before making documentation changes, you should view the [documentation style guide](/documentation/style-guide.md) and [Managing content with Hugo](/documentation/hugo-content.md). To understand how we use Git in this repository, read our [Git conventions](/documentation/git-conventions.md) documentation. 
The broad workflow is as follows: - Fork the NGINX repository - - If you're an F5/NGINX user, you can work from a clone + - If you're an F5/NGINX user, you can branch directly with a clone - Create a branch - Implement your changes in your branch - Submit a pull request (PR) when your changes are ready for review @@ -53,10 +53,9 @@ Alternatively, you're welcome to suggest improvements to highlight problems with To ensure a balance between work carried out by the NGINX team while encouraging community involvement on this project, we use the following issue lifecycle: -- A new issue is created by a community member -- An owner on the NGINX team is assigned to the issue; this owner shepherds the issue through the subsequent stages in the issue lifecycle -- The owner assigns one or more [labels](https://github.com/nginxinc/oss-docs/issues/labels) to the issue -- The owner, in collaboration with the community member, determines what milestone to attach to an issue. They may be milestones correspond to product releases +- A new issue is created by +- A maintainer from the F5 team is assigned to the issue; this maintainer shepherds the issue through the subsequent stages in the issue lifecycle +- The maintainer assigns one or more [labels](https://github.com/nginx/documentation/labels) to the issue ## Additional NGINX documentation diff --git a/_banners/eol-amplify.md b/_banners/eol-amplify.md new file mode 100644 index 000000000..0f70571c4 --- /dev/null +++ b/_banners/eol-amplify.md @@ -0,0 +1,12 @@ +{{< banner "warning" "Support for NGINX Amplify Ends January 31, 2026" >}} + +NGINX Amplify support ends on January 31, 2026. To continue monitoring your NGINX instances, migrate to the NGINX One Console, part of the NGINX One product offering. + +**Transition steps:** + +1. [Contact the F5 NGINX Sales team](https://www.f5.com/products/get-f5/f5-nginx-products-and-packaging) to get an NGINX One subscription, if you don’t already have one. +2. [Uninstall the NGINX Amplify agent]({{< ref "/amplify/nginx-amplify-agent/install/uninstalling-amplify-agent.md" >}}). +3. [Follow the NGINX One Console getting started guide]({{< ref "/nginx-one/getting-started.md" >}}). + +[Professional Services](https://www.f5.com/services) are available to support your transition. Additional fees may apply. +{{}} \ No newline at end of file diff --git a/_banners/waf-parameter-reference.md b/_banners/waf-parameter-reference.md new file mode 100644 index 000000000..a4c60c235 --- /dev/null +++ b/_banners/waf-parameter-reference.md @@ -0,0 +1,7 @@ +{{< banner "note" "Policy parameter reference" >}} + +You can explore the parameters for each F5 WAF for NGINX feature on the [Policy parameter reference]({{< ref "/waf/policies/parameter-reference.md" >}}) page. + +This page was previously referred to as the "Declarative Policy". 
+ +{{< /banner >}} diff --git a/archetypes/concept.md b/archetypes/concept.md index b72f32b60..6ef0b90ee 100644 --- a/archetypes/concept.md +++ b/archetypes/concept.md @@ -8,7 +8,7 @@ toc: false # Types have a 1:1 relationship with Hugo archetypes, so you shouldn't need to change this nd-content-type: concept # Intended for internal catalogue and search, case sensitive: -# Agent, N4Azure, NIC, NIM, NGF, NAP-DOS, NAP-WAF, NGINX One, NGINX+, Solutions, Unit +# AGE, DOS, NAZ, NGC, NGF, NIC, NIM, NGF, ONE, NOS, NPL, SOL, WAF nd-product: --- @@ -42,7 +42,7 @@ It is an example of a , and is closely related to # Read their documentation for usage: https://mermaid.js.org/intro/ ``` -Starting from the of the diagram, you can see that is connected to : this relationship is established when configuring as part of . +Starting from the \ of the diagram, you can see that \ is connected to \: this relationship is established when configuring \ as part of \. [//]: # "End a particular use case section with links to other pages, such as instructional documentation, other concepts, or reference information (Such as API specifications)." @@ -52,7 +52,6 @@ Starting from the of the diagram, you can see that is connect ### Use case 2 - ## Conclusion [//]: # "Summarize everything that the reader will have learned by reading this entire concept document." diff --git a/archetypes/default.md b/archetypes/default.md index ba9cf1281..5e58d6feb 100644 --- a/archetypes/default.md +++ b/archetypes/default.md @@ -8,7 +8,7 @@ toc: false # Types have a 1:1 relationship with Hugo archetypes, so you shouldn't need to change this nd-content-type: how-to # Intended for internal catalogue and search, case sensitive: -# Agent, N4Azure, NIC, NIM, NGF, NAP-DOS, NAP-WAF, NGINX One, NGINX+, Solutions, Unit +# AGE, DOS, NAZ, NGC, NGF, NIC, NIM, NGF, ONE, NOS, NPL, SOL, WAF nd-product: --- @@ -39,6 +39,7 @@ To complete this guide, you will need the following prerequisites: ```shell # We typically show examples of commands or code in one code block, which can be easily copied by a reader using a button connected to the block. ``` + ```text # A second code block is used underneath the first to show what kind of example output to expect from the command. Truncate unnecessary output with ellipses (...). ``` @@ -57,17 +58,14 @@ To complete this guide, you will need the following prerequisites: ### Sub-step 1 - ### Sub-step 2 - ## Step 3 [//]: # "The final step of a how-to guide is usually a final test, and summarizes all of the previous steps taken to accomplish the purpose of the guide." ### Sub-step 1 - ### Sub-step 2 ## Next steps diff --git a/archetypes/landing-page.md b/archetypes/landing-page.md index be1cca187..184527cf6 100644 --- a/archetypes/landing-page.md +++ b/archetypes/landing-page.md @@ -14,11 +14,12 @@ nd-landing-page: true # Types have a 1:1 relationship with Hugo archetypes, so you shouldn't need to change this nd-content-type: landing-page # Intended for internal catalogue and search, case sensitive: -# Agent, N4Azure, NIC, NIM, NGF, NAP-DOS, NAP-WAF, NGINX One, NGINX+, Solutions, Unit +# AGE, DOS, NAZ, NGC, NGF, NIC, NIM, NGF, ONE, NOS, NPL, SOL, WAF nd-product: --- ## About + [//]: # "These are Markdown comments to guide you through document structure. Remove them as you go, as well as any unnecessary sections." [//]: # "Use underscores for _italics_, and double asterisks for **bold**." [//]: # "Backticks are for `monospace`, used sparingly and reserved mostly for executable names - they can cause formatting problems. 
Avoid them in tables: use italics instead." @@ -27,6 +28,7 @@ nd-product: [//]: # "Name specific functionality it provides: avoid ambiguous descriptions such as 'enables efficiency', focus on what makes it unique." ## Featured content + [//]: # "You can add a maximum of three cards: any extra will not display." [//]: # "One card will take full width page: two will take half width each. Three will stack like an inverse pyramid." [//]: # "Some examples of content could be the latest release note, the most common install path, and a popular new feature." diff --git a/archetypes/tutorial.md b/archetypes/tutorial.md index 1190e6072..a9ae12e45 100644 --- a/archetypes/tutorial.md +++ b/archetypes/tutorial.md @@ -8,7 +8,7 @@ toc: false # Types have a 1:1 relationship with Hugo archetypes, so you shouldn't need to change this nd-content-type: tutorial # Intended for internal catalogue and search, case sensitive: -# Agent, N4Azure, NIC, NIM, NGF, NAP-DOS, NAP-WAF, NGINX One, NGINX+, Solutions, Unit +# AGE, DOS, NAZ, NGC, NGF, NIC, NIM, NGF, ONE, NOS, NPL, SOL, WAF nd-product: --- @@ -18,16 +18,16 @@ nd-product: [//]: # "Begin each document with a sentence or two explaining what the purpose of the guide is, and what high-level actions to expect. No need to adhere precisely the example text given anywhere in this template." -This guide is a tutorial on how to set up . While going through the steps of this tutorial, , and will be introduced and explained to understand how works. +This guide is a tutorial on how to set up \. While going through the steps of this tutorial, \, \ and \ will be introduced and explained to understand how \ works. -By the end of the tutorial, you should have enough working knowledge of to develop your own . +By the end of the tutorial, you should have enough working knowledge of \ to develop your own \. ## Background [//]: # "The largest difference between a tutorial and a how-to document is the scope of detail included. While working on the tutorial, consider what overlap might exist with a concept document." [//]: # "We want to reduce the amount of context switching a reader has to go through, so it might be beneficial to convert some text content into an include for re-use between a tutorial and a concept document." - is a common use for : it enables the ability to use , and , which are important when configuring for . +\ is a common use for \: it enables the ability to use \, \ and \, which are important when configuring \ for \. ## Before you begin @@ -46,7 +46,7 @@ To complete this guide, you will need the following prerequisites: [//]: # "The text immediately following a heading in a tutorial should likely explain a concept to build a mental model of what the reader is about to do." [//]: # "If it's a successive step (One after the first), you might refer to work already done to follow the sequence of operations." -The first thing required for setting up is . This is the that the will run on. The that is set-up from this step is necessary for , and will be connected to and in a later step. The we are configuring will look something along the lines of this by the end: +The first thing required for setting up \ is \. This is the \ that the \ will run on. The \ that is set-up from this step is necessary for \, and will be connected to \ and \ in a later step. The \ we are configuring will look something along the lines of this by the end: [//]: # "If it helps, include a diagram of some kind. 
Ensure your description provides all the context required, however: a diagram is an aid to explain things, not a replacement." @@ -55,7 +55,7 @@ The first thing required for setting up is . This is the of the diagram, you can see that is connected to : this relationship is established when configuring as part of . +Starting from the \ of the diagram, you can see that \ is connected to \: this relationship is established when configuring \ as part of \. ### Sub-step 1 @@ -63,20 +63,22 @@ Starting from the of the diagram, you can see that is connect [//]: # "Though there may be multiple ways to accomplish a task, focus on showing the reader the exact way to do one." [//]: # "You can mention alternative paths, but do not give unnecessary detail: it detracts from the task at hand." -To set up , start by running the following command. It will create : take note of the value, as it will be used for connecting in later steps. +To set up \, start by running the following command. It will create \: take note of the \ value, as it will be used for connecting \ in later steps. ```shell # We typically show examples of commands or code in one code block, which can be easily copied by a reader using a button connected to the block. ``` + ```text # A second code block is used underneath the first to show what kind of example output to expect from the command. Truncate unnecessary output with ellipses (...). ``` -To verify the creation of , you can also inspect information about it using . The output should look something like this: +To verify the creation of \, you can also inspect information about it using \. The output should look something like this: ```shell ``` + ``` ``` @@ -89,17 +91,14 @@ To verify the creation of , you can also inspect information about it ### Sub-step 1 - ### Sub-step 2 - ## Conclusion [//]: # "Summarize everything that the reader will have learned and accomplished by the end of this tutorial." [//]: # "It should fulfill the promise made by the introductory paragraph at the top of the document." [//]: # "You may wish to link to another tutorial as the next logical step, but that could also be part of the 'See also' section." - ## Next steps [//]: # "Link to related documents, such as concepts, reference material or specific use cases." diff --git a/cloudcannon.config.yml b/cloudcannon.config.yml deleted file mode 100644 index 20e3bc41a..000000000 --- a/cloudcannon.config.yml +++ /dev/null @@ -1,814 +0,0 @@ -paths: - collections: content -collections_config: - pages: - parse_branch_index: true - path: / - standalone_pages: - path: content/ - output: true - parse_branch_index: false - filter: - base: strict - include: - - ossc.md - - search.md - - success.md - name: Standalone Website Pages - description: This collection contains standalone website pages like the search landing page. - icon: notes - sort_options: - - key: weight - order: ascending - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: true - disable_add_folder: false - disable_file_actions: false - schemas: {} - nginx_plus: - path: content/nginx - name: NGINX Plus - description: Documentation for NGINX and NGINX Plus. 
- output: true - parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: false - disable_file_actions: false - schemas: - default: - path: .cloudcannon/schemas/default.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Task (default) - icon: task - preview: {} - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - concept: - path: .cloudcannon/schemas/concept.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Concept - icon: lightbulb - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - nap_dos: - path: content/nap-dos - output: true - name: F5 DoS for NGINX - description: Documentation for F5 DoS for NGINX - parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: false - disable_file_actions: false - schemas: - default: - path: .cloudcannon/schemas/default.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Task (default) - icon: task - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - concept: - path: .cloudcannon/schemas/concept.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Concept - icon: lightbulb - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - nap_waf: - path: content/nap-waf - output: true - name: F5 WAF for NGINX - description: Documentation for F5 WAF for NGINX. - parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: false - disable_file_actions: false - schemas: - default: - path: .cloudcannon/schemas/default.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Task (default) - icon: task - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - concept: - path: .cloudcannon/schemas/concept.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Concept - icon: lightbulb - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - nms: - path: content/nms - output: true - name: NGINX Management Suite - description: Documentation for NGINX Management Suite. 
- parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: false - disable_file_actions: false - schemas: - default: - path: .cloudcannon/schemas/default.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Task (default) - icon: task - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - concept: - path: .cloudcannon/schemas/concept.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Concept - icon: lightbulb - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - policy: - path: .cloudcannon/schemas/nms/policy.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: ACM Policy - icon: table_chart - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - controller: - path: content/controller - output: true - name: NGINX Controller - description: Documentation for NGINX Controller. - parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: true - disable_file_actions: true - schemas: - default: - path: .cloudcannon/schemas/default.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Task (default) - icon: task - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - modsec_waf: - path: content/modsec-waf - output: true - name: NGINX ModSec WAF - description: Documentation for NGINX ModSec WAF. - parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: true - disable_add_folder: true - disable_file_actions: true - schemas: - default: - path: .cloudcannon/schemas/default.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Task (default) - icon: task - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - solution_bundles: - path: content/solutions - output: true - name: NGINX Solution Bundles - description: Instructions for deploying the multi-product solution bundles. 
- parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: false - disable_file_actions: false - schemas: - default: - path: .cloudcannon/schemas/default.md - reorder_inputs: true - hide_extra_inputs: true - remove_empty_inputs: false - remove_extra_inputs: false - name: Task (default) - icon: task - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - includes: - path: content/includes - output: false - name: Content Reuse - description: A collection of files that can be reused in other documents across the website. - filter: - exclude: - - index.md - parse_branch_index: false - icon: notes - preview: - metadata: - - text: - - key: path - icon: folder - _editables: {} - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: false - disable_file_actions: false - schemas: - default: - path: .cloudcannon/schemas/includes.md - name: Includes template - headless-collection: - path: .cloudcannon/schemas/headless-collection.md - name: Index.md template - data: - path: data - output: false - icon: dataset - _editables: {} - _enabled_editors: - - data - - content - _inputs: {} - _select_data: {} - _structures: {} - create: - path: '[relative_base_path]/{directory}/{filename|slugify}.[ext]' - extra_data: {} - _inputs: {} - _select_data: {} - _structures: {} - disable_add: false - disable_add_folder: true - disable_file_actions: true - schemas: {} -collections_config_override: true -collection_groups: - - heading: "Standalone Web Pages" - collections: - - "standalone_pages" - - heading: "Product Docs" - collections: - - "nginx_plus" - - "nap_dos" - - "nap_waf" - - "nms" - - "controller" - - "modsec_waf" - - heading: "Solutions" - collections: - - "solution_bundles" - - heading: "Content Reuse" - collections: - - "includes" -_enabled_editors: - - content -_inputs: {} -_select_data: {} -_structures: {} -_editables: - text: - bold: true - copyformatting: true - italic: true - link: true - redo: true - removeformat: true - strike: true - subscript: true - superscript: true - underline: true - undo: true -data_config: true -timezone: Etc/UTC -commit_templates: - - template_string: '{commit_type}: {message|trim} {breaking_change|if=breaking_change_message}' - _inputs: - commit_type: - type: select - options: - allow_empty: true - values: - - feature - - fix - - refactor - - update - cascade: true - breaking_change_message: - type: text - cascade: true - extra_data: - breaking_change: |- - - ⚠️ {breaking_change_message} -_snippets_imports: - hugo: - exclude: - - hugo_youtube - - hugo_twitter - - hugo_vimeo - - hugo_instagram -_snippets: - important: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Important - subtext: - - key: content_markdown - - Important note in a callout box. - icon: priority_high - definitions: - shortcode_name: important - content_key: content_markdown - named_args: [] - note: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Note - subtext: - - key: content_markdown - - Note in callout box. 
- icon: edit_note - definitions: - shortcode_name: note - content_key: content_markdown - named_args: [] - see-also: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: See also - subtext: - - key: content_markdown - - See Also note in callout box. - icon: visibility - definitions: - shortcode_name: see-also - content_key: content_markdown - named_args: [] - before-you-begin: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Before You Begin - subtext: - - key: content_markdown - - Before You Begin note in callout box. - icon: front_hand - definitions: - shortcode_name: before-you-begin - content_key: content_markdown - named_args: [] - deprecated: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Deprecated - subtext: - - key: content_markdown - - Deprecated callout in a box. - icon: delete - definitions: - shortcode_name: deprecated - content_key: content_markdown - named_args: [] - caution: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Caution - subtext: - - key: content_markdown - - Caution callout in a box. - icon: announcement - definitions: - shortcode_name: caution - content_key: content_markdown - named_args: [] - warning: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Warning - subtext: - - key: content_markdown - - Warning callout in a box. - icon: warning - definitions: - shortcode_name: warning - content_key: content_markdown - named_args: [] - call-out: - template: hugo_paired_shortcode_positional_args - inline: false - preview: - text: - - key: callout_title - - Callout - subtext: - - key: content_markdown - - Custom callout with title in a box. - icon: - - key: icon_param - - document - definitions: - shortcode_name: call-out - content_key: content_markdown - positional_args: - - editor_key: icon_param - type: string - - editor_key: callout_title - type: string - Internal comment: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Comment (Internal only) - subtext: - - key: content_markdown - - Internal comment that will not be rendered in the page. - icon: tag - definitions: - shortcode_name: comment - content_key: content_markdown - named_args: [] - include: - template: hugo_shortcode_positional_args - inline: true - preview: - text: Include - subtext: - - key: url - - Include snippet. - icon: file_copy - definitions: - shortcode_name: include - positional_args: - - editor_key: url - type: string - bootstrap-table: - template: hugo_paired_shortcode_positional_args - inline: false - preview: - text: Bootstrap Table - subtext: - - key: content_markdown_table - - type: code - - Markdown table using Bootstrap. - icon: table_chart - definitions: - shortcode_name: bootstrap-table - content_key: content_markdown_table - positional_args: - - editor_key: table_style - _inputs: - content_markdown_table: - type: code - cascade: true - raw-html: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: Raw HTML - subtext: - - key: content_code_block - - Raw HTML Code.
- icon: html - definitions: - shortcode_name: raw-html - content_key: content_code_block - named_args: [] - img: - template: hugo_shortcode_named_args - inline: true - preview: - text: - - key: src - - Image - subtext: - - key: alt - - Alternative text - icon: image - definitions: - shortcode_name: img - named_args: - - editor_key: src - type: string - - editor_key: caption - optional: true - type: string - - editor_key: alt - optional: true - type: string - - editor_key: width - optional: true - type: string - - editor_key: height - optional: true - type: string - tab group: - template: hugo_paired_shortcode_named_args - inline: false - preview: - text: - - key: name - - Tab group section - subtext: - - key: content_code_block - - Tabs content - icon: tab - definitions: - shortcode_name: tabs - content_key: content_code_block - named_args: - - editor_key: name - type: string - tab section: - template: hugo_paired_markdown_shortcode_named_args - inline: false - preview: - text: - - key: name - - Tab inside tabbed section - subtext: - - key: tab_content - - Tab content - icon: tab - definitions: - shortcode_name: tabs - content_key: tab_content - named_args: - - editor_key: name - type: string - beta-badge: - template: hugo_shortcode_positional_args - inline: false - preview: - text: This topic documents an early access feature. - subtext: >- - These features are provided for you to try before they are generally - available. You shouldn't use early access features for production - purposes. - icon: science - definitions: - shortcode_name: beta-badge - relref: - template: hugo_shortcode_positional_args - inline: true - preview: - view: inline - icon: link - text: - - key: url - - Empty URL - definitions: - shortcode_name: relref - positional_args: - - editor_key: url - type: string - link: - template: hugo_shortcode_positional_args - inline: true - preview: - text: - - key: text - - Link to download a file - icon: system_update_alt - definitions: - shortcode_name: link - positional_args: - - editor_key: url - type: string - - editor_key: text - fontawesome-Icon: - template: hugo_shortcode_positional_args - inline: true - preview: - view: inline - text: FA-icon - icon: - - key: icon_name - definitions: - shortcode_name: fa - positional_args: - - editor_key: icon_name - type: string - - editor_key: styles - type: string - optional: true - remove_empty: true - icon-resolved: - template: hugo_markdown_shortcode_named_args - inline: true - preview: - text: Resolved Bug Icon - icon: bug_report - definitions: - shortcode_name: icon-resolved - content_key: content_markdown - named_args: [] - icon-bug: - template: hugo_markdown_shortcode_named_args - inline: true - preview: - text: Bug Icon - icon: bug_report - definitions: - shortcode_name: icon-bug - content_key: content_markdown - named_args: [] - shortversions: - template: hugo_shortcode_positional_args - inline: false - preview: - text: Shortversions - This shortcode has been deprecated - icon: format_list_numbered - definitions: - shortcode_name: shortversions - positional_args: - - editor_key: from - type: string - - editor_key: to - type: string - - editor_key: product - type: string - versions: - template: hugo_shortcode_positional_args - inline: false - preview: - text: Versions - This shortcode has been deprecated - icon: format_list_numbered - definitions: - shortcode_name: versions - positional_args: - - editor_key: from - type: string - - editor_key: to - type: string - - editor_key: product - type: string diff --git 
a/config/_default/config.toml b/config/_default/config.toml index c80e3958a..a1c107d9c 100644 --- a/config/_default/config.toml +++ b/config/_default/config.toml @@ -36,6 +36,11 @@ enableGitInfo = true unsafe = true [markup.highlight] guessSyntax = true + [markup.goldmark.extensions.typographer] + rightDoubleQuote = '"' + leftDoubleQuote = '"' + rightSingleQuote = ''' + leftSingleQuote = ''' [params] useSectionPageLists = "false" diff --git a/content/_index.md b/content/_index.md index 515bcd330..ba5fe7d35 100644 --- a/content/_index.md +++ b/content/_index.md @@ -43,7 +43,10 @@ Learn how to deliver, manage, and protect your applications using F5 NGINX produ {{}} {{}} - {{}} + {{}} Infrastructure-as-a-Service (IaaS) version of NGINX Plus for your Microsoft Azure application stack. {{}} + {{}} + Managed NGINX service for your Google Cloud application stack. + {{}} {{}} diff --git a/content/agent/configuration/configuration-overview.md b/content/agent/configuration/configuration-overview.md index 0a85eede6..22a602330 100644 --- a/content/agent/configuration/configuration-overview.md +++ b/content/agent/configuration/configuration-overview.md @@ -3,8 +3,9 @@ title: Basic configuration draft: false weight: 100 toc: true -nd-docs: DOCS-1229 nd-content-type: how-to +nd-product: Agent +nd-docs: DOCS-1229 --- The following sections explain how to configure NGINX Agent using configuration files, CLI flags, and environment variables. @@ -27,8 +28,7 @@ The default locations of configuration files for NGINX Agent are `/etc/nginx-age Examples of the configuration files are provided below: -
- example nginx-agent.conf +{{< details summary="Open nginx-agent.conf example">}} {{< call-out "note" >}} In the following example `nginx-agent.conf` file, you can change the `server.host` and `server.grpcPort` to connect to the control plane. @@ -112,11 +112,9 @@ nginx_app_protect: precompiled_publication: true ``` -
- +{{< /details >}} -
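The note above points at `server.host` and `server.grpcPort` as the values to adjust when connecting to the control plane. As a minimal sketch, assuming a systemd-managed host where the service is installed under the name `nginx-agent`, a restart applies the edited configuration:

```bash
# Restart the agent so edits to nginx-agent.conf take effect, then confirm it is running.
# Assumes a systemd-managed installation with the service named "nginx-agent".
sudo systemctl restart nginx-agent
sudo systemctl status nginx-agent
```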
- example dynamic-agent.conf +{{< details summary="Open dynamic-agent.conf example">}} {{< call-out "note" >}} Default location in Linux environments: `/var/lib/nginx-agent/agent-dynamic.conf` @@ -146,7 +144,7 @@ tags: - qa ``` -
+{{< /details >}} ## CLI Flags & Environment Variables @@ -239,8 +237,7 @@ Default location in FreeBSD environments: `/var/db/nginx-agent/agent-dynamic.con By default, NGINX Agent rotates logs daily using logrotate with the following configuration: -
- NGINX Agent Logrotate Configuration +{{< details summary="Logrotate configuration example" >}} ``` yaml /var/log/nginx-agent/*.log @@ -263,7 +260,7 @@ By default, NGINX Agent rotates logs daily using logrotate with the following co notifempty } ``` -
+{{< /details >}} If you need to change the default configuration, update the file at `/etc/logrotate.d/nginx-agent`. diff --git a/content/amplify/_index.md b/content/amplify/_index.md index f93c4b15c..cdb87a310 100644 --- a/content/amplify/_index.md +++ b/content/amplify/_index.md @@ -4,5 +4,9 @@ description: Lightweight SaaS monitoring and static analysis for NGINX Open Sour url: /nginx-amplify/ cascade: logo: "NGINX-Amplify-product-icon-RGB.svg" + nd-banner: + enabled: true + type: deprecation + md: _banners/eol-amplify.md --- diff --git a/content/amplify/overview/overview-main-components.md b/content/amplify/overview/overview-main-components.md index ecb453b6e..35b736054 100644 --- a/content/amplify/overview/overview-main-components.md +++ b/content/amplify/overview/overview-main-components.md @@ -8,7 +8,7 @@ nd-docs: DOCS-976 ## What Is F5 NGINX Amplify? -[NGINX Amplify](https://amplify.nginx.com/signup/) offers in-depth monitoring for NGINX-based web applications. It simplifies the process of analyzing and resolving issues related to performance and scalability. +[NGINX Amplify](https://amplify.nginx.com/) offers in-depth monitoring for NGINX-based web applications. It simplifies the process of analyzing and resolving issues related to performance and scalability. With NGINX Amplify, you can: diff --git a/content/controller/_index.md b/content/controller/_index.md deleted file mode 100644 index de4760e6f..000000000 --- a/content/controller/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: "NGINX Controller provides application delivery and API - management for modern app teams." -title: F5 NGINX Controller -weight: 2100 -cascade: - logo: "NGINX-Controller-product-icon-RGB.svg" - noindex: true - nd-banner: - enabled: true - type: deprecation - md: _banners/eos-cltr.md -url: /nginx-controller/ ---- - diff --git a/content/controller/admin-guides/_index.md b/content/controller/admin-guides/_index.md deleted file mode 100644 index 7c7510467..000000000 --- a/content/controller/admin-guides/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -description: Learn how to install and manage NGINX Controller and NGINX Controller - Agent. -title: Admin Guides -weight: 100 -url: /nginx-controller/admin-guides/ ---- - diff --git a/content/controller/admin-guides/backup-restore/_index.md b/content/controller/admin-guides/backup-restore/_index.md deleted file mode 100644 index 542b76cb3..000000000 --- a/content/controller/admin-guides/backup-restore/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn how to back up and restore NGINX Controller. -title: Back Up & Restore -weight: 300 -url: /nginx-controller/admin-guides/backup-restore/ ---- - diff --git a/content/controller/admin-guides/backup-restore/backup-restore-cluster-config.md b/content/controller/admin-guides/backup-restore/backup-restore-cluster-config.md deleted file mode 100644 index 146217e3b..000000000 --- a/content/controller/admin-guides/backup-restore/backup-restore-cluster-config.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -description: Learn how to back up your F5 NGINX Controller cluster configuration and - encryption keys. -nd-docs: DOCS-247 -title: Back Up & Restore Cluster Config and Encryption Keys -toc: true -weight: 97 -type: -- how-to ---- - -## Overview - -After installing F5 NGINX Controller, you should back up the cluster config and encryption keys. You'll need these if you ever need to restore the NGINX config database on top of a new NGINX Controller installation. 
- -- To back up the NGINX Controller cluster configuration and encryption keys: - - ```bash - /opt/nginx-controller/helper.sh cluster-config save - ``` - - The file is saved to `/opt/nginx-controller/cluster-config.tgz`. - -- To restore the cluster's config and encryption keys, take the following steps: - - ```bash - /opt/nginx-controller/helper.sh cluster-config load - ``` - -{{< versions "3.12" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md b/content/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md deleted file mode 100644 index f1e211f4b..000000000 --- a/content/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -description: Learn how to back up and restore the embedded F5 NGINX Controller config - database. -nd-docs: DOCS-248 -title: Back Up & Restore an Embedded Config Database -toc: true -weight: 98 -type: -- tutorial ---- - -## Overview - -Follow the steps in this guide to back up and restore an internal F5 NGINX Controller config database. Use this guide if you selected the option to use an embedded config database when you installed NGINX Controller. Embedded config means that NGINX Controller is using an internal database to store configuration data. - -## Automated Backups of Embedded Config Database - -NGINX Controller automatically takes a snapshot of the embedded config database every 60 minutes and saves the backups on the config DB volume. The backup file location varies depending on the volume chosen at setup: - -- **Local**: The backup files are located in `/opt/nginx-controller/postgres_data/` with the following naming scheme: `backup_.tar`. - -- **NFS**: The backup files are located in the path on the NFS server host that was specified during installation and have the following naming scheme: `backup_.tar`. - -These automated config backups do not include backups of metrics data, which must be backed up separately; refer to [Backup & Restore the Metrics Database]({{< ref "/controller/admin-guides/backup-restore/backup-restore-metrics-db.md" >}}) for those instructions. - -{{< call-out "tip" >}} -As a best practice, we recommend that you make scheduled backups of the entire config DB volume and keep the backups off-site for safekeeping. -{{< /call-out >}} - -  - ---- - -## Restore Embedded Config Database - -This section explains how to restore the embedded config database from the latest backup file or a specific, timestamped file. - -{{< call-out "important" >}}If you restore the config database on top of a new installation of NGINX Controller, make sure to follow the steps to [restore your NGINX config and encryption keys]({{< ref "/controller/admin-guides/backup-restore/backup-restore-cluster-config.md" >}}) afterward. {{< /call-out >}} - -- To restore the embedded NGINX Controller config database **from the latest automated backup**, run the following command: - - ```bash - /opt/nginx-controller/helper.sh backup restore - ``` - -- To restore the embedded config database from **a specific backup file**: - - ```bash - /opt/nginx-controller/helper.sh backup restore - ``` - - - If you installed the embedded config database on a **local volume**, the backup files are located in `/opt/nginx-controller/postgres_data/`. 
- - - If you installed the embedded config database on an **NFS volume**, follow the steps in [(NFS) Copy Config Database Backup to Local Volume for Restoration]({{< ref "/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md#nfs-copy-config-database-backup-to-local-volume-for-restoration" >}}) to download the backup file to your local volume, and then use the `helper.sh` script to restore from it. - -  - -### (NFS) Copy Config Database Backup to Local Volume for Restoration - - - -To restore the embedded config database from a specific backup file, the file needs to be on your local volume. - -Take the following steps to copy an embedded config database backup file from an NFS volume to your local volume for restoration: - -1. Log on to the node where PostgreSQL is installed as a user with sudo privileges. - -1. Change to the `/opt/nginx-controller` directory: - - ``` bash - cd /opt/nginx-controller - ``` - -1. Create a local backup directory to copy the backup file to: - - ``` bash - mkdir local_backups - ``` - -1. Get the NFS volume details: - - ``` bash - mount | grep nfs - ``` - - The output looks similar to the following: - - ``` bash - : on type nfs4 (mount options...) - ``` - - For example: - - ``` bash - 192.0.2.1:/mnt/nfs_share/nfs_postgresql on /var/lib/kubelet/pods/1ce4e221-d6d6-434f-9e73-bc81c879530e/volumes/kubernetes.io~nfs/controller-postgres type nfs4 (mount options ...) - ``` - -1. Record the `:` details corresponding to the `nfs_postgresql` volume, namely the volume mounted on the Kubernetes `controller-postgres` container. - - For example: - - ``` bash - 192.0.2.1:/mnt/nfs_share/nfs_postgresql - ``` - -1. Create a parent directory to mount the NFS path to: - - ``` bash - sudo mkdir -p /mnt/local_pgdata - ``` - -1. Mount the NFS path: - - ``` bash - sudo mount : /mnt/local_pgdata - ``` - - For example: - - ``` bash - sudo mount 192.0.2.1:/mnt/nfs_share/nfs_postgresql /mnt/local_pgdata - ``` - -1. View the list of the available backup files. The files have the following naming scheme: `backup_.tar`. - - ```bash - ls /mnt/local_pgdata/ - ``` - -1. Copy the backup file from which you want to restore to the `local_backups/` directory: - - ``` bash - sudo cp /mnt/local_pgdata/backup_.tar local_backups/ - ``` - -1. Use the NGINX Controller `helper.sh` script to restore the backup file: - - ``` bash - /opt/nginx-controller/helper.sh backup restore local_backups/backup_.tar - ``` - -1. After the backup has been restored, you can unmount the NFS path and delete the backup file in the `local_backups` directory: - - ``` bash - sudo umount /mnt/local_pgdata - rm -i local_backups/backup_.tar - ``` - - - - -  - ---- - -## What's Next - -- [Backup & Restore the Metrics Database]({{< ref "/controller/admin-guides/backup-restore/backup-restore-metrics-db.md" >}}) - -{{< versions "3.12" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/backup-restore/backup-restore-external-config-db.md b/content/controller/admin-guides/backup-restore/backup-restore-external-config-db.md deleted file mode 100644 index 8ff44f638..000000000 --- a/content/controller/admin-guides/backup-restore/backup-restore-external-config-db.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Learn how to back up and restore the external F5 NGINX Controller config - database. 
-nd-docs: DOCS-249 -title: Back Up & Restore an External Config Database -toc: true -weight: 99 -type: -- tutorial ---- - -## Overview - -Follow the steps in this guide to back up and restore an external F5 NGINX Controller config database. Use this guide if you selected the option to use an external PostgreSQL config database when you installed NGINX Controller. External config means that you set up NGINX Controller to store configuration data in your own Postgres database. - -## Before You Begin - -To backup and restore the external config database, you'll need the following: - -- Login credentials for your NGINX Controller PostgreSQL database -- A connection to your NGINX Controller PostgreSQL database -- [psql](https://www.postgresql.org/docs/9.5/app-psql.html) and [pg_dump](https://www.postgresql.org/docs/9.5/app-pgdump.html) installed on the server where you'll be performing the backup or restore - -### Set the PostgreSQL Environment Variables - -1. Log in to the NGINX Controller host using SSH. -2. Set the following environment variables using the credentials for your NGINX Controller PostgreSQL database: - - ``` bash - export PGHOST= - export PGPORT=5432 - export PGUSER= - export PGPASSWORD= - ``` - - {{< call-out "note" >}} -If you've configured PostgreSQL to use SSL, ensure that you've placed your certs in `~/.postgresql`. For more information, see [Client Certificates](https://www.postgresql.org/docs/9.5/libpq-ssl.html#LIBPQ-SSL-CLIENTCERT) in the PostgreSQL documentation. - {{< /call-out >}} - -  - ---- - -## Back Up External Config Database - -Take the following steps to back up the external NGINX Controller config database: - -1. Stop NGINX Controller: - - ``` bash - /opt/nginx-controller/helper.sh controller stop - ``` - -1. Run the following script to back up the NGINX Controller database. The backup files are saved in a directory that looks like `pgbackup_`. - - ``` bash - DATE=$(date +"%Y%m%d%H%M") - mkdir ~/pgbackup_${DATE} - - for db in common data system vault; do - pg_dump -w -E utf8 ${db} -F c -f ~/pgbackup_${DATE}/${db}-${DATE}.backup - done - ``` - -1. Start NGINX Controller: - - ``` bash - /opt/nginx-controller/helper.sh controller start - ``` - - -  - ---- - -## Restore External Config Database - -{{< call-out "important" >}}If you restore the config database on top of a new installation of NGINX Controller, make sure to follow the steps to [restore your NGINX config and encryption keys]({{< ref "/controller/admin-guides/backup-restore/backup-restore-cluster-config.md" >}}) afterward. {{< /call-out >}} - -To restore the external NGINX Controller config database: - -1. Stop NGINX Controller: - - ``` bash - /opt/nginx-controller/helper.sh controller stop - ``` - -1. Locate the backup directory and save the name as a local environment variable. The name of the backup directory follows the format `pgbackup_`. - - ``` bash - BACKUP_PATH=~/pgbackup_ - ``` - -1. Run the restore script: - - ``` bash - for backup_file in "$BACKUP_PATH"/*.backup; do - db="$(basename "$backup_file" | cut -d '-' -f 1)" - pg_restore -c -C -d "$db" "$backup_file" - done - ``` - -1. 
Start NGINX Controller: - - ``` bash - /opt/nginx-controller/helper.sh controller start - ``` - - -  - ---- - -## What's Next - -- [Backup & Restore the Metrics Database]({{< ref "/controller/admin-guides/backup-restore/backup-restore-metrics-db.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/backup-restore/backup-restore-metrics-db.md b/content/controller/admin-guides/backup-restore/backup-restore-metrics-db.md deleted file mode 100644 index bd372ded2..000000000 --- a/content/controller/admin-guides/backup-restore/backup-restore-metrics-db.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Learn how to back up and restore the F5 NGINX Controller analytics database. -nd-docs: DOCS-250 -title: Back Up & Restore the Analytics Database -toc: true -weight: 100 -type: -- how-to ---- - -## Overview - -This guide explains how to back up and restore the F5 NGINX Controller analytics database. Backing up and restoring the analytics data lets you preserve the history of graphs. Backing up this information is optional. - -## Back Up the Analytics Database - -Make a backup copy of the metrics database following the steps for your volume type: - -- **Local**: Make a back up copy of the metrics data that's located in `/opt/nginx-controller/clickhouse_data` by default, or on the volume that you specified when installing NGINX Controller. - -- **NFS**: Make a backup copy of all of the data in the NFS path or make a copy of the ClickHouse binary data. Refer to the official ClickHouse documentation on [Data Backup](https://clickhouse.tech/docs/en/operations/backup/). - -- **EBS**: For AWS, refer to the [Amazon EBS snapshots](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) documentation to create a volume snapshot. - -## Restore the Analytics Database - -Restore the backup copy of the metrics database following the steps for your volume type: - -- **Local**: Copy the data you backed up to `/opt/nginx-controller/clickhouse_data`. - -- **NFS**: Copy the ClickHouse binary data in the NFS path. Refer to the official ClickHouse documentation on [Data Backup](https://clickhouse.tech/docs/en/operations/backup/). - -- **EBS**: For AWS, refer to the [Amazon EBS snapshots](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) documentation to restore a volume snapshot. - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/_index.md b/content/controller/admin-guides/config-agent/_index.md deleted file mode 100644 index 5d32cc45c..000000000 --- a/content/controller/admin-guides/config-agent/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -description: Learn how to configure and manage the interaction between the F5 NGINX - Plus data plane and NGINX Controller. -title: Configure NGINX Controller Agent -weight: 200 -url: /nginx-controller/admin-guides/config-agent/ ---- - diff --git a/content/controller/admin-guides/config-agent/about-controller-agent.md b/content/controller/admin-guides/config-agent/about-controller-agent.md deleted file mode 100644 index e5efa82c0..000000000 --- a/content/controller/admin-guides/config-agent/about-controller-agent.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: Learn about the NGINX Controller Agent. 
-nd-docs: DOCS-508 -title: Get to Know the F5 NGINX Controller Agent -toc: true -weight: 100 -type: -- concept ---- - -## Overview - -The F5 NGINX Controller Agent is a compact application written in Golang. NGINX Controller uses the Controller Agent to manage and monitor each NGINX Plus instance that the Agent is installed on. Once installed, the NGINX Controller Agent collects metrics and metadata and sends them securely to NGINX Controller for storage and visualization. - -## How NGINX Controller Agent Works - -You need to [install the NGINX Controller Agent]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) on all of the hosts you'd like to monitor. - -Once installed, the NGINX Controller Agent automatically starts to report metrics. You should see the real-time metrics data in the NGINX Controller user interface after about one minute. - -There's no need to manually add or configure anything in the NGINX Controller user interface after installing the Agent. When the Agent is started, the metrics and the metadata are automatically reported to NGINX Controller and are visualized in the user interface. You can, however, [configure the NGINX Controller Agent]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) to customize how it collects and reports metrics. - -All communications between the NGINX Controller Agent and the backend are done securely over SSL/TLS. All traffic is always initiated by the NGINX Controller Agent. The backend system doesn't set up any connections back to the NGINX Controller Agent. - -## Detecting and Monitoring NGINX Instances - -The NGINX Controller Agent attempts to detect and monitor all unique NGINX process instances running on a host and collects a separate set of metrics and metadata for each. The Agent uses the following qualifications to identify unique NGINX instances: - -- A unique control process and its workers, started with an **absolute path** to a distinct NGINX binary. -- A control process running with a default config path, or with a custom path set in the command-line parameters. - -{{< call-out "caution" >}}You should not make manual changes to the `nginx.conf` file on NGINX Plus instances that are managed by NGINX Controller. Manually updating the `nginx.conf` file on managed instances may adversely affect system performance. In most cases, NGINX Controller will revert or overwrite manual updates made to `nginx.conf`.{{< /call-out >}} - -
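Because the Agent keys each monitored instance off the absolute binary path and configuration path of an NGINX master process, a quick check along these lines (purely illustrative, run on the data-plane host) shows the candidate master processes and the paths they were started with:

```bash
# List NGINX master processes; the command column shows the binary path and any
# "-c <config>" argument, which is what distinguishes one instance from another.
ps axo pid,command | grep '[n]ginx: master'
```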
- -## Supported Systems - -NGINX Controller, the NGINX Controller Agent, and the NGINX Controller Application Security Add-on support the following distributions and architectures. - -{{< call-out "note" >}}Refer to the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) guide for the distributions that NGINX Plus supports.{{< /call-out>}} - -{{< bootstrap-table "table table-striped table-bordered" >}} - -|Distribution
<br>and Version|NGINX Controller<br>(Control Plane)|Agent<br>(Data Plane)|ADC App. Sec.<br>(Data Plane)|APIM Adv. Sec.<br>(Data Plane)|Notes|
-|--- |--- |--- |--- |--- |--- |
-|Amazon Linux<br>2<br>(x86_64)| Not supported|v3.0+ |Not supported|Not supported| |
-|Amazon Linux<br>2017.09+<br>(x86_64)| Not supported |v3.0+|Not supported |Not supported| |
-|CentOS<br>6.5+<br>(x86_64)| Not supported |v3.0+| Not supported |Not supported| • CentOS 6.5 and later versions in the CentOS 6 family are partially supported.<br>• This distribution does not support AVRD.|
-|CentOS<br>7.4+<br>(x86_64)|v3.0+|v3.0+ | v3.12+ |v3.19+| • CentOS 7.4 and later versions in the CentOS 7 family are supported.|
-|Debian<br>8<br>(x86_64)| Not supported |v3.0–3.21|Not supported|Not supported|• This distribution does not support AVRD.|
-|Debian<br>9<br>(x86_64)|v3.0+|v3.0–3.21 | v3.12+ |v3.19+ | |
-|Debian<br>10<br>(x86_64)| Not supported |v3.17+ | v3.17+ |v3.19+| See the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/) for requirements for Debian 10. |
-|Red Hat Enterprise Linux<br>6.5+| Not supported |v3.0+| Not supported | Not supported| • RHEL 6.5 and later versions in the RHEL 6 family are partially supported.|
-|Red Hat Enterprise Linux<br>7.4+<br>(x86_64)|v3.5+|v3.5+ | v3.12+|v3.19+| • RHEL 7.4 and later versions in the RHEL 7 family are supported.<br>• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. |
-|Red Hat Enterprise Linux<br>8.0+<br>(x86_64)|v3.22+|v3.22+ | v3.22+| Not supported | • RHEL 8.0 and later versions in the RHEL 8 family are supported.<br>• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. |
-|Ubuntu<br>18.04 LTS<br>(x86_64)|v3.0+|v3.0+ |v3.13+|v3.19+| |
-|Ubuntu<br>20.04 LTS<br>
(x86_64)|v3.20+|v3.12+|v3.16.1+|v3.19+| | - -{{< /bootstrap-table >}} - - - - -#### Analytics, Visibility, and Reporting Daemon (AVRD) - -NGINX Controller v3.1 and later use an Analytics, Visibility, and Reporting daemon (AVRD) to aggregate and report app-centric metrics, which you can use to track and check the health of your apps. To learn more about these metrics, see the [NGINX Metrics Catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) topic. - -{{< call-out "note" >}} -See the [NGINX Controller Technical Specifications]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) for the complete list of system requirements for NGINX Controller and the NGINX Controller Agent. -{{< /call-out>}} - -## Supported Python Versions - -NGINX Controller and the NGINX Controller Agent versions 3.6 and earlier require Python 2.6 or 2.7. Python is not needed for NGINX Controller or the NGINX Controller Agent versions 3.7 and later. - -## What's Next - -- [Install the NGINX Controller Agent]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) -- [Customize how the NGINX Controller Agent collects metrics]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/configure-metrics-collection.md b/content/controller/admin-guides/config-agent/configure-metrics-collection.md deleted file mode 100644 index c88443035..000000000 --- a/content/controller/admin-guides/config-agent/configure-metrics-collection.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -description: Contains instructions for setting up the F5 NGINX Controller Agent to - collect metrics for NGINX Plus instances. -nd-docs: DOCS-509 -title: Set up Metrics Collection -toc: true -weight: 120 -type: -- how-to ---- - -## Before You Begin - -- Before you can set up metrics collection, you first need to [install and start the F5 NGINX Controller Agent]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}), so that the Agent can start pushing aggregated data to NGINX Controller. - -## Objectives - -Follow the steps in this guide to configure how metrics are collected and monitored. - -## Configuring NGINX for Metrics Collection - -In order to monitor an NGINX Plus instance, the NGINX Controller Agent needs to find the relevant NGINX control process and determine its key characteristics. - -The Agent is able to automatically find all relevant NGINX configuration files, parse them, extract their logical structure, and send the associated JSON data to the Controller Server for further analysis and reporting. - -### SSL Certificate Parsing and Analysis - -To parse SSL certificate metadata, the NGINX Controller Agent uses standard `openssl(1)` functions. SSL certificates are parsed and analyzed only when the corresponding [settings]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md#default-agent-settings" >}}) are turned on. SSL certificate analysis is *on* by default. - -To enable or disable analyzing SSL certs: - -1. Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select **Platform**. -3. On the **Platform** menu, select **Agent**. -4. On the **Default agent settings** page, select or clear the **Analyze SSL certificates** box. 
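Since certificate analysis is built on standard `openssl(1)` calls, an invocation along the following lines shows the kind of metadata involved; the certificate path is a placeholder, not something the Agent requires you to run:

```bash
# Print the subject, issuer, and validity dates of a certificate referenced by an
# NGINX config; /etc/nginx/ssl/example.crt is a placeholder path.
openssl x509 -in /etc/nginx/ssl/example.crt -noout -subject -issuer -dates
```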
- -### Metrics from `/api` - -NGINX Controller uses the `/api` location on the NGINX Plus instance to collect metrics. - -When you push a configuration to an NGINX Plus instance, NGINX Controller automatically enables the `/api` location for that instance. - -{{< call-out "note" >}} -The `/api` location settings that NGINX Controller creates will override any settings that you have previously defined. -{{< /call-out >}} - -If you use NGINX Controller solely to monitor your NGINX Plus instances, you may need to enable the `/api` location on your instances manually. -Refer to the [Configuring the API](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api) section of the NGINX Plus Admin Guide for instructions. - -For more information about the metrics list, refer to [Overview: Metrics and Metadata]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}). - -### Metrics from `access.log` and `error.log` - -The NGINX Controller Agent collects NGINX metrics from the [access.log](http://nginx.org/en/docs/http/ngx_http_log_module.html) and the [error.log](http://nginx.org/en/docs/ngx_core_module.html#error_log) by default. - -You don't have to specifically point the Agent to either the NGINX configuration or the NGINX log files. The Agent should detect their location automatically. However, **you do need to make sure that the Agent can read the log files**. - -To do so, verify that either the `nginx` user or the [user defined in the NGINX config](https://nginx.org/en/docs/ngx_core_module.html#user) -- such as `www-data` -- can read the log files. In addition, make sure that the log files are being written normally. - -The Agent will try to detect the [log format](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) for a particular log, so that it can parse the log correctly and extract relevant metrics data. - -#### Enable Custom `access.log` Metrics - -Some metrics included in the [NGINX Metrics reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) are not available unless the corresponding variables are included in a custom [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) format in the NGINX config. - -{{< call-out "note" >}} - -- Read [Configuring Logging](https://docs.nginx.com/nginx/admin-guide/monitoring/logging/#setting-up-the-access-log) in the NGINX Admin Guide. -- View the complete list of [NGINX log variables](https://nginx.org/en/docs/varindex.html). - -{{< /call-out>}}. - -Take the steps in this section to enable the NGINX Controller Agent to collect metrics from custom `access.log` variables. - -1. Add a new [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) format to the NGINX configuration (or modify an existing one). - -2. Add the desired [NGINX variables](https://nginx.org/en/docs/varindex.html) to the log format. For example: - - ```nginx - log_format main_ext '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for" ' - '"$host" sn="$server_name" ' - 'rt=$request_time ' - 'ua="$upstream_addr" us="$upstream_status" ' - 'ut="$upstream_response_time" ul="$upstream_response_length" ' - 'cs=$upstream_cache_status' ; - ``` - -3. 
Use the extended log format in your access log configuration: - - ```nginx - access_log /var/log/nginx/access.log main_ext; - ``` - - {{< call-out "note" >}} -By default, the Controller Agent processes all access logs that it finds in your log directory. If you define a new log file with the extended log format that contains entries that are already being logged to another access log, your metrics might be counted twice. Refer to the [Agent configuration]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) guide to learn how to exclude specific log files from processing. - {{< /call-out >}} - -4. Set the [error.log](https://nginx.org/en/docs/ngx_core_module.html#error_log) log level to `warn`. - - ```nginx - error_log /var/log/nginx/error.log warn; - ``` - -5. [Reload](https://nginx.org/en/docs/control.html) your NGINX configuration: - - ```bash - service nginx reload - ``` - -When the Controller Agent discovers these metrics, the NGINX Controller **Analytics Dashboards Overview** will automatically update with a predefined set of graphs. -You can also use these metrics to build more specific set of [custom Dashboards]({{< ref "/controller/analytics/dashboards/custom-dashboards.md" >}}). - -### Collect Metrics from Syslog - -If you set up the Controller Agent to [use Syslog]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md#logging-to-syslog" >}}), you need to set up the Controller Agent to collect metrics from Syslog. - -Take the steps below to enable metrics collection from Syslog: - -1. Edit the NGINX configuration file. - - 1. Specify the `syslog` listener address as the first parameter to the [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) directive. - 2. Include the `controller` tag and your preferred log format: - - ```nginx - access_log syslog:server=127.0.0.1:12000,tag=controller,severity=info main_ext; - ``` - -2. Reload NGINX: - - ```bash - service nginx reload - ``` - - For more information, see [Controlling NGINX](https://nginx.org/en/docs/control.html). - -{{< call-out "note" >}} -To send the NGINX logs to both the existing logging facility and the NGINX Controller Agent, include a separate [access.log](https://nginx.org/en/docs/http/ngx_http_log_module.html) directive for each destination. -{{< /call-out >}} - - -## What's Next - -- [Overview: NGINX Metrics and Metadata]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) -- [What to check if the Controller Agent isn't reporting metrics]({{< ref "/controller/support/troubleshooting-controller.md#troubleshooting-metrics" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/configure-the-agent.md b/content/controller/admin-guides/config-agent/configure-the-agent.md deleted file mode 100644 index 9963d0e80..000000000 --- a/content/controller/admin-guides/config-agent/configure-the-agent.md +++ /dev/null @@ -1,215 +0,0 @@ ---- -description: Customize the F5 NGINX Controller Agent configuration. -nd-docs: DOCS-510 -title: Configure the NGINX Controller Agent -toc: true -weight: 110 -type: -- tutorial ---- - -## Overview - -Follow the steps in this guide to customize the F5 NGINX Controller Agent configuration. - -## Default Agent Settings - -To access the **Default Agent Settings** page: - -1. Open the NGINX Controller user interface and log in. -2. 
Select the NGINX Controller menu icon, then select **Platform**. -3. On the **Platform** menu, select **Agent**. - -On the **Default Agent Settings** page, you can set the following default settings for the NGINX Controller Agent: - -- **NGINX configuration file analysis**. This setting is enabled by default. -- **Periodic NGINX configuration syntax checking with "nginx -t"**. This setting is disabled by default. -- **Analyzing SSL certs**. This setting is enabled by default. - -## Enable /api Location - -NGINX Controller uses the `/api` location on the NGINX Plus instance to collect metrics. - -When you push a configuration to an NGINX Plus instance, NGINX Controller automatically enables the `/api` location for that instance. - -{{< call-out "note" >}} -The `/api` location settings that NGINX Controller creates will override any settings that you have previously defined. -{{< /call-out >}} - -If you use NGINX Controller solely to monitor your NGINX Plus instances, you may need to enable the `/api` location on your instances manually. -Refer to the [Configuring the API](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api) section of the NGINX Plus Admin Guide for instructions. - -## Controller Agent Configuration File - -The configuration file for the NGINX Controller Agent is located at `/etc/controller-agent/agent.conf`. This configuration file is a text-based file. - -## Change the API Key - -When you first [install the NGINX Controller Agent]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}), your API key is written to the `agent.conf` file automatically. If you ever need to change the API key, you can edit the following section in `agent.conf` accordingly: - -``` nginx -[credentials] -api_key = YOUR_API_KEY -``` - -## Change the Hostname and UUID - -To create unique objects for monitoring, the NGINX Controller Agent must be able to extract a valid hostname from the system. The hostname is also used as one of the components for generating a unique identifier. Essentially, the hostname and the UUID (universally unique identifier) unambiguously identify a particular instance of the NGINX Controller Agent to NGINX Controller. If the hostname or the UUID are changed, the NGINX Controller Agent and the server will register a new object for monitoring. - -The NGINX Controller Agent tries its best to determine the correct hostname. If the Agent cannot determine the hostname, you can set the hostname in the `agent.conf` file. Check for the following section, and provide the desired hostname here: - -``` nginx -[credentials] -.. -hostname = myhostname1 -``` - -The hostname should be real. The NGINX Controller Agent won't start unless a valid hostname is defined. The following *are not* valid hostnames: - -- localhost -- localhost.localdomain -- localhost6.localdomain6 -- ip6-localhost - -{{< call-out "note" >}} - -You can use the above method to replace the system's hostname with an arbitrary alias. Keep in mind that if you redefine the hostname for a live object, the existing object will be marked as failed in the NGINX Controller user interface. Redefining the hostname in the NGINX Controller Agent's configuration creates a new UUID and a new system for monitoring. - -Alternatively, you can define an alias for the host in the NGINX Controller user interface. Go to the **Graphs** page, select the system that you want to update, and click the gear icon. 
- -{{< /call-out >}} - -## Preserving the UUID across OS upgrades - -The UUID is generated based on a combination of the hostname and underlying OS functions. An upgrade to the OS may lead to a new UUID and cause previously registered agents to be offline. - -If your use case requires that the UUID persist across upgrades, you can set the `store_uuid` option in `agent.conf`: - -``` nginx -[credentials] -... -store_uuid = True -``` - -After restarting the Controller Agent -- `service controller-agent restart` -- the UUID will be persisted to `agent.conf` and used for future instance detection. - -## Set the Path to the NGINX Configuration File - -The NGINX Controller Agent detects the NGINX configuration file automatically. You shouldn't need to point the NGINX Controller Agent to the `nginx.conf` file explicitly. - -{{< call-out "caution" >}}You should not make manual changes to the `nginx.conf` file on NGINX Plus instances that are managed by NGINX Controller. Manually updating the `nginx.conf` file on managed instances may adversely affect system performance. In most cases, NGINX Controller will revert or overwrite manual updates made to `nginx.conf`.{{< /call-out >}} - -If, for some reason, the NGINX Controller Agent cannot find the NGINX configuration, you can use the following option in `/etc/controller-agent/agent.conf` to point to the configuration file: - -``` nginx -[nginx] -configfile = /etc/nginx/nginx.conf -``` - -{{< call-out "note" >}} We recommend using this option only as a workaround if needed. If you do need to add the path to the NGINX config file, we ask that you [contact NGINX Support]({{< ref "/controller/support/contact-support.md" >}}) so they can help troubleshoot the issue.{{< /call-out >}} - -## Set Host Tags - -You can define arbitrary tags on a "per-host" basis. Tags can be configured in the Controller user interface on the **Graphs** page, or set in the `/etc/controller-agent/agent.conf` file: - -``` nginx -[credentials] -tags = foo bar foo:bar -``` - -{{< call-out "note" >}} Any changes to instance Tags made in the Controller user interface will overwrite the values stored in `agent.conf`.{{< /call-out >}} - -You can use tags to build custom graphs, configure alerts, and filter the systems on the **Graphs** page in the Controller user interface. - -## Logging to Syslog - -{{< call-out "note" >}} -[NGINX Admin Guide - Logging to Syslog](https://docs.nginx.com/nginx/admin-guide/monitoring/logging/#logging-to-syslog) -{{< /call-out>}} - -The NGINX Controller Agent can collect NGINX log files using `syslog`. This could be useful when you don't keep the NGINX logs on disk, or when monitoring a container environment such as Docker with NGINX Controller. - -To configure the NGINX Controller Agent to send logs to `syslog`: - -1. Add the following to the `/etc/controller-agent/agent.conf` file: - - ``` nginx - [listeners] - keys = syslog-default - - [listener_syslog-default] - address = 127.0.0.1:12000 - ``` - -2. Restart the NGINX Controller Agent. This will reload the configuration, and the Agent will start listening on the specified IP address and port: - - ``` nginx - # service controller-agent restart - ``` - - {{< call-out "important" >}} -Make sure you [add the `syslog` settings to your NGINX configuration file]({{< ref "/controller/admin-guides/config-agent/configure-metrics-collection.md#collect-metrics-from-syslog" >}}) as well. 
- {{< /call-out >}} - -## Exclude Certain NGINX Log Files - -By default, the NGINX Controller Agent tries to find and watch all `access.log` files described in the NGINX configuration. If there are multiple log files where the same request is logged, the metrics may be counted more than once. - -To exclude specific NGINX log files from the metrics collection, add lines similar to the following to `/etc/controller-agent/agent.conf`: - -``` nginx -[nginx] -exclude_logs=/var/log/nginx/app1/*,access-app1-*.log,sender1-*.log -``` - -## Set Up a Proxy - -If your system is in a DMZ environment without direct access to NGINX Controller, the only way for the NGINX Controller Agent to report collected metrics to NGINX Controller is through a proxy. - -The NGINX Controller Agent will use the usual environment variables common on Linux systems (for example, `https_proxy` or `HTTP_PROXY`). However, you can also define HTTPS proxy manually in `agent.conf`. This can be done as follows: - -``` nginx -[proxies] -https = https://10.20.30.40:3030 -.. -``` - -## Controller Agent Logfile - -The NGINX Controller Agent maintains its log file in `/var/log/nginx-controller/agent.log`. - -Upon installation, the NGINX Controller Agent's log rotation schedule is added to `/etc/logrotate.d/controller-agent`. - -The normal level of logging for the NGINX Controller Agent is `INFO`. If you ever need to debug the NGINX Controller Agent, change the level to `DEBUG` as described below. - -{{< call-out "caution" >}} -The size of the NGINX Controller Agent's log file can proliferate in `DEBUG` mode. You should use `DEBUG` mode only for troubleshooting purposes. -{{< /call-out >}} - -### Change the Agent Log Level - -To change the log level for the NGINX Controller Agent: - -1. Edit the `[loggers]` section of the NGINX Controller Agent configuration file -- `/etc/controller-agent/agent.conf`. -1. Set the `level` to one of the following: - - - error - - info - - debug - - trace - - ```plaintext - [loggers] - level = DEBUG - ... - ``` - -1. [Restart the NGINX Controller Agent]({{< ref "/controller/admin-guides/install/agent-restart.md#Starting-and-Stopping-the-Agent" >}}) to make the changes take effect. - -## What's Next - -- [Set up Metrics Collection]({{< ref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/config-agent/use-agent-with-docker.md b/content/controller/admin-guides/config-agent/use-agent-with-docker.md deleted file mode 100644 index 12a1786b8..000000000 --- a/content/controller/admin-guides/config-agent/use-agent-with-docker.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: Learn how to use the F5 NGINX Controller Agent in a Docker environment. -nd-docs: DOCS-511 -title: Use the NGINX Controller Agent with Docker -toc: true -weight: 200 -type: -- reference ---- - -## Before You Begin - -We support running the F5 NGINX Controller Agent in a Docker environment on the following distributions: CentOS, Debian, and Ubuntu. - -We **don't support** containerized instances on RHEL 7 and RHEL 8. - -For optimal performance when using the Controller Agent in a Docker environment, the number of containers shouldn't exceed the number of processors on the container host. 
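As a rough, illustrative way to compare the two numbers on a container host (assuming Docker and coreutils are available):

```bash
# Compare the processor count with the number of running containers on the host.
echo "processors: $(nproc)"
echo "running containers: $(docker ps -q | wc -l)"
```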
- -## Running NGINX Controller Agent in Docker - -When running a containerized instance on an Ubuntu or Debian docker host, you need to enable cgroup swap limit capabilities in order for the NGINX Controller Agent to be able to report swap metrics for instances. See [Docker - Linux post-installation steps](https://docs.docker.com/engine/install/linux-postinstall/#your-kernel-does-not-support-cgroup-swap-limit-capabilities) for details. - -Refer to the [nginxinc/docker-nginx-controller](https://github.com/nginxinc/docker-nginx-controller) repository in GitHub for a set of guidelines that you can use today as we continue to enhance the experience. - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/_index.md b/content/controller/admin-guides/install/_index.md deleted file mode 100644 index fbede0eb5..000000000 --- a/content/controller/admin-guides/install/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -description: Learn how to install and update F5 NGINX Controller and the NGINX Controller - Agent. -title: Installation -weight: 100 -url: /nginx-controller/admin-guides/install/ ---- - diff --git a/content/controller/admin-guides/install/agent-restart.md b/content/controller/admin-guides/install/agent-restart.md deleted file mode 100644 index acc3e9173..000000000 --- a/content/controller/admin-guides/install/agent-restart.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: How to start, stop, and verify the state of the F5 NGINX Controller Agent - service. -nd-docs: DOCS-251 -title: Manage the NGINX Controller Agent Service -toc: true -weight: 210 -type: -- how-to ---- - -## Starting and Stopping the Agent - -To start, stop, and restart the F5 NGINX Controller Agent, run the following commands on the NGINX Plus system where you installed the Agent. - -Start the NGINX Controller Agent: - -```bash -service controller-agent start -``` - -Stop the NGINX Controller Agent: - -```bash -service controller-agent stop -``` - -Restart the NGINX Controller Agent: - -```bash -service controller-agent restart -``` - -## Verify that the Agent Has Started - -To verify that the NGINX Controller Agent has started, run the following command on the NGINX Plus system where you installed the Agent: - -```bash -ps ax | grep -i 'controller\-' -2552 ? S 0:00 controller-agent -``` - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/get-n-plus-cert-and-key.md b/content/controller/admin-guides/install/get-n-plus-cert-and-key.md deleted file mode 100644 index 9b980d52f..000000000 --- a/content/controller/admin-guides/install/get-n-plus-cert-and-key.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: How to download the F5 NGINX Plus nginx.crt and nginx.key files using - the NGINX Controller API. -nd-docs: DOCS-252 -title: Download the NGINX Plus Cert and Key Bundle -toc: true -weight: 105 -type: -- how-to ---- - -## Overview - -This topic explains how to use the [F5 NGINX Controller REST API](https://docs.nginx.com/nginx-controller/api/ctlr-platform-api/) to download your NGINX Plus `nginx.crt` and `nginx.key` files. You'll need these files if you're [installing NGINX Plus as part of an NGINX Controller trial]({{< ref "/controller/admin-guides/install/try-nginx-controller.md" >}}). 
- -  - -## Authenticate with the NGINX Controller API - -The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. - -{{< call-out "tip" >}} -You can send a GET request to the login endpoint to find the status of the session token. -{{< /call-out >}} - -For example: - -- Login and capture the session cookie: - - ```curl - curl -c cookie.txt -X POST --url 'https:///api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "","password": ""}}' - ``` - -- Use the session cookie to authenticate and get the session status: - - ```curl - curl -b cookie.txt -c cookie.txt -X GET --url 'https:///api/v1/platform/login' - ``` - - -  - ---- - -## Download the NGINX Plus Certificate and Key Bundle - -To use the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}) to download your NGINX Plus certificate and key bundle as a gzip or JSON file, send a GET request to the `/platform/licenses/nginx-plus-licenses/controller-provided` endpoint. - -For example: - -- Download JSON file: - - ```bash - curl -b cookie.txt -c cookie.txt --header 'Content-Type: application/json' -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.json - ``` - -- Download GZIP file: - - ```bash - curl -b cookie.txt -c cookie.txt -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.gz - ``` - -{{< call-out "note" >}} -If you are using a self-signed certificate you will need to add `-k` (allow insecure connections) to your curl command to be able to download your NGINX Plus certificate and key bundle. -{{< /call-out >}} - - -Once you have downloaded your certificate and key bundle you will need to expand the `.gz` file to get your certificate and key pair. - -For example: - -```bash -gunzip nginx-plus-certs.gz -``` - ---- - -## What's Next - -- [Trial NGINX Controller with NGINX Plus]({{< ref "/controller/admin-guides/install/try-nginx-controller.md" >}}) - -  - -{{< versions "3.10" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/install-agent-non-root.md b/content/controller/admin-guides/install/install-agent-non-root.md deleted file mode 100644 index 8c58211cd..000000000 --- a/content/controller/admin-guides/install/install-agent-non-root.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -description: How to install the F5 NGINX Controller Agent to run as a non-root user. -nd-docs: DOCS-253 -title: Install NGINX Controller Agent for Non-root Users -toc: true -weight: 205 -type: -- tutorial ---- - -## Overview - -This document provides the instructions to run F5 NGINX Controller Agent as a non-root user, by making a few adjustments to the deployment process. - -  - ---- - -## Before You Begin - -Before you follow the steps to deploy and run the Controller Agent as a non-root user, [install NGINX Controller]({{< ref "/controller/admin-guides/install/install-nginx-controller" >}}) following the normal installation process. Once you reach the step **Install NGINX Controller Agent** follow the steps in this guide instead. 
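If the account you plan to run the Controller Agent under doesn't exist yet, create it before you begin. The commands below are an illustrative sketch; the `controller-agent` user and group names are placeholders, not names required by the product -- use whatever values you intend to pass as `CONTROLLER_USER` and `CONTROLLER_GROUP` later in this guide:

```bash
# Illustrative sketch: create a dedicated non-root user and group for the Agent.
# The names are placeholders; match them to the CONTROLLER_USER and
# CONTROLLER_GROUP values you plan to use during installation.
sudo groupadd controller-agent
sudo useradd --system --gid controller-agent --shell /sbin/nologin controller-agent
id controller-agent
```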
- -  - ---- - -## Install NGINX Controller Agent to Run as a Non-root User - -Take the following steps to add an instance to NGINX Controller: - -1. Open the NGINX Controller user interface and log in. -1. Select the NGINX Controller menu icon, then select **Infrastructure**. -1. On the **Infrastructure** menu, select **Instances** > **Overview**. -1. On the **Instances** overview page, select **Create**. -1. On the **Create Instance** page, select **Add an existing instance**. -1. Add a name for the instance. If you don't provide a name, the hostname of the instance is used by default. -1. To add the instance to an existing Location, select a Location from the list. Or to create a Location, select **Create New**. - - {{< call-out "important" >}} -Once set, the Location for an instance cannot be changed. If you need to change or remove the Location for an instance, you must [remove the instance from NGINX Controller]({{< ref "/controller/infrastructure/instances/manage-instances.md#delete-an-instance" >}}), and then add it back. - {{< /call-out >}} - -1. (Optional) By default, registration of NGINX Plus instances is performed over a secure connection. To use self-signed certificates with the Controller Agent, select **Allow insecure server connections to NGINX Controller using TLS**. For security purposes, we recommend that you secure the Controller Agent with signed certificates when possible. -1. Use SSH to connect and log in to the NGINX instance that you want to connect to NGINX Controller. -1. Copy the `curl` or `wget` command that's shown in the **Installation Instructions** section on the NGINX instance to download and install the Controller Agent package. When specified, the `-i` and `-l` options for the `install.sh` script refer to the instance name and Location, respectively. **You need to modify this command to use a non-root user** -1. Add the parameter `CONTROLLER_USER=''` to the `curl` or `wget` command, substituting the value in the brackets with your desired non-root user. -1. (Optional) Add the parameter `CONTROLLER_GROUP=''` to the `curl` or `wget` command, substituting the value in the brackets with your desired group. If this parameter is not set, a new group with the same name as the user will be created. -1. The `curl` or `wget` command looks similar to this example after applying the required changes: - - ```bash - curl -sS -L https:///install/controller-agent > install.sh && API_KEY='' CONTROLLER_USER='' CONTROLLER_GROUP='' -i -l - ``` - - {{< call-out "note" >}} - -Make sure you enter the commands to download and run the `install.sh` script on the NGINX Plus system, and not on the NGINX Controller. - -NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller v3.7 and later. - -If `CONTROLLER_USER` is not set, during the installation you will see the message `Installing agent to run as root` in red. - -Running agent as non-root changes the nap-syslog port to `5114` in both containerized and non-containerized instances. - - {{< /call-out >}} - -  - -After a few minutes, the NGINX instance will appear on the **Instances** overview page. - -For the NGINX Agent to run properly, NGINX Plus **must** be running as the same user and group as the Agent. To change the user and group NGINX Plus is running as after installing the agent: - -1. 
Manually edit the `/lib/systemd/system/nginx.service` file and under the `[Service]` block add the lines `User=` and `Group=` replacing the values in brackets with the values chosen during the installation. -1. Run `sudo chown -R : /etc/nginx/ /var/log/nginx/ /var/cache/nginx/` to change the permissions to your non-root user. -1. Ensure the ports NGINX is listening to are all above 1000: Check the NGINX `default.conf` file (usually `/etc/nginx/conf.d/default.conf`) and make sure that the `listen` values are all over `1000`. -1. (CentOS/RHEL) If you're installing the Controller Agent as a non-root user on CentOS or RHEL, make these additional changes: - - - In in the `[Service]` section of `/lib/systemd/system/nginx.service`, set the location for the `PIDfile` to: - - ```nginx - [Service] - PIDFile=/var/tmp/nginx.pid - ``` - - - In `/etc/nginx/nginx.conf`, set the `pid` directive to: - - ```nginx - pid /var/tmp/nginx.pid; - ``` - - -1. Run `sudo systemctl daemon-reload && sudo systemctl restart nginx` to pick up the new configuration. - -  - ---- - -## Verification Steps - -Run `top -u ` for your chosen user. The `/usr/bin/nginx-controller-agent` process will appear in the list of processes. - - -{{< versions "3.16" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/install-for-controller.md b/content/controller/admin-guides/install/install-for-controller.md deleted file mode 100644 index 7ddbeda31..000000000 --- a/content/controller/admin-guides/install/install-for-controller.md +++ /dev/null @@ -1,533 +0,0 @@ ---- -description: Take the steps in this guide to deploy F5 WAF for NGINX as a - datapath instance for use with NGINX Controller. -nd-docs: DOCS-645 -title: Using F5 WAF for NGINX with NGINX Controller -toc: true -weight: 500 -type: -- how-to ---- - -**Note:** Refer to the [F5 NGINX Controller Technical Specifications]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) guide to find out which distributions are supported for use with NGINX Controller and NGINX Controller Agent. - -## Setup - -Before proceeding, you should review the [Prerequisites]({{< ref "/nap-waf/v4/admin-guide/install#prerequisites" >}}), [Platform Security Considerations]({{< ref "/nap-waf/v4/admin-guide/install#platform-security-considerations" >}}) and [User Permissions]({{< ref "/nap-waf/v4/admin-guide/install#user-permissions" >}}) sections of the F5 WAF for NGINX Admin Guide. - - -## Install F5 WAF for NGINX - -**Note:** If a version of F5 WAF for NGINX prior to 3.6 is required, please contact the NGINX Sales team to assist with this configuration. - -{{}} - -{{%tab name="CentOS 7.4+"%}} - -1. If you already have NGINX packages in your system, back up your configs and logs: - - ```shell - sudo cp -a /etc/nginx /etc/nginx-plus-backup - sudo cp -a /var/log/nginx /var/log/nginx-plus-backup - ``` - -2. Create the `/etc/ssl/nginx/` directory: - - ```shell - sudo mkdir -p /etc/ssl/nginx - ``` - -3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: - - ```shell - nginx-repo.key - nginx-repo.crt - ``` - - **See Also:** You can use the [NGINX Controller REST API to download the key and cert files]({{< ref "/controller/admin-guides/install/get-n-plus-cert-and-key" >}}). - -4. Copy the above two files to the CentOS server's `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. - -5. 
Install prerequisite packages: - - ```shell - sudo yum install ca-certificates epel-release wget - ``` - -6. Remove any previously downloaded NGINX Plus repository file from /etc/yum.repos.d: - - ```shell - sudo rm /etc/yum.repos.d/nginx-plus-*.repo - ``` - -7. Add NGINX Plus repository by downloading the file nginx-plus-7.4.repo to /etc/yum.repos.d: - - ```shell - sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo - ``` - -8. Add F5 WAF for NGINX repository by downloading the file app-protect-7.repo to /etc/yum.repos.d: - - ```shell - sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo - ``` - -9. If NGINX Plus or F5 WAF for NGINX was previously installed on the system, clean up package manager cache information: - - ```shell - sudo yum clean all - ``` - -10. Install the latest F5 WAF for NGINX package. - - **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. - - If you wish to install a specific version, please replace `app-protect` with the target version, for example `app-protect-25+3.671.0`: - - ```shell - sudo yum install app-protect - ``` - -11. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: - - ```shell - sudo nginx -v - ``` - -12. Configure SELinux as appropriate per your organization’s security policies. F5 WAF for NGINX applies the prebuilt SELinux policy module during the installation. If you encounter any issues, check the [Troubleshooting Guide]({{< ref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). - - **Note:** NGINX Controller has specific [requirements regarding SELinux configuration]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#supported-distributions" >}}). - -13. Start the NGINX service: - - ```shell - sudo systemctl start nginx - ``` - -14. Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) - - If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, you need to start the `bd_agent`: - - ```shell - /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx - ``` - -15. Verify NGINX Plus and BD processes are running: - - ```shell - ps -ef | grep nginx - ps -ef | grep bd - ``` - - **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that `bd_agent` is running: - - ```shell - ps -ef | grep bd_agent - ``` - -16. To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< ref "/nap-waf/v4/admin-guide/install#centos--rhel-74--amazon-linux-2">}}). - -{{%/tab%}} - -{{%tab name="Red Hat Enterprise Linux 7.4+"%}} - -1. If you already have NGINX packages in your system, back up your configs and logs: - - ```shell - sudo cp -a /etc/nginx /etc/nginx-plus-backup - sudo cp -a /var/log/nginx /var/log/nginx-plus-backup - ``` - -2. Create the `/etc/ssl/nginx/` directory: - - ```shell - sudo mkdir -p /etc/ssl/nginx - ``` - -3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: - - ```shell - nginx-repo.key - nginx-repo.crt - ``` - -4. Copy the above two files to the RHEL server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. 
- -5. Install prerequisite packages: - - ```shell - sudo yum install ca-certificates wget - ``` - -6. Remove any previously downloaded NGINX Plus repository file from /etc/yum.repos.d: - - ```shell - sudo rm /etc/yum.repos.d/nginx-plus-*.repo - ``` - -7. Add NGINX Plus repository by downloading the file `nginx-plus-7.4.repo` to `/etc/yum.repos.d`: - - ```shell - sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/nginx-plus-7.4.repo - ``` - -8. Add F5 WAF for NGINX repository by downloading the file app-protect-7.repo to /etc/yum.repos.d: - - ```shell - sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.repo - ``` - -9. Enable Yum repositories to pull App Protect dependencies: - - - Download the file `dependencies.repo` to `/etc/yum.repos.d`: - - ```shell - sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo - ``` - - - If you have a RHEL subscription: - - ```shell - sudo yum-config-manager --enable rhui-REGION-rhel-server-optional rhui-REGION-rhel-server-releases rhel-7-server-optional-rpms - ``` - - - If you don't have a RHEL subscription, you can pull the dependencies from the CentOS repository. - - Create a new repository, `centos.repo`, in `/etc/yum.repos.d/` with the content: - - ```shell - [centos] - name=CentOS-7 - baseurl=http://ftp.heanet.ie/pub/centos/7/os/x86_64/ - enabled=1 - gpgcheck=1 - gpgkey=http://ftp.heanet.ie/pub/centos/7/os/x86_64/RPM-GPG-KEY-CentOS-7 - ``` - -10. If NGINX Plus or F5 WAF for NGINX was previously installed on the system, clean up package manager cache information: - - ```shell - sudo yum clean all - ``` - -11. Install the latest F5 WAF for NGINX package. - - **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. - - If you wish to install a specific version, please replace `app-protect` with the target version, for example `app-protect-25+3.671.0`: - - ```shell - sudo yum install app-protect - ``` - -12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: - - ```shell - sudo nginx -v - ``` - -13. Configure SELinux as appropriate per your organization’s security policies. F5 WAF for NGINX applies the prebuilt SELinux policy module during the installation. If you encounter any issues, check the [Troubleshooting Guide]({{< ref "/nap-waf/v4/troubleshooting-guide/troubleshooting#selinux" >}}). - - **Note:** NGINX Controller has specific [requirements regarding SELinux configuration]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#supported-distributions" >}}). - -14. Start the NGINX service: - - ```shell - sudo systemctl start nginx - ``` - -15. Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) - - If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, you need to start the `bd_agent`: - - ```shell - /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx - ``` - -16. Verify NGINX Plus and BD processes are running: - - ```shell - ps -ef | grep nginx - ps -ef | grep bd - ``` - - **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that `bd_agent` is running: - - ```shell - ps -ef | grep bd_agent - ``` - -17. 
To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< ref "/nap-waf/v4/admin-guide/install#centos--rhel-74--amazon-linux-2" >}}). - -{{%/tab%}} - -{{%tab name="Debian"%}} - -**Note:** As of NGINX Plus R24, support for Debian 9 is no longer available. As a consequence, F5 WAF for NGINX 3.1 is the final version available for this operating system version. - -1. If you already have NGINX packages in your system, back up your configs and logs: - - ```shell - sudo cp -a /etc/nginx /etc/nginx-plus-backup - sudo cp -a /var/log/nginx /var/log/nginx-plus-backup - ``` - -2. Create the `/etc/ssl/nginx/` directory: - - ```shell - sudo mkdir -p /etc/ssl/nginx - ``` - -3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: - - ```shell - nginx-repo.key - nginx-repo.crt - ``` - -4. Copy the above two files to the Debian server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. - -5. Install apt utils: - - ```shell - sudo apt-get install apt-transport-https lsb-release ca-certificates wget - ``` - -6. Download and add the NGINX signing key: - - ```shell - sudo wget https://cs.nginx.com/static/keys/nginx_signing.key && sudo apt-key add nginx_signing.key - ``` - -7. Remove any previous NGINX Plus repository and apt configuration files: - - ```shell - sudo rm /etc/apt/sources.list.d/nginx-plus.list - sudo rm /etc/apt/apt.conf.d/90nginx - ``` - -8. Add NGINX Plus repository: - - ```shell - printf "deb https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-plus.list - ``` - -9. Add F5 WAF for NGINX repository: - - ```shell - printf "deb https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-app-protect.list - ``` - -10. Download the apt configuration to `/etc/apt/apt.conf.d`: - - ```shell - sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx - ``` - -11. Update the repository and install the lastest supported F5 WAF for NGINX packages. - - **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. - - ```shell - sudo apt-get update - sudo apt-get install nginx-plus-module-appprotect - ``` - - To install a specific version based on the NGINX Plus version, for example `r25`, follow these steps: - - ```shell - sudo apt-cache policy app-protect | grep 25+ - 25+3.760.0-1~buster 500 - 25+3.733.0-1~buster 500 - 25+3.671.0-1~buster 500 - - sudo apt-get install nginx-plus-module-appprotect=25+3.671.0-1~buster - ``` - -12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: - - ```shell - sudo nginx -v - ``` - -13. Start the NGINX service: - - ```shell - sudo systemctl start nginx - ``` - -14. Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) - - If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, you need to start the `bd_agent`: - - ```shell - /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx - ``` - -15. 
Verify NGINX Plus and BD processes are running: - - ```shell - ps -ef | grep nginx - ps -ef | grep bd - ``` - - **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that the `bd_agent` is running: - - ```shell - ps -ef | grep bd_agent - ``` - -16. To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< ref "/nap-waf/v4/admin-guide/install#debian-10" >}}). - -{{%/tab%}} - -{{%tab name="Ubuntu"%}} - -1. If you already have NGINX packages in your system, back up your configs and logs: - - ```shell - sudo cp -a /etc/nginx /etc/nginx-plus-backup - sudo cp -a /var/log/nginx /var/log/nginx-plus-backup - ``` - -2. Create the `/etc/ssl/nginx/` directory: - - ```shell - sudo mkdir -p /etc/ssl/nginx - ``` - -3. Log in to the [NGINX Customer Portal](https://my.f5.com) and download the following two files: - - ```shell - nginx-repo.key - nginx-repo.crt - ``` - -4. Copy the above two files to the Ubuntu server’s `/etc/ssl/nginx/` directory. Use an SCP client or another secure file transfer tool to perform this task. - -5. Install apt utils: - - ```shell - sudo apt-get install apt-transport-https lsb-release ca-certificates wget - ``` - -6. Download and add the NGINX signing key: - - ```shell - sudo wget https://cs.nginx.com/static/keys/nginx_signing.key && sudo apt-key add nginx_signing.key - ``` - -7. Remove any previous NGINX Plus repository and apt configuration files: - - ```shell - sudo rm /etc/apt/sources.list.d/nginx-plus.list - sudo rm /etc/apt/apt.conf.d/90nginx - ``` - -8. Add NGINX Plus repository: - - ```shell - printf "deb https://pkgs.nginx.com/plus/ubuntu `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-plus.list - ``` - -9. Add F5 WAF for NGINX repository: - - ```shell - printf "deb https://pkgs.nginx.com/app-protect/ubuntu `lsb_release -cs` nginx-plus\n" | sudo tee /etc/apt/sources.list.d/nginx-app-protect.list - ``` - -10. Download the apt configuration to `/etc/apt/apt.conf.d`: - - ```shell - sudo wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx - ``` - -11. Update the repository and install the latest F5 WAF for NGINX package. - - **See Also:** Please refer to [NGINX App Protect Compatibility Matrix]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#nginx-app-protect-compatibility-matrix" >}}) for specific version compatibility. - - ```shell - sudo apt-get update - sudo apt-get install app-protect - ``` - - To install a specific version based on the NGINX Plus version, for example `r25`, follow these steps: - - ```shell - sudo apt-cache policy app-protect | grep 25+ - 25+3.760.0-1~bionic 500 - 25+3.733.0-1~bionic 500 - 25+3.671.0-1~bionic 500 - - sudo apt-get install app-protect=25+3.671.0-1~bionic - ``` - -12. Check the NGINX binary version to ensure that you have NGINX Plus installed correctly: - - ```shell - sudo nginx -v - ``` - -13. Start the NGINX service: - - ```shell - sudo systemctl start nginx - ``` - -14. Start the `bd_agent` service (for Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2 only) - - If you plan to use this instance with Controller Agent 3.20.1, you need to start `bd_agent`: - - ```shell - /bin/su -s /bin/bash -c '/opt/app_protect/bin/bd_agent &' nginx - ``` - -15. 
Verify NGINX Plus and BD processes are running: - - ```shell - ps -ef | grep nginx - ps -ef | grep bd - ``` - - **Note:** If you plan to use this instance with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2, also verify that `bd_agent` is running: - - ```shell - ps -ef | grep bd_agent - ``` - -16. To upgrade your signature package to the latest version and obtain the best protection, refer to [Updating App Protect Attack Signatures]({{< ref "/nap-waf/v4/admin-guide/install#ubuntu-1804" >}}). - - **Note:** Ubuntu 20.04 activates **AppArmor** by default, but F5 WAF for NGINX will run in unconfined mode after being installed as it is shipped with no AppArmor profile. To benefit from AppArmor access control capabilities for F5 WAF for NGINX, you will have to write your own AppArmor profile for F5 WAF for NGINX executables found in `/opt/app_protect/bin` such that it best suits your environment. - -{{%/tab%}} - -{{%tab name="Amazon Linux 2 LTS"%}} - -Using F5 WAF for NGINX with NGINX Controller isn't supported on Amazon Linux 2 LTS. - -{{%/tab%}} - -{{%tab name="Alpine"%}} - -Using F5 WAF for NGINX with NGINX Controller isn't supported on Alpine. - -{{%/tab%}} -{{}} - -
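Whichever distribution you installed on, you can repeat the process checks from the steps above in a single pass before adding the instance to NGINX Controller. This is only an illustrative wrapper around the `ps` commands already shown; remember that `bd_agent` is expected only when the instance will be used with Controller ADC Agent 3.20.1 or Controller APIM Agent 3.19.2:

```bash
# Illustrative wrapper around the verification steps shown above.
# Note: the "bd" pattern is loose and also matches bd_agent, mirroring the
# ps | grep checks in the installation steps.
for proc in nginx bd bd_agent; do
  if ps -ef | grep -v grep | grep -q "$proc"; then
    echo "$proc: running"
  else
    echo "$proc: not found"
  fi
done
```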
- -## Add F5 WAF for NGINX to NGINX Controller - -If this NGINX Plus instance is already managed by Controller, [restart the Agent]({{< ref "/controller/admin-guides/install/agent-restart" >}}) after F5 WAF for NGINX is installed. - -Otherwise, complete the tasks in the NGINX Controller [Add an F5 WAF for NGINX Instance]({{< ref "/controller/infrastructure/instances/add-nap-instance.md#add-the-nginx-app-protect-instance" >}}) guide. - -## Use F5 WAF for NGINX with NGINX Controller - -**Note:** When configuring F5 WAF for NGINX as a datapath instance for NGINX Controller, **you should not modify the `nginx.conf` file**. The `nginx.conf` file will be automatically updated when enabling WAF on a Component in NGINX Controller. - -Refer to the following NGINX Controller user guides for further information about how to secure your apps and/or APIs with NGINX Controller: - -- [Learn about App Security for the NGINX Controller App Delivery module]({{< ref "/controller/app-delivery/security/concepts/what-is-waf" >}}) -- [Add Security to your Apps with the NGINX Controller App Delivery module]({{< ref "/controller/app-delivery/security/tutorials/add-app-security-with-waf" >}}) -- [Add Advanced Security (WAF) to your APIs with the NGINX Controller API Management module]({{< ref "/controller/api-management/manage-apis.md#define-the-routing-rules" >}}). diff --git a/content/controller/admin-guides/install/install-nginx-controller-agent.md b/content/controller/admin-guides/install/install-nginx-controller-agent.md deleted file mode 100644 index 65aa4ab54..000000000 --- a/content/controller/admin-guides/install/install-nginx-controller-agent.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -description: How to install, upgrade, and uninstall the F5 Controller Agent. -nd-docs: DOCS-254 -title: Install NGINX Controller Agent -toc: true -weight: 150 -type: -- tutorial ---- - -## Overview - -This page shows how to install, update, and uninstall the F5 NGINX Controller Agent. - -You can use the NGINX Controller Agent to monitor your systems with the NGINX Controller. - -## Objectives - -- Install the NGINX Controller Agent -- Upgrade the NGINX Controller Agent to a newer version -- Uninstall the NGINX Controller Agent - -## Install the NGINX Controller Agent - -{{< call-out "note" >}} If you want to run the NGINX Controller Agent as a non-root user, follow the alternative instructions in the [Install NGINX Controller Agent for Non-root User]({{< ref "/controller/admin-guides/install/install-agent-non-root.md" >}}) guide instead of the steps provided in this section. {{< /call-out>}} - -Take the following steps to add an instance to NGINX Controller: - -1. Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select **Infrastructure**. -3. On the **Infrastructure** menu, select **Instances** > **Overview**. -4. On the **Instances** overview page, select **Create**. -5. On the **Create Instance** page, select **Add an existing instance**. -6. Add a name for the instance. If you don't provide a name, the hostname of the instance is used by default. -7. To add the instance to an existing [Instance Group]({{< ref "/controller/infrastructure/instances/manage-instances.md#instance-groups" >}}), select an Instance Group from the list. Or to create an Instance Group, select **Create New**. -8. To add the instance to an existing Location, select a Location from the list. Or to create a Location, select **Create New**. 
- - {{< call-out "important" >}} -Once set, the Location for an instance cannot be changed. If you need to change or remove the Location for an instance, you must [remove the instance from NGINX Controller]({{< ref "/controller/infrastructure/instances/manage-instances.md#delete-an-instance" >}}), and then add it back. - {{< /call-out >}} - - {{< call-out "important" >}} -Instances and the instance groups they belong to should specify the same location; however, this requirement is not currently enforced. If different locations are specified, the instance group's location takes precedence. This is important to remember when [assigning locations to workload groups]({{< ref "/controller/app-delivery/manage-apps.md#workload-groups">}}). - {{< /call-out >}} - -9. (Optional) By default, registration of NGINX Plus instances is performed over a secure connection. To use self-signed certificates with the Controller Agent, select **Allow insecure server connections to NGINX Controller using TLS**. For security purposes, we recommend that you secure the Controller Agent with signed certificates when possible. -10. Use SSH to connect and log in to the NGINX instance that you want to connect to NGINX Controller. -11. Run the `curl` or `wget` command that's shown in the **Installation Instructions** section on the NGINX instance to download and install the Controller Agent package. When specified, the `-i` and `-l` options for the `install.sh` script refer to the instance name and Location, respectively. - - {{< call-out "note" >}} - -Make sure you enter the commands to download and run the `install.sh` script on the NGINX Plus system, and not on the NGINX Controller. - -NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller v3.7 and later. - - {{< /call-out >}} - -After a few minutes, the NGINX instance will appear on the **Instances** overview page. - - -## Update the NGINX Controller Agent - -When you [update NGINX Controller]({{< ref "/controller/admin-guides/install/install-nginx-controller.md#update-nginx-controller" >}}), you also need to update the NGINX Controller Agent software on each monitored NGINX Plus instance. - -To update the NGINX Controller Agent, take the following steps: - -1. Open the NGINX Controller user interface and log in. -1. Select the NGINX Controller menu icon, then select **Infrastructure**. -1. On the **Infrastructure** menu, select **Instances** > **Overview**. -1. On the **Instances** overview page, select **Create**. -1. Follow the instructions in the **Install Instructions** pane to connect to the NGINX instance and install the updated Controller Agent package. - - {{< call-out "note" >}} - -NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller 3.7 and later. - - {{< /call-out >}} - - -## Uninstall the Analytics, Visibility, and Reporting Daemon (AVRD) - -NGINX Controller uses an [Analytics, Visibility, and Reporting daemon (AVRD)]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) to aggregate and report app-centric metrics. You can use these metrics to monitor your apps' performance and health. 
- -To uninstall AVRD and the supporting modules, run the following command on each dataplane instance: - -- Debian/Ubuntu - - ```bash - sudo apt-get purge avrd nginx-plus-module-metrics avrd-libs - ``` - -- RedHat/CentOS - - ```bash - sudo yum remove avrd avrd-metrics nginx-plus-module-metrics - ``` - - -## Uninstall the NGINX Controller Agent and Delete an Instance - -Take the following steps to uninstall the Controller Agent and delete an instance. - -{{< call-out "important" >}}Be sure to uninstall the Controller Agent first, before you delete an instance. If you don't uninstall the Controller Agent first, the instance may reappear in NGINX Controller after it has been deleted.{{< /call-out >}} - -1. On your NGINX Plus instance, stop the Controller Agent service: - - - On Ubuntu/Debian: - - ```bash - service controller-agent stop - ``` - - - On CentOS/Red Hat Enterprise Linux: - - ```bash - systemctl stop controller-agent - ``` - -1. Run the appropriate command for your distribution to uninstall the Controller Agent: - - - On Ubuntu/Debian: - - ``` bash - apt-get purge nginx-controller-agent - ``` - - - On CentOS/Red Hat Enterprise Linux: - - ``` bash - yum remove nginx-controller-agent - ``` - - After the package is removed, you can safely delete the files in `/etc/controller-agent/` and `/var/log/nginx-controller/`. - -1. (Optional) If you use SELinux on CentOS or Red Hat Enterprise Linux, take the following steps to remove the SELinux policy that was created when the Controller Agent was installed: - - 1. Revert the installed permissions: - - ```bash - sudo semodule -r nginx - ``` - - 1. Remove the following files: - - - `nginx.te` - - `nginx.mod` - - `nginx.pp` - -1. Delete the NGINX Plus instance from the NGINX Controller user interface: - - 1. Open the NGINX Controller user interface and log in. - - 1. Select the NGINX Controller menu icon, then select **Infrastructure**. - - 1. On the **Infrastructure** menu, select **Instances** > **Overview**. - - 1. On the **Instances** overview page, select the NGINX Plus instance that you want to delete. - - 1. Select the delete icon (trash can). - -1. Delete alerts: - - {{< call-out "note" >}}When you delete an instance, any related alerts for that instance are not deleted automatically. You can delete the alerts manually, however.{{< /call-out >}} - - 1. Open the NGINX Controller user interface and log in. - 2. On the Analytics menu, select **Alerts > Alert Rules**. - 3. Select the alert rule that you want to delete. - 4. Select the delete (trash can) icon to delete the alert rule. - 5. Select **Delete** in the pop-up box to confirm that you want to proceed. 
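To confirm the Controller Agent was removed cleanly, you can check that the package is gone and that the leftover directories mentioned above have been cleared. This is an illustrative sketch only; run the package query that matches your distribution:

```bash
# Illustrative post-uninstall checks. Run the query that matches your distribution.
# Debian/Ubuntu:
dpkg -s nginx-controller-agent 2>/dev/null || echo "package removed"
# CentOS/Red Hat Enterprise Linux:
rpm -q nginx-controller-agent || echo "package removed"
# Directories that may safely be deleted once the package is removed:
ls -d /etc/controller-agent /var/log/nginx-controller 2>/dev/null || echo "no leftover directories"
```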
- - -## What's Next - -- [Customize how the NGINX Controller Agent collects metrics]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) -- [Start or Stop the Agent Service]({{< ref "/controller/admin-guides/install/agent-restart.md" >}}) -- [Manage your NGINX Instances]({{< ref "/controller/infrastructure/instances/manage-instances.md" >}}) -- [Manage Locations for your Instances]({{< ref "/controller/infrastructure/locations/manage-locations.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/install-nginx-controller-rhel-8.md b/content/controller/admin-guides/install/install-nginx-controller-rhel-8.md deleted file mode 100644 index 34431a5a2..000000000 --- a/content/controller/admin-guides/install/install-nginx-controller-rhel-8.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -description: This guide explains how to prepare your RHEL 8 system to install F5 NGINX - Controller and the NGINX Controller Agent. -nd-docs: DOCS-342 -title: Install NGINX Controller on RHEL 8 (experimental) -toc: true -weight: 200 -type: -- tutorial ---- - -## Preparing the F5 NGINX Controller Host - -To install NGINX Controller on RHEL 8, you must complete the following steps to allow iptables-based routing for Kubernetes. Failure to complete these steps may cause the installation to hang. - -### Update System Packages - -1. Before completing any other steps, update the packages on your system: - - ```bash - sudo yum -y upgrade - ``` - -### Install and Configure Docker - -Docker isn't available on RedHat 8 by default, so you'll need to add a Docker repository and install the required packages: - -1. Add the Docker repo: - - ```bash - sudo yum config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - ``` - -1. Install the Docker packages: - - ```bash - sudo yum install docker-ce-3:19.03.15-3.el8 docker-ce-cli-1:19.03.15-3.el8 containerd.io-1.3.9-3.1.el8 - ``` - -1. Set up the Docker daemon: - - ```bash - sudo mkdir -p /etc/docker - - sudo vi /etc/docker/daemon.json - ``` - - Paste the following JSON snippet into `daemon.json`: - - ```json - { - "exec-opts": ["native.cgroupdriver=systemd"], - "log-driver": "json-file", - "log-opts": { - "max-size": "10m", - "max-file": "2" - }, - "storage-driver": "overlay2" - } - ``` - -1. Run the following commands to set up the Docker service: - - ```bash - sudo systemctl start docker.service - - sudo systemctl status docker.service - - sudo systemctl enable docker.service - ``` - -### Install Required Packages and Kernel Modules - -Take the following steps to install the required packages and kernel modules. - -1. Install the traffic control utility: - - ``` bash - sudo yum install iproute-tc - ``` - -1. Run the following commands to ensure the required kernel modules are loaded at startup: - - ```bash - cat <}}). - -## Preparing the Data Plane Host - -1. For the NGINX Controller Agent to work on RHEL 8, you need to install the following package on each data plane host: - - ``` bash - sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - sudo dnf install -y xerces-c - ``` - -2. Complete the steps in the NGINX Controller Agent Installation guide to [install the NGINX Controller Agent]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent" >}}). 
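Before installing the Agent, you can confirm on each RHEL 8 data plane host that the extra packages from the previous step are actually present. An illustrative check:

```bash
# Illustrative: confirm the EPEL release package and the xerces-c dependency
# are installed on the RHEL 8 data plane host before installing the Agent.
rpm -q epel-release xerces-c
```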
- -## Troubleshooting - -You may encounter the following error when installing or updating NGINX Controller on RHEL 8: - -``` text -Status code: 403 for https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/repodata/repomd.xml -``` - -In this case, update your subscription manager on each RHEL 8 host as follows: - -```bash -sudo subscription-manager refresh -``` diff --git a/content/controller/admin-guides/install/install-nginx-controller.md b/content/controller/admin-guides/install/install-nginx-controller.md deleted file mode 100644 index 63ad87cb7..000000000 --- a/content/controller/admin-guides/install/install-nginx-controller.md +++ /dev/null @@ -1,664 +0,0 @@ ---- -description: This guide explains how to install and update F5 NGINX Controller. -nd-docs: DOCS-255 -title: Install NGINX Controller -toc: true -weight: 120 -type: -- tutorial ---- - -## Overview - -F5 NGINX Controller is NGINX's control-plane solution that manages the NGINX data plane. Built on a modular architecture, NGINX Controller enables you to manage the entire lifecycle of NGINX Plus, whether it's deployed as a load balancer, API gateway, or a proxy in a service mesh environment. - -To get started, download and run the installer. The installer will: - -- Perform prerequisite checks on your system and prompt for any missing dependencies. -- Prompt you to accept the terms of service agreement for NGINX Controller. -- Ask you for a series of parameters including Database, SMTP, Admin user, and FQDN settings. -- Place configuration and log files in appropriate file locations on your host system. -- Add extra repositories to the default package manager like `apt` or `yum` and install required packages. -- Launch NGINX Controller. - -  - ---- - -### Open Source Software Dependencies - -NGINX Controller uses a number of open source software packages in the product. You can find information about these dependencies in the [NGINX Controller Technical Specifications]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}). - -  - ---- - -## Before You Begin - -Before installing NGINX Controller, review the following prerequisites. - -{{< call-out "important" >}} -NGINX Controller should be deployed on a secure, internal network only. We strongly recommend against exposing the NGINX Controller API to the internet. -{{< /call-out >}} - -Things you'll need before installing NGINX Controller: - -- The `controller-installer-.tar.gz` package, downloaded from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads); - -- A license file for NGINX Controller, accessible via the [MyF5 Customer Portal](https://account.f5.com/myf5); - -- A dedicated environment (bare metal, VM, or cloud-hosted instance) on which to install NGINX Controller. For the supported Operating Systems and recommended specifications, see the [NGINX Controller Technical Specifications]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs" >}}) guide; - -  - ---- - -## Install NGINX Controller Prerequisites - -You can use the NGINX Controller `helper.sh prereqs` command to install the required system packages and Docker CE. - - - -| Options | Description | -|----------|-------------| -| `base` | Install the required Linux utilities. | -| `docker` | Install Docker CE. | -| `nfs` | Install NFS system packages. | - -To install all of the NGINX Controller prerequisites for your system at the same time, take the following steps: - -1. 
Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). - -1. Extract the installer package files: - - ```bash - tar xzf controller-installer-.tar.gz - ``` - -1. Run the helper script with the `prereqs` option: - - ```bash - cd controller-installer - ./helper.sh prereqs - ``` - -{{< call-out "note" >}} -After you've installed NGINX Controller, you can install any of the prerequisites by running the following command: - - ```bash -/opt/nginx-controller/helper.sh prereqs [base|docker|nfs] -``` - -{{< /call-out >}} - -  - ---- - -### Linux Utilities - -The following Linux utilities are required by the installation script. The script will let you know if any of the utilities are missing. - -- `awk` -- `bash` (4.0 or later) -- `conntrack` -- `coreutils`: `base64`, `basename`, `cat`, `comm`, `dirname`, `head`, `id`, `mkdir`, `numfmt`, `sort`, `tee` -- `curl` or `wget` -- `ebtables` -- `envsubst` (provided by the `gettext` package) -- `ethtool` -- `getent` -- `grep` -- `gunzip` (provided by the `gzip` package) -- `iproute` -- `iptables` -- `jq` (1.5 or later) -- `less` -- `openssl` -- `sed` -- `socat` -- `tar` -- `util-linux` -- `yum-plugin-versionlock` on RedHat/CentOS - -  - ---- - -### Docker Requirements - -If you have Internet access, NGINX Controller will install Docker for you as part of the installation process. - -If you prefer to install Docker on the host yourself, install the following: - -- [Docker Community Edition (CE)](https://docs.docker.com/engine/install/) 18.09 -- [Containerd.io](https://containerd.io/) 1.2.10 - -If you are using Ubuntu-20.04 and want to install Docker on your own, choose the following versions instead: - -- [Docker Community Edition (CE)](https://docs.docker.com/engine/install/ubuntu/) 19.03 -- [Containerd.io](https://containerd.io/) 1.2.13 - -{{< call-out "note" >}} -For instructions on installing Docker in offline scenarios on CentOS/RHEL 7, refer to the AskF5 [K84431427](https://support.f5.com/csp/article/K84431427) knowledge base article.{{< /call-out>}} - -{{< call-out "important" >}} You need to enable Docker log rotation to ensure that the logs don't consume all the free disk space on the server. For instructions on how to enable Docker log rotation, see the Docker guides [Configure logging drivers](https://docs.docker.com/config/containers/logging/configure/) and [JSON File logging driver](https://docs.docker.com/config/containers/logging/json-file/).{{< /call-out >}}  - -#### Red Hat Enterprise Linux - -To create container images on Red Hat Enterprise Linux, Red Hat requires you to register and entitle the host computer on which you'll build them. In this case, the host is where you're installing NGINX Controller. Once the host is registered with Red Hat, you can install Docker from the Red Hat Enterprise Linux Extras repository. See the [Red Hat "Getting Started with Containers"](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7) guide for instructions. - - -  - ---- - -### Kubernetes Requirements - -NGINX Controller ships with a required version of Kubernetes and will install Kubernetes for you. Be sure to install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. 
- -The following table lists the Kubernetes versions that are installed by NGINX Controller: - - -| NGINX Controller | Kubernetes | -|-----------------------|--------------------| -| v3.x | v1.15.5 | - -The [Kubernetes Pod DNS config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config) has a limit of six configured DNS search domain names. This is also the [`glibc` limit](https://man7.org/linux/man-pages/man5/resolv.conf.5.html). - -In NGINX Controller, Core-DNS creates three search domains that are determined at run-time and not in `/etc/resolv.conf`: - -- `.svc.cluster.local` -- `svc.cluster.local` -- `cluster.local ` - -In general, changing the settings in NGINX Controller's underlying Kubernetes cluster is not recommended. However, if you do change the cluster's Pod config to allow additional search domains, **you should not add more than three domains**. - - -  - ---- - -### PostgreSQL (Optional) - -When installing NGINX Controller, you can choose to have NGINX Controller install and manage a self-hosted -- also known as "embedded" -- [PostgreSQL](https://www.postgresql.org/) config database for you; this is the recommended implementation. If you choose to use the embedded, self-hosted config database, you can skip this section. - -Alternatively, you can install your own PostgreSQL database for the config database, which you manage; this is sometimes referred to as an "external config database" because it is externally managed by you. Continue reading if you're providing your own PostgreSQL database. - -Refer to the AskF5 KB article [K49481224](https://support.f5.com/csp/article/K49481224) for instructions on how to install PostgreSQL on CentOS 7 and Ubuntu 18.04 for use with NGINX Controller. - -- NGINX Controller supports the following versions of PostgreSQL: - - - PostgreSQL 12.x -- works with NGINX Controller 3.9 and later. - - PostgreSQL 9.5 -- works with NGINX Controller 3.0 and later. - -- The PostgreSQL database must be accessible from the NGINX Controller server. You can use a DNS-resolvable name or an IP address to connect to the database server (names in `/etc/hosts` are not allowed). -- Create the user with the `Create DB` permission. -- Configure PostgreSQL to allow SSL connections; client certificates should also be used for user authentication. - - **We strongly discourage disabling SSL for PostgreSQL for security reasons.** Consult the *Secure TCP/IP Connections with SSL* topic in the PostgreSQL manual for instructions and details: - - - [PostgreSQL 9.5](https://www.postgresql.org/docs/9.5/ssl-tcp.html) - - [PostgreSQL 12.x](https://www.postgresql.org/docs/12/ssl-tcp.html) - -- When installed on external NFS or EFS volumes, the config database should support a throughput of 2 MiB/s or greater. - - -  - ---- - -## Install NGINX Controller - -Install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. NGINX Controller does not support pre-configured Kubernetes implementations at this time. The installer for NGINX Controller will install and configure Kubernetes for you. - -{{< call-out "important" >}}Before installing NGINX Controller, you must **disable swap on the host**; this is required by Kubernetes in order for the kubelet to work properly. Refer to your Linux distribution documentation for specific instructions for disabling swap for your system. 
For more information about this requirement, see the AskF5 knowledge base article [K82655201](https://support.f5.com/csp/article/K82655201) and the [kubeadm installation guide](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) in the Kubernetes documentation.{{< /call-out >}} - -{{< call-out "caution" >}}**For RHEL 8 deployments**, complete the additional prerequisite steps in the [Installing NGINX on RHEL 8]({{< ref "/controller/admin-guides/install/install-nginx-controller-rhel-8.md" >}}) guide before installing NGINX Controller. RHEL 8 support is a **beta** feature.{{< /call-out >}} - -To install NGINX Controller, take the following steps: - -1. Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). -1. Extract the installer package files: - - ```bash - tar xzf controller-installer-.tar.gz - ``` - -1. Run the install script: - - ```bash - cd controller-installer - ./install.sh - ``` - - {{< call-out "important" >}}Installing NGINX Controller as `root` is **not supported** on multi-node clusters. Instead, create a user with `sudo` permission for installing and performing all operations with NGINX Controller. Further, NGINX Controller scripts should also run with this dedicated user; scripts shouldn't be run as `sudo`, `sudo su`, or as the `root` user directly.{{< /call-out >}} - - {{< call-out "note" >}}If an HTTPS proxy is configured for the whole system, you should disable the proxy for the IP address and hostname of the host that you're running the NGINX Controller install script on. - For example, run the command `export NO_PROXY=,`. {{< /call-out >}} - - The installation script walks through a series of steps and asks for the following input: - - - **Config database configuration**. Specify whether to use an embedded, self-hosted PostgreSQL database for the config database, or if you want to provide your own external PostgreSQL database. If you choose to provide your own database, make sure you've reviewed the [PostgreSQL prerequisites](#postgresql-optional). - - **Config database volume type**: Specify the type of volume to use to store the config database: local, NFS, or AWS. We recommend choosing `local` only for demo and trial purposes. - - {{< call-out "note" >}}Refer to the [NGINX Controller Technical Specifications Guide]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#local-or-external-storage" >}}) for more information about the volume options and requirements.{{< /call-out>}} - - - **Analytics database volume type**: Specify the type of volume to use to store the analytics database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. - - **EULA**: Read the end-user license agreement. Type either `y` to accept or `n` to exit. - - **SMTP** - - **SMTP Host**: Provide the host name or IP address of an SMTP server. This is used to send password recovery emails. For trial purposes, if you don't need to receive these communications, you can enter a value of "example.com" or something similar. - - **SMTP Port**: The port of the SMTP server. - - **SMTP Authentication**: Select `y` or `n` to authenticate when connecting to the SMTP server. - - **Use TLS for SMTP Communication**: Select `y` or `n` to use SSL for SMTP server connections. - - **Do not reply email address**: The sender's email address. For example, `donotreply@example.com`. - - **Admin** - - **First name**: The first name for the initial admin user. 
- - **Last name**: The last name for the initial admin user. - - **Email address**: The contact email address for the initial admin user. - - **Password**: The initial admin's password. Passwords must be 6-64 characters long and must include letters and digits. - - **FQDN**: Fully qualified domain name (FQDN) -- a resolvable domain name for the NGINX Controller server. The FQDN is used by Controller Agents when connecting to NGINX Controller. - {{< call-out "note" >}}We recommend setting the FQDN to a internal address when possible, to avoid exposing the traffic between the Agent and NGINX Controller. This also reduces the external traffic in cloud environments. {{< /call-out >}} - - **SSL/TLS certificates**: Type `y` to generate and use self-signed certs for running NGINX Controller over HTTPS, or type `n` to provide your own certs. - - {{< call-out "important" >}}If you provide your own SSL/TLS certificates, you'll need a complete certificate chain file, with the intermediate CA cert appended to the server cert; the server certificate must appear **before** the chained certificates in the combined file. If the certificate contains a wildcard Common Name (CN=*.example.com) it must also contain a Subject Alternate Name (SAN=nginx-controller.example.com). {{< /call-out >}} - -1. Log in to the NGINX Controller browser interface by navigating to the DNS, FQDN, or IP address of the NGINX Controller host, for example, `https:///login`. Use the admin email address and password that you provided during the installation process. - -1. Once the NGINX Controller installation has completed, you may safely delete the installer package that you downloaded and extracted. - - -  - ---- - -## License NGINX Controller - -To add a license to NGINX Controller, take the following steps: - -1. Go to `https:///platform/license` and log in. -1. In the **Upload a license** section, select an upload option: - - - **Upload license file** -- Locate and select your license file in the file explorer. - - **Paste your Association Token or license file** -- Paste your customer Association Token or the contents of your NGINX Controller license file. These are available on the [MyF5 Customer Portal](https://account.f5.com/myf5). - -1. Select **Save license**. - -{{< call-out "note" >}} -To add a license using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a PUT request to the `/platform/license` endpoint. Provide your CAT or NGINX Controller license as a base64-encoded string in the JSON request body. -{{< /call-out>}} - - -  - ---- - -## Back Up Cluster Config and Encryption Keys - -After installing NGINX Controller, you should back up the cluster config and encryption keys. You'll need these if you ever need to restore the NGINX config database on top of a new NGINX Controller installation. - -- To back up the NGINX Controller cluster configuration and encryption keys: - - ```bash - /opt/nginx-controller/helper.sh cluster-config save - ``` - - The file is saved to `/opt/nginx-controller/cluster-config.tgz`. - -- To restore the cluster's config and encryption keys, take the following steps: - - ```bash - /opt/nginx-controller/helper.sh cluster-config load - ``` - -  - ---- - -## Manage the NGINX Controller Process - -You can use the `helper.sh` script to start, stop, restart, and check the status of the NGINX Controller process. 
- -``` bash -/opt/nginx-controller/helper.sh controller start -/opt/nginx-controller/helper.sh controller stop -/opt/nginx-controller/helper.sh controller restart -/opt/nginx-controller/helper.sh controller status -``` - -  - ---- - -## Update NGINX Controller - -To update the NGINX Controller software, take the steps below. When complete, you must also update the Controller Agent software on each monitored NGINX Plus instance. - -When updating NGINX Controller on a multi-node cluster, run the `update.sh` script on each node individually -- the order in which you update the nodes doesn't matter. - -{{< call-out "warning" >}} Do not update the nodes in a multi-node cluster in parallel. Doing so may result in race conditions for certain jobs, such as database migrations, and may cause the cluster to become unavailable.{{< /call-out >}} - -{{< call-out "caution" >}} -We strongly recommend that you make a backup of the following information before proceeding, to avoid potential data and/or configuration loss: - -- [Back up the NGINX Controller databases]({{< ref "/controller/admin-guides/backup-restore" >}}). -- Back up the NGINX Controller cluster configuration and encryption keys. These are required if you need to restore the config database on top of a new installation of NGINX Controller. - - ```bash - /opt/nginx-controller/helper.sh cluster-config save - ``` - -- Back up the Controller Agent `agent.conf` file by copying it from its current location to a new location. This file is present on each NGINX Plus instance. - - ```bash - cp /etc/controller-agent/agent.conf - ``` - -{{< /call-out >}} - -1. Download the installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). - -1. Extract the installer package files: - - ```bash - tar xzf controller-installer-.tar.gz - ``` - -1. Before updating, check the NGINX Controller status to confirm the installation is healthy. - - ```bash - ./helper.sh controller status - ``` - - Resolve any degradations before updating. - -1. Run the update script: - - ```bash - cd controller-installer - ./update.sh - ``` - - {{< call-out "note" >}}If you're upgrading from an older version of NGINX Controller and you installed Controller as root user, use `--allow-with-root` flag when running an update script. {{< /call-out >}} - -1. If you are logged in to NGINX Controller using a web browser, sign out and log in again. - - - To sign out, select your username in the upper right-hand corner, and then select "Sign Out". For optimal performance, also flush your browser cache. - -{{< call-out "important" >}} After you upgrade NGINX Controller, you also need to [update the NGINX Controller Agent]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent" >}}) to the latest version. {{< /call-out >}} - -  - ---- - -## Uninstall NGINX Controller - -To uninstall NGINX Controller, run the uninstall script: - -```bash -/opt/nginx-controller/uninstall.sh -``` - -  - ---- - -## Install NGINX Controller Agent -{{< call-out "note" >}} If you want to run the NGINX Controller Agent as a non-root user, follow the alternative instructions in the [Install NGINX Controller Agent for Non-root User]({{< ref "/controller/admin-guides/install/install-agent-non-root.md" >}}) guide instead of the steps provided in this section. {{< /call-out>}} - -Install the Controller Agent on each NGINX Plus instance that you want to manage and monitor. - -Take the following steps to add an instance to NGINX Controller: - -1. 
Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select **Infrastructure**. -3. On the **Infrastructure** menu, select **Instances** > **Overview**. -4. On the **Instances** overview page, select **Create**. -5. On the **Create Instance** page, select **Add an existing instance**. -6. Add a name for the instance. If you don't provide a name, the hostname of the instance is used by default. -7. To add the instance to an existing [Instance Group]({{< ref "/controller/infrastructure/instances/manage-instances.md#instance-groups" >}}), select an Instance Group from the list. Or to create an Instance Group, select **Create New**. -8. To add the instance to an existing Location, select a Location from the list. Or to create a Location, select **Create New**. - - {{< call-out "important" >}} -Once set, the Location for an instance cannot be changed. If you need to change or remove the Location for an instance, you must [remove the instance from NGINX Controller]({{< ref "/controller/infrastructure/instances/manage-instances.md#delete-an-instance" >}}), and then add it back. - {{< /call-out >}} - - {{< call-out "important" >}} -Instances and the instance groups they belong to should specify the same location; however, this requirement is not currently enforced. If different locations are specified, the instance group's location takes precedence. This is important to remember when [assigning locations to workload groups]({{< ref "/controller/app-delivery/manage-apps.md#workload-groups">}}). - {{< /call-out >}} - -9. (Optional) By default, registration of NGINX Plus instances is performed over a secure connection. To use self-signed certificates with the Controller Agent, select **Allow insecure server connections to NGINX Controller using TLS**. For security purposes, we recommend that you secure the Controller Agent with signed certificates when possible. -10. Use SSH to connect and log in to the NGINX instance that you want to connect to NGINX Controller. -11. Run the `curl` or `wget` command that's shown in the **Installation Instructions** section on the NGINX instance to download and install the Controller Agent package. When specified, the `-i` and `-l` options for the `install.sh` script refer to the instance name and Location, respectively. - - {{< call-out "note" >}} - -Make sure you enter the commands to download and run the `install.sh` script on the NGINX Plus system, and not on the NGINX Controller. - -NGINX Controller 3.6 and earlier require Python 2.6 or 2.7. You'll be prompted to install Python if it's not installed already. Python is not required for NGINX Controller v3.7 and later. - - {{< /call-out >}} - -After a few minutes, the NGINX instance will appear on the **Instances** overview page. - - -  - ---- - -## Troubleshooting - -If NGINX Controller isn't working how you expect, see the knowledge base article [K03263142](https://support.f5.com/csp/article/K03263142) for installation troubleshooting procedures. - -### Create a Support Package - -You can create a support package for NGINX Controller that you can use to diagnose issues. - -{{< call-out "note" >}} -You will need to provide a support package if you open a ticket with NGINX Support via the [MyF5 Customer Portal](https://account.f5.com/myf5). 
-{{< /call-out >}}  - -```bash -/opt/nginx-controller/helper.sh supportpkg [-o|--output ] [-s|--skip-db-dump] [-t|--timeseries-dump ] -``` - - - -| Options | Description | -|----------|-------------| -| `-o` \| `--output` | Save the support package file to ``. | -| `-s` \| `--skip-db-dump` | Don't include the database dump in the support package. | -| `-t` \| `--timeseries-dump ` | Include the last `` of timeseries data in the support package (default 12 hours). | - -Take the following steps to create a support package: - -1. Open a secure shell (SSH) connection to the NGINX Controller host and log in as an administrator. - -1. Run the `helper.sh` utility with the `supportpkg` option: - - ```bash - /opt/nginx-controller/helper.sh supportpkg - ``` - - The support package is saved to: - - `/var/tmp/supportpkg-.tar.gz` - - For example: - - `/var/tmp/supportpkg-20200127T063000PST.tar.gz` - -1. Run the following command on the machine where you want to download the support package to: - - ``` bash - scp @:/var/tmp/supportpkg-.tar.gz /local/path - ``` - -  - -#### Support Package Details - -The support package is a tarball that includes NGINX Controller configuration information, logs, and system command output. Sensitive information, including certificate keys, is not included in the support package. - -The support package gathers information from the following locations: - -```md -. -├── database -│   ├── common.dump - full dump of the common database -│   ├── common.dump_stderr - any errors when dumping the database -│   ├── common-apimgmt-api-client-api-keys.txt - contents of apimgmt_api_client_api_keys table from the common database -│   ├── common-apimgmt-api-client-groups.txt - contents of apimgmt_api_client_groups table from the common database -│   ├── common-email-verification.txt - contents of email_verification table from the common database -│   ├── common-oauth-clients.txt - contents of oauth_clients table from the common database -│   ├── common-settings-license.txt - contents of settings_license table from the common database -│   ├── common-settings-nginx-plus.txt - contents of settings_nginx_plus table from the common database -│   ├── common-table-size.txt - list of all tables and their size in the common database -│   ├── data-table-size.txt - list of all tables and their size in the data database -│   ├── postgres-database-size.txt - size of every database -│   ├── postgres-long-running-queries.txt - all queries running longer than 10 seconds -│   ├── system.dump - full dump of the system database -│   ├── system-account-limits.txt - contents of account_limits table from the system database -│   ├── system-accounts.txt - contents of accounts table from the system database -│   ├── system-deleted-accounts.txt - contents of deleted_accounts table from the system database -│   ├── system-deleted-users.txt - contents of deleted_users table from the system database -│   ├── system-users.txt - contents of users table from the system database -│   └── system-table-size.txt - list of all tables and their size in the system database -├── k8s - output of `kubectl cluster-info dump -o yaml` augmented with some extra info -│   ├── apiservices.txt - output of `kubectl get apiservice` -│   ├── kube-system - contents of the kube-system namespace -│   │   ├── coredns-5c98db65d4-6flb9 -│   │   │   ├── desc.txt - pod description -│   │   │   ├── logs.txt - current logs -│   │   │   └── previous-logs.txt - previous logs, if any -│   │   ├── ... 
-│   │   ├── daemonsets.yaml - list of daemonsets -│   │   ├── deployments.yaml - list of deployments -│   │   ├── events.yaml - all events in this namespace -│   │   ├── namespace.yaml - details of the namespace, including finalizers -│   │   ├── pods.txt - output of `kubectl get pods --show-kind=true -o wide` -│   │   ├── pods.yaml - list of all pods -│   │   ├── replicasets.yaml - list of replicasets -│   │   ├── replication-controllers.yaml - list of replication controllers -│   │   ├── resources.txt - all Kubernetes resources in this namespace -│   │   └── services.yaml - list of services -│   ├── nginx-controller - contents of the nginx-controller namespace -│   │   ├── apigw-8fb64f768-9qwcm -│   │   │   ├── desc.txt - pod description -│   │   │   ├── logs.txt - current logs -│   │   │   └── previous-logs.txt - previous logs, if any -│   │   ├── ... -│   │   ├── daemonsets.yaml - list of daemonsets -│   │   ├── deployments.yaml - list of deployments -│   │   ├── events.yaml - all events in this namespace -│   │   ├── namespace.yaml - details of the namespace, including finalizers -│   │   ├── pods.txt - output of `kubectl get pods --show-kind=true -o wide` -│   │   ├── pods.yaml - list of all pods -│   │   ├── replicasets.yaml - list of replicasets -│   │   ├── replication-controllers.yaml - list of replication controllers -│   │   ├── resources.txt - all Kubernetes resources in this namespace -│   │   ├── services.yaml - list of services -│   ├── nodes.txt - output of `kubectl describe nodes` -│   ├── nodes.yaml - list of nodes -│   ├── resources.txt - all non-namespaced Kubernetes resources (including PersistentVolumes) -│   └── version.yaml - Kubernetes version -├── logs - copy of /var/log/nginx-controller/ -│   └── nginx-controller-install.log -├── os -│   ├── cpuinfo.txt - output of `cat /proc/cpuinfo` -│   ├── df-h.txt - output of `df -h` -│   ├── df-i.txt - output of `df -i` -│   ├── docker-container-ps.txt - output of `docker container ps` -│   ├── docker-images.txt - output of `docker images` -│   ├── docker-info.txt - output of `docker info` -│   ├── docker-stats.txt - output of `docker stats --all --no-stream` -│   ├── docker-version.txt - output of `docker version` -│   ├── du-mcs.txt - output of `du -mcs /opt/nginx-controller/* /var/log /var/lib` -│   ├── env.txt - output of `env` -│   ├── firewall-cmd.txt - output of `firewall-cmd --list-all` -│   ├── free.txt - output of `free -m` -│   ├── hostname-all-fqdns.txt - output of `hostname --all-fqdns` -│   ├── hostname-fqdn.txt - output of `hostname --fqdn` -│   ├── hostname.txt - output of `hostname` -│   ├── hostsfile.txt - output of `cat /etc/hosts` -│   ├── ip-address.txt - output of `ip address` -│   ├── ip-neigh.txt - output of `ip neigh` -│   ├── ip-route.txt - output of `ip route` -│   ├── iptables-filter.txt - output of `iptables -L -n -v` -│   ├── iptables-mangle.txt - output of `iptables -L -n -v -t mangle` -│   ├── iptables-nat.txt - output of `iptables -L -n -v -t nat` -│   ├── iptables-save.txt - output of `iptables-save` -│   ├── journal-kubelet.txt - output of `journalctl -q -u kubelet --no-pager` -│   ├── lspci.txt - output of `lspci -vvv` -│   ├── netstat-nr.txt - output of `netstat -nr` -│   ├── ps-faux.txt - output of `ps faux` -│   ├── pstree.txt - output of `pstree` -│   ├── ps.txt - output of `ps aux --sort=-%mem` -│   ├── resolvconf.txt - output of `cat /etc/resolv.conf` -│   ├── selinux-mode.txt - output of `getenforce` -│   ├── ss-ltunp.txt - output of `ss -ltunp` -│   ├── swapon.txt - output of 
`swapon -s` -│   ├── sysctl.txt - output of `sysctl -a --ignore` -│   ├── systemd.txt - output of `journalctl -q --utc` -│   ├── top.txt - output of `top -b -o +%CPU -n 3 -d 1 -w512 -c` -│   ├── uname.txt - output of `uname -a` -│   ├── uptime.txt - output of `cat /proc/uptime` -│   └── vmstat.txt - output of `cat /proc/vmstat` -├── timeseries -│ ├── table-sizes.stat - stat table containing controller table sizes -│ ├── events.csv - events table dump in csv -│ ├── events.sql - events table schema -│ ├── metrics_1day.csv - metrics_1day table dump in csv -│ ├── metrics_1day.sql - metrics_1day table schema -│ ├── metrics_1hour.csv - metrics_1hour table dump in csv -│ ├── metrics_1hour.sql - metrics_1hour table schema -│ ├── metrics_5min.csv - metrics_5min table dump in csv -│ ├── metrics_5min.sql - metrics_5min table schema -│ ├── metrics.csv - metrics table dump in csv -│ ├── metrics.sql - metrics table schema -│ ├── system-asynchronous-metrics.stat - shows info about currently executing events or consuming resources -│ ├── system-events.stat - information about the number of events that have occurred in the system -│ ├── system-metrics.stat - system metrics -│ ├── system-parts.stat - information about parts of a table in the MergeTree family -│ ├── system-settings.stat - information about settings that are currently in use -│ └── system-tables.stat - information about all the tables -└── version.txt - Controller version information -``` - - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/nginx-controller-tech-specs.md b/content/controller/admin-guides/install/nginx-controller-tech-specs.md deleted file mode 100644 index de963dce1..000000000 --- a/content/controller/admin-guides/install/nginx-controller-tech-specs.md +++ /dev/null @@ -1,425 +0,0 @@ ---- -description: Guidelines and recommendations for configuring F5 NGINX Controller. -nd-docs: DOCS-256 -title: NGINX Controller Tech Specs -toc: true -weight: 100 -type: -- reference ---- - -## Overview - -This guide lists the technical recommendations for F5 NGINX Controller v3 and NGINX Controller Agent. Review this guide before installing or updating NGINX Controller or NGINX Controller Agent. - -## Supported Distributions - -NGINX Controller, the NGINX Controller Agent, and the NGINX Controller Application Security Add-on support the following distributions and architectures. - -{{< call-out "note" >}}Refer to the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) guide for the distributions that NGINX Plus supports.{{< /call-out>}} - -{{< bootstrap-table "table table-striped table-bordered" >}} - -|Distribution
and Version|NGINX Controller
(Control Plane)|Agent
(Data Plane)|ADC App. Sec.
(Data Plane)|APIM Adv. Sec.
(Data Plane)|Notes| -|--- |--- |--- |--- |--- |--- | -|Amazon Linux
2
(x86_64)| Not supported|v3.0+ |Not supported|Not supported| | -|Amazon Linux
2017.09+
(x86_64)| Not supported |v3.0+|Not supported |Not supported| | -|CentOS
6.5+
(x86_64)| Not supported |v3.0+| Not supported |Not supported| • CentOS 6.5 and later versions in the CentOS 6 family are partially supported.
• This distribution does not support AVRD.| -|CentOS
7.4+
(x86_64)|v3.0+|v3.0+ | v3.12+ |v3.19+| • CentOS 7.4 and later versions in the CentOS 7 family are supported.| -|Debian
8
(x86_64)| Not supported |v3.0–3.21|Not supported|Not supported|• This distribution does not support AVRD.| -|Debian
9
(x86_64)|v3.0+|v3.0–3.21 | v3.12+ |v3.19+ | | -|Debian
10
(x86_64)| Not supported |v3.17+ | v3.17+ |v3.19+| See the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/) for requirements for Debian 10. | -|Red Hat Enterprise Linux
6.5+| Not supported |v3.0+| Not supported | Not supported| • RHEL 6.5 and later versions in the RHEL 6 family are partially supported.| -|Red Hat Enterprise Linux
7.4+
(x86_64)|v3.5+|v3.5+ | v3.12+|v3.19+| • RHEL 7.4 and later versions in the RHEL 7 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | -|Red Hat Enterprise Linux
8.0+
(x86_64)|v3.22+|v3.22+ | v3.22+| Not supported | • RHEL 8.0 and later versions in the RHEL 8 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | -|Ubuntu
18.04 LTS
(x86_64)|v3.0+|v3.0+ |v3.13+|v3.19+| | -|Ubuntu
20.04 LTS
(x86_64)|v3.20+|v3.12+|v3.16.1+|v3.19+| | - -{{< /bootstrap-table >}} - - - - -#### Analytics, Visibility, and Reporting Daemon (AVRD) - -NGINX Controller v3.1 and later use an Analytics, Visibility, and Reporting daemon (AVRD) to aggregate and report app-centric metrics, which you can use to track and check the health of your apps. To learn more about these metrics, see the [NGINX Metrics Catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) topic. - - -  - ---- - -## Storage Requirements - -The following table shows the minimum storage requirements we recommend for NGINX Controller. Your final storage requirements may differ depending on your environment, configuration, and the number of instances, apps, and APIs you're managing. Production deployments, for example, will require more storage than trial deployments. Contact your NGINX Controller sales associate if you have questions about sizing for your particular environment. - -We recommend using a local volume for the analytics and config databases for trial deployments, for simplicity's sake so you can get started using NGINX Controller right away. For production environments, we recommend using an external volume for the databases for resiliency. - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Resource | Path(s) | Minimum Storage | -|-|-|-| -| NGINX Controller | /opt/nginx-controller | 80 GB | -| Analytics database | /opt/nginx-controller/clickhouse_data | • 50 GB
• 150 GB if App Security is enabled | -| Config database | /opt/nginx-controller/postgres_data | 10 GB | -| Logs | • /var/log/nginx-controller
• /var/log/journal
• /var/log/pods
• /var/lib/docker/containers
• /var/lib/kubelet
• /var/lib/kubernetes| 15 GB cumulative | - -{{< /bootstrap-table >}} - - -  - ---- - -## Supported Deployment Environments - -You can deploy NGINX Controller v3 into the following environments: - -- Bare metal -- Public cloud: Amazon Web Services, Google Cloud Platform, Microsoft Azure -- Virtual Machine - -  - ---- - -## NGINX Plus Instances - -NGINX Controller, using the Controller Agent, can monitor and manage up to 100 [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) instances. When using Controller App Security, NGINX Controller can monitor and manage up to 30 NGINX Plus instances with NGINX App Protect installed. - - -NGINX Controller supports the following [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) versions: - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| NGINX Plus | NGINX Controller | NGINX Controller ADC | NGINX Controller APIM | -|------------|------------------|----------------------|-----------------------| -| R30 | Not supported | 3.22.9+ | Not supported | -| R29 | Not supported | 3.22.9+ | 3.19.6+ | -| R28 | Not supported | 3.22.6+ | 3.19.6+ | -| R27 | Not supported | 3.22.4+ | 3.19.6+ | -| R26 | Not supported | 3.22.2+ | 3.19.6+ | -| R25 | Not supported | 3.20.1+ | 3.19.2+ | -| R24 | 3.17+ | 3.20+ | 3.18+ | -| R23 | 3.12+ | 3.20.0 - 3.22.2 | 3.18+ | -| R22 | 3.5+ | 3.20.0 - 3.22.1 | 3.18+ | -| R21 | 3.5 - 3.12 | Not supported | Not supported | -| R20 | 3.0 - 3.12 | Not supported | Not supported | -| R19 | 2.6 - 3.5 | Not supported | Not supported | - -{{< /bootstrap-table >}} - - -  - ---- - -## NGINX App Protect Compatibility Matrix - -The App Security add-on for the NGINX Controller Application Delivery module is compatible with the versions of NGINX Plus and NGINX App Protect shown in the table below. New releases of NGINX Controller ADC support the last four versions of NGINX Plus at release time. - -{{< call-out "note" >}} -Refer to [Using NGINX App Protect with NGINX Controller]({{< ref "controller/admin-guides/install/install-for-controller.md" >}}) for installation instructions and additional information. -{{< /call-out>}} - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| NGINX Controller version | NGINX App Protect version(s) | NGINX Plus version(s) | -|-------------------------------------|-------------------------------------------------------------------------------------------------|--------------------------------| -| NGINX Controller ADC v3.22.9 | v4.5
v4.3, v4.4
v4.0, v4.1, v4.2
v3.12, v3.11 | R30
R29
R28
R27 | -| NGINX Controller ADC v3.22.8 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | -| NGINX Controller ADC v3.22.7 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | -| NGINX Controller ADC v3.22.6 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | -| NGINX Controller ADC v3.22.5 | v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | -| NGINX Controller ADC v3.22.4 | v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | -| NGINX Controller ADC v3.22.3 | v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | -| NGINX Controller ADC v3.22.2 | v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | -| NGINX Controller ADC v3.22, v3.22.1 | v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | -| NGINX Controller ADC v3.21 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | -| NGINX Controller ADC v3.20.1 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | -| NGINX Controller ADC v3.20 | v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | -| NGINX Controller APIM v3.19.2 | v3.6
v3.5, v3.4 | R25
R24 | -| NGINX Controller APIM v3.19 | v3.5, v3.4 | R24 | -| NGINX Controller v3.18 | v3.5, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | -| NGINX Controller v3.17 | v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | -| NGINX Controller v3.16 | v3.1, v3.0, v2.3
v2.1.1 | R23
R22 | -| NGINX Controller v3.14, v3.15 | v3.0, v2.3
v2.1.1 | R23
R22 | -| NGINX Controller v3.13 | v2.3
v2.1.1 | R23
R22 | -| NGINX Controller v3.12 | v2.1.1 | R22 | - -{{< /bootstrap-table >}} - ---- - -## Supported Browsers - -NGINX Controller works best with the newest and the last prior version of these browsers with JavaScript, cookies, and SSL enabled: - -- [Google Chrome](https://www.google.com/chrome/) -- [Firefox](https://www.mozilla.org/en-US/firefox/new/) -- [Safari](https://support.apple.com/downloads/safari) -- [Internet Explorer](https://support.microsoft.com/en-us/help/17621/internet-explorer-downloads) and [Microsoft Edge](https://www.microsoft.com/en-us/edge) - -{{< call-out "important" >}} -You may need to turn off any ad blockers while using the NGINX Controller user interface. - -In some cases, the NGINX Controller user interface may not display analytics or security events if an ad blocker is enabled. Refer to the AskF5 KB article [K48603454](https://support.f5.com/csp/article/K48903454) to learn more about this issue and how to resolve it. -{{< /call-out >}} - - -  - ---- - -## Hardware Specifications - -The following minimum hardware specifications are required for each node running NGINX Controller: - -- RAM: 8 GB RAM -- CPU: 8-Core CPU @ 2.40 GHz or similar -- Disk space: 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. - -The NGINX Controller Agent consumes as little memory and CPU as possible. CPU usage should be under 10%, and RSS memory consumption should be just a few dozen MBs. If you notice the NGINX Controller Agent consuming resources at a higher rate, you should [contact NGINX Support]({{< ref "/controller/support/contact-support.md" >}}) for assistance. - -  - ---- - -## NGINX Controller Database Requirements - -When installing NGINX Controller, you can choose the type of volume to use for the analytics and config databases. The types of volumes that are supported are: - -- [Local Storage](#local-storage) -- [NFS](#nfs) -- [AWS EBS](#aws-ebs) - -We recommend using a local volume for the analytics and config databases for trial deployments, for simplicity's sake so you can get started using NGINX Controller right away. For production environments, we recommend using an external volume for the databases for resiliency. - -  - -### Local Storage - -When using local storage for the analytics and/or config database, we recommend the following specs: - -- 100 IOPS -- 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. - -{{< call-out "tip" >}} -To conserve IO and/or disk space, you can use a separate disk for the local storage directory `/opt/nginx-controller/clickhouse_data`. -{{< /call-out >}} - -  - -### NFS - -To use NFS for external storage for the analytics and/or config database, consider the following: - -- Make certain that the NFS version used by the server is supported by the client system where you're installing NGINX Controller. -- If you're using NFS v4 file locking or Network Lock Manager (NLM) on the NFS server, make sure that the client system that's running your NGINX Controller has access to the mount point. 
-- Install the `nfs-common` (on Ubuntu/Debian) or `nfs-utils` (on CentOS/RedHat) package on all hosts on which NGINX Controller will be installed. -- The `no_root_squash` option must be set for the mount point on the NFS server. If this is not allowed, the owner of the path used for the analytics database must be set to `101:101` and owner of the path for config database must be set to `70:70`. -- The config database should support a throughput of 2 MiB/s or greater. - -  - -### AWS EBS - -{{< call-out "important" >}} -If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone. -{{< /call-out >}} - -If you are installing NGINX Controller on [AWS EC2 instances](https://aws.amazon.com/ec2/getting-started/) and plan to use EBS volumes for the analytics and/or config database, consider the following: - -You will need add an IAM role like that shown below. - -- IAM Role for [Single-Node Installation]({{< ref "/controller/admin-guides/install/install-nginx-controller.md" >}}) - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - -- IAM Role for [Multi-Node Installation]({{< ref "/controller/admin-guides/install/resilient-cluster-aws.md" >}}) - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - 
"elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - -  - ---- - -## Supported PostgreSQL Versions - -NGINX Controller supports the following versions of PostgreSQL: - -- PostgreSQL 12.x -- works with NGINX Controller 3.9 and later. -- PostgreSQL 9.5 -- works with NGINX Controller 3.0 and later. - -For a system monitoring **100 NGINX Plus instances**, we recommend at least **32 GB of database storage**. Database storage requirements can vary, depending on the number of NGINX Plus instances, components, published API specs, and the churn rate for configuration changes. For monitor-only implementations, the database storage needs are small; for API Management (APIM) and/or App Delivery Controller (ADC) implementations in production, the storage needs are greater. - -{{< call-out "important" >}} -If you use PostgreSQL 12, we recommend disabling [Just-in-Time (JIT)](https://www.postgresql.org/docs/12/jit.html) compilation to improve NGINX Controller's performance. To disable JIT, edit the `postgresql.conf` file and set `jit=off`. -{{< /call-out >}} - - -  - ---- - -## Firewall/IP Settings - -Configure NGINX Controller with the following firewall settings: - -{{< bootstrap-table "table table-striped table-bordered" >}} - -|Port| Used by | Used for| -|---|---|---| -| 5432 TCP | NGINX Controller database | Incoming connections to the NGINX Controller database from the NGINX Controller host. This is the default PostgreSQL port. | -| 443 TCP | • NGINX Controller
• NGINX Controller licensing | • Incoming connections to NGINX Controller from a browser; for example, from an internal network and NGINX Plus instances
• Incoming and outgoing connections used to validate the entitlements for your NGINX Controller license | -| 8443 TCP | NGINX Controller | Incoming connections from NGINX Plus instances
You need to **open** port 8443 TCP if you're running **NGINX Controller v3.18.2 or earlier**| -| 8883 TCP | NGINX Controller licensing | Incoming and outgoing connections used to validate the entitlements for your NGINX Controller license
Port 8883 TCP needs to be **opened** only if you're running **NGINX Controller v3.15 or earlier**| - -{{< /bootstrap-table >}} - -If you have a firewall running on the NGINX Controller host, enable NAT (masquerade) and open the following ports. These ports are used for **internal traffic** only and don't need to be open to the outside: - -{{< bootstrap-table "table table-striped table-bordered" >}} - -|Port| Used by | Used for| -|---|---|---| -|2379 TCP
2380 TCP
6443 TCP|NGINX Controller|Incoming requests to the Kubernetes control plane; used for the Kubernetes API server and etcd| -|10250 TCP|NGINX Controller|Incoming requests to the Kubernetes worker node; used for the Kubelet API| -|10251 TCP|NGINX Controller|Incoming requests to the Kubernetes kube-scheduler; used for the pod scheduling| -|10252 TCP|NGINX Controller|Incoming requests to the Kubernetes kube-controller-manager; used for regulating the state of the system| -|8472 UDP|NGINX Controller|Used for pod-to-pod communication in multi-node resilient clusters| - -{{< /bootstrap-table >}} - -For more information about these ports, see the Kubernetes guide [Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#check-required-ports). - - -  - ---- - -## Supported Python Versions - -NGINX Controller and the NGINX Controller Agent versions 3.6 and earlier require Python 2.6 or 2.7. Python is not needed for NGINX Controller or the NGINX Controller Agent versions 3.7 and later. - -  - ---- - -## Open-Source Licenses - -The list of open-source packages and their licenses used by NGINX Controller can be found in the downloaded file that is part of the NGINX Controller package. On your NGINX Controller host, see `controller-installer/files/license-controller.md`. - -In addition, see the AskF5 KB article [Third-party software for NGINX Controller controller-datacollection-components](https://support.f5.com/csp/article/K30028643) for third-party software packages that may be used by or distributed with controller-datacollection-components. This information is not included in the `license-controller.md` that's mentioned above. - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/resilient-cluster-aws.md b/content/controller/admin-guides/install/resilient-cluster-aws.md deleted file mode 100644 index 839cc8f2e..000000000 --- a/content/controller/admin-guides/install/resilient-cluster-aws.md +++ /dev/null @@ -1,332 +0,0 @@ ---- -description: This guide explains how to deploy F5 NGINX Controller as a multi-node - resilient cluster on AWS. -nd-docs: DOCS-257 -title: Deploy NGINX Controller as a Resilient Cluster on AWS -toc: true -weight: 310 -type: -- tutorial ---- - -## Overview - -Complete the steps in this guide to deploy F5 NGINX Controller as a resilient, three-node cluster on AWS. A multi-node cluster ensures that NGINX Controller stays up even if one of the control-plane hosts becomes unavailable. - -### Failure Tolerance - -To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance. - -If a node fails in a resilient cluster, NGINX Controller automatically redirects traffic to the other working nodes. A multi-node cluster is operational with only two nodes; however, a two-node cluster isn't resilient to further failures. If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency. - -{{< call-out "important" >}}The failover time can take **up to 5 minutes** when a node fails. During this time, NGINX Controller may be unavailable while services are migrated and restarted. Resiliency will be restored once there are **three working nodes** in the cluster. 
-{{< /call-out >}} - -The following table shows how many nodes are needed for a cluster to have a quorum and what the failure tolerance is: - - - -| Cluster Size | Quorum | Failure Tolerance | -|--------------|--------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | 1 | - -Larger clusters aren't supported. - -  - ---- - -## Before You Begin - -### Implementation Considerations - -Before installing or configuring NGINX Controller as a multi-node cluster, review the following list of considerations to assist with planning: - -- Configuring NGINX Controller as a multi-node cluster on AWS requires **NGINX Controller 3.14 or later**. To upgrade from an earlier version, refer to the [Update NGINX Controller]({{< ref "/controller/admin-guides/install/install-nginx-controller.md#update-nginx-controller" >}}) steps for instructions. -- Data migration is not supported, so it's not possible to implement a multi-node cluster with local volumes without reinstalling NGINX Controller. -- If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone. -- Cluster config changes are orchestrated by a primary control plane node that writes to the external config database. Each NGINX Controller control plane node hosts a set of services (pods) that read and write data. Only the node that hosts the pod that manages the config data writes to the external config database. - - - - -  - ---- - -### Prerequisites - -{{< call-out "important" >}}If you plan to run NGINX Controller on AWS EC2 instances, we recommend you use NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations).{{< /call-out >}} - -Things you'll need before installing NGINX Controller as a resilient cluster: - -- Three hosts on which you can install NGINX Controller to create a cluster -- The `controller-installer-.tar.gz` package, which you can get from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). You need to upload and extract this tarball **on each host**. -- A license file for NGINX Controller -- A tool to send API requests, such as Postman or curl -- An external volume for the config database - - When installing NGINX Controller, you can choose to have NGINX Controller install and manage a self-hosted -- also known as "embedded" -- [PostgreSQL](https://www.postgresql.org/) database for you; this is the recommended implementation. Alternatively, you can [install your own PostgreSQL database for the config database]({{< ref "/controller/admin-guides/install/install-nginx-controller.md#postgresql-optional" >}}), which you manage; this is sometimes referred to as an "external config database" because it is externally managed by you. Regardless of whether you use an embedded or an externally managed config database, the config database must be on an external volume for resilient clusters. 
- -- An external volume for the analytics database - -  - ---- - -## Configure IAM Roles - -{{< call-out "important" >}}If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone.{{< /call-out >}} - -If you are installing NGINX Controller on [AWS EC2 instances](https://aws.amazon.com/ec2/getting-started/) and plan to use EBS volumes for the analytics and/or config database, you will need to add an IAM role like the one shown below. This will also allow the automatic creation of Elastic Load Balancers (ELBs). Additionally, for successful automatic creation of ELBs, all the EC2 instances that are or will be part of the cluster must be tagged with the following key-value pair: - `kubernetes.io/cluster/NGINX-CONTROLLER : owned` - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] -} -``` - -  - ---- - -## Install NGINX Controller - -- Complete the steps in the [NGINX Controller Installation Guide]({{< ref 
"/controller/admin-guides/install/install-nginx-controller.md" >}}) to install NGINX Controller on the first node. - -  - ---- - -## License NGINX Controller - -- Follow the steps to [license NGINX Controller]({{< ref "/controller/platform/licensing-controller.md" >}}). - -  - ---- - -## Add Nodes to the Cluster - -Nodes are additional control-plane hosts that you can add to your cluster to improve uptime resilience. For a resilient cluster, you should have at least three nodes, of which **two nodes must always be operational**. - -{{< call-out "important" >}} -When adding a third node to the cluster for the first time, NGINX Controller may become momentarily unavailable while the cluster is being created. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. -{{< /call-out >}} - -Take the following steps to add a node to the cluster: - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the **Cluster** overview page, select **Create Node**. -1. Add a name for the node. -1. (Optional) Add a description. -1. Add the hostname or IP address -- or both -- for the node. -1. Select **Save**. The new node appears in the list of nodes on the **Cluster** overview page with a `Configuring` status. -1. Choose the new node's name in the list, then select **View** (eye icon). A page displays with command-line instructions for adding the node. -1. Copy the `install.sh` command and join-key that are shown. -1. Open an SSH connection to the node that you're adding to the cluster. -1. (Optional) If you're adding a node that was previously deleted, uninstall NGINX Controller from the node if you haven't already, and then continue with the remaining steps in this procedure: - - ```bash - /opt/nginx-controller/uninstall.sh - ``` - -1. Upload and extract the `controller-installer-.tar.gz` tarball. -1. Run the `install.sh` command with the join-key that you copied in the previous step. If you get an error that the join-key has expired, you can get a new one by following the steps in this topic to add a node using the web interface or the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}). - - ```bash - cd controller-installer - ./install.sh --join-key - ``` - -1. After the installation finishes, the node status in the web interface changes to `Configured`. -1. Repeat these steps for each node that you want to add to the cluster. - -{{< call-out "note" >}} -To add nodes to your cluster using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a POST request to the `/platform/nodes` endpoint. -{{< /call-out>}} - -  - ---- - -## Add Load Balancer Alias to FQDN - -You must add the hostname or IP address for the load balancer as a CNAME or A record for the domain that's used as the Fully Qualified Domain Name (FQDN) for NGINX Controller. - -To get the hostname or IP address for the load balancer using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a GET request to the `/platform/global` endpoint. - -  - ---- - -## Delete a Node - -There might be situations when you need to delete a node, either temporarily for maintenance or permanently to decommission a node. - -If you need to remove a node temporarily, follow the steps in the [Add Nodes to the Cluster](#add-nodes-to-the-cluster) topic when you are ready to re-add it. 
Make sure to uninstall NGINX Controller from the node before re-installing NGINX Controller with the new join-key. - -{{< call-out "important" >}} -Deleting nodes can cause NGINX Controller to become momentarily unavailable while the cluster is being updated. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. When deleting nodes, make sure that **at least two nodes are always operational**. If the cluster has fewer than two working nodes, NGINX Controller may become unresponsive, and you may not be able to add new nodes. -{{< /call-out >}} - -{{< call-out "note" >}} -To delete nodes from your cluster using the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}), send a DELETE request to the Nodes endpoint. -{{< /call-out>}} - -To delete a node from the cluster using the web interface: - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the **Cluster** overview page, choose the node you want to delete, then select **Delete** (trash icon). -1. Select **Delete** to confirm. -1. To finish deleting a node from the cluster, uninstall NGINX Controller from the node: - - 1. SSH into the node that you're deleting from the cluster. - 1. Run the NGINX Controller uninstall script: - - ```bash - /opt/nginx-controller/uninstall.sh - ``` - -{{< call-out "note" >}} -To delete nodes from your cluster using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a DELETE request to the `/platform/nodes` endpoint. -{{< /call-out>}} - -  - ---- - -## Replace a Failed Node - -To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance. - -If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency. - -To replace a failed node: - -1. [Delete the failed node](#delete-a-node). -1. [Add a new node](#add-nodes-to-a-cluster). - -  - ---- - -## Updating a Cluster - -When updating NGINX Controller on a multi-node cluster, run the `update.sh` script on each node individually -- the order in which you update the nodes doesn't matter. - -{{< call-out "warning" >}}Do not update the nodes in a multi-node cluster in parallel. Doing so may result in race conditions for certain jobs, such as database migrations, and may cause the cluster to become unavailable.{{< /call-out >}} - -{{< call-out "important" >}} -Active users will be logged out from NGINX Controller during an update. We recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. -{{< /call-out >}} - -To update your cluster to a newer version of NGINX Controller, take the following steps: - -1. Before updating the cluster, [check each node's status]({{< ref "/controller/platform/manage-cluster.md#view-node-status" >}}) to confirm the nodes are healthy. Resolve any degradations before updating. -1. Download the new installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). - -1. Extract the installer package and save the contents to each node: - - ```bash - tar xzf controller-installer-.tar.gz - ``` - -1. 
Run the update script on each node -- the order in which you update the nodes doesn't matter: - - ```bash - cd controller-installer - ./update.sh - ``` - -  - -{{< versions "3.14" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/resilient-cluster-private-cloud.md b/content/controller/admin-guides/install/resilient-cluster-private-cloud.md deleted file mode 100644 index 5672df653..000000000 --- a/content/controller/admin-guides/install/resilient-cluster-private-cloud.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -description: This guide explains how to deploy F5 NGINX Controller as a multi-node - resilient cluster on a private cloud. -nd-docs: DOCS-258 -title: Deploy NGINX Controller as a Resilient Cluster on a Private Cloud -toc: true -weight: 300 -type: -- tutorial ---- - -## Overview - -Complete the steps in this guide to deploy F5 NGINX Controller as a resilient, three-node cluster on your private cloud. A multi-node cluster ensures that NGINX Controller stays up even if one of the control-plane hosts becomes unavailable. - -The diagram below shows how the different objects in a multi-node NGINX Controller cluster relate to each other. The control nodes communicate with an embedded, self-hosted database that is stored on an external volume. The NGINX Controller Agent -- and NGINX Controller users -- can access the cluster via a load balancer or floating IP address that is associated with NGINX Controller's FQDN. If a node in the cluster becomes unavailable for any reason, traffic is re-routed automatically to an available node. - -{{< img src="/ctlr/img/multi-node-diagram.png" alt="Diagram showing the relationship of objects in a multi-node cluster." width="639" height="689" >}} - - -### Failure Tolerance - -To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance. - -If a node fails in a resilient cluster, NGINX Controller automatically redirects traffic to the other working nodes. A multi-node cluster is operational with only two nodes; however, a two-node cluster isn't resilient to further failures. If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency. - -{{< call-out "important" >}}The failover time can take **up to 5 minutes** when a node fails. During this time, NGINX Controller may be unavailable while services are migrated and restarted. Resiliency will be restored once there are **three working nodes** in the cluster. -{{< /call-out >}} - -The following table shows how many nodes are needed for a cluster to have a quorum and what the failure tolerance is: - - - -| Cluster Size | Quorum | Failure Tolerance | -|--------------|--------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | 1 | - -Larger clusters aren't supported. - -  - ---- - -## Before You Begin - -### Implementation Considerations - -Before installing or configuring NGINX Controller as a multi-node cluster, review the following list of considerations to assist with planning: - -- Configuring NGINX Controller as a multi-node cluster on a private cloud requires **NGINX Controller 3.12 or later**. To upgrade from an earlier version, refer to the [Update NGINX Controller]({{< ref "/controller/admin-guides/install/install-nginx-controller.md#update-nginx-controller" >}}) steps for instructions. 
-- Data migration is not supported, so it's not possible to implement a multi-node cluster with local volumes without reinstalling NGINX Controller. -- If you plan to run NGINX Controller on AWS EC2 instances, we recommend using NFS shares for the external volumes. Using EBS shares for multi-node clusters is not recommended because of the [EBS Availability Zone limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html#considerations); for example, the requirement to have EC2 instances and EBS volumes in the same Availability Zone. -- Cluster config changes are orchestrated by a primary control plane node that writes to the external config database. Each NGINX Controller control plane node hosts a set of services (pods) that read and write data. Only the node that hosts the pod that manages the config data writes to the external config database. - -### Prerequisites - -Things you'll need before installing NGINX Controller as a resilient cluster: - -- Three hosts on which you can install NGINX Controller to create a cluster -- The `controller-installer-.tar.gz` package, which you can get from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). You need to upload and extract this tarball **on each host**. -- A license file for NGINX Controller -- A tool to send API requests, such as Postman or curl -- An external volume for the config database - - When installing NGINX Controller, you can choose to have NGINX Controller install and manage a self-hosted -- also known as "embedded" -- [PostgreSQL](https://www.postgresql.org/) database for you; this is the recommended implementation. Alternatively, you can [install your own PostgreSQL database for the config database]({{< ref "/controller/admin-guides/install/install-nginx-controller.md#postgresql-optional" >}}), which you manage; this is sometimes referred to as an "external config database" because it is externally managed by you. Regardless of whether you use an embedded or an externally managed config database, the config database must be on an external volume for resilient clusters. - -- An external volume for the analytics database - -  - ---- - -## Install NGINX Controller - -- Complete the steps in the [NGINX Controller Installation Guide]({{< ref "/controller/admin-guides/install/install-nginx-controller.md" >}}) to install NGINX Controller on the first node. - -  - ---- - -## License NGINX Controller - -- Follow the steps to [license NGINX Controller]({{< ref "/controller/platform/licensing-controller.md" >}}). - -  - ---- - -## Add Nodes to the Cluster - -Nodes are additional control-plane hosts that you can add to your cluster to improve uptime resilience. For a resilient cluster, you should have at least three nodes, of which **two nodes must always be operational**. - -{{< call-out "important" >}} -When adding a third node to the cluster for the first time, NGINX Controller may become momentarily unavailable while the cluster is being created. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. -{{< /call-out >}} - -Take the following steps to add a node to the cluster: - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the **Cluster** overview page, select **Create Node**. -1. Add a name for the node. -1. (Optional) Add a description. -1. 
Add the hostname or IP address -- or both -- for the node. -1. Select **Save**. The new node appears in the list of nodes on the **Cluster** overview page with a `Configuring` status. -1. Choose the new node's name in the list, then select **View** (eye icon). A page displays with command-line instructions for adding the node. -1. Copy the `install.sh` command and join-key that are shown. -1. Open an SSH connection to the node that you're adding to the cluster. -1. (Optional) If you're adding a node that was previously deleted, uninstall NGINX Controller from the node if you haven't already, and then continue with the remaining steps in this procedure: - - ```bash - /opt/nginx-controller/uninstall.sh - ``` - -1. Upload and extract the `controller-installer-.tar.gz` tarball. -1. Run the `install.sh` command with the join-key that you copied in the previous step. If you get an error that the join-key has expired, you can get a new one by following the steps in this topic to add a node using the web interface or the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}). - - ```bash - cd controller-installer - ./install.sh --join-key - ``` - -1. After the installation finishes, the node status in the web interface changes to `Configured`. -1. Repeat these steps for each node that you want to add to the cluster. - -{{< call-out "note" >}} -To add nodes to your cluster using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a POST request to the `/platform/nodes` endpoint. -{{< /call-out>}} - -  - ---- - -## Set the Floating IP - -**For private cloud deployments**, you must set a floating IP to complete setting up your multi-node resilient cluster. - -A floating IP -- also called a virtual IP -- is a static, routable IPv4 address that improves service resiliency by allowing NGINX Controller to continue to receive traffic if a node becomes unavailable. The floating IP is assigned to one of the cluster nodes, and if the node fails, the floating IP is automatically transferred to another node. The floating IP should not be in any DHCP pool. - -{{< call-out "important" >}} -The floating IP needs to be added as an A record for the domain that's used as the Fully Qualified Domain Name (FQDN) for NGINX Controller. - -NGINX Controller **does not support IPv6** addresses for the floating IP. -{{< /call-out >}} - -Take the following steps to add a floating IP for your private cloud cluster: - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the **Cluster** overview page, in the **Cluster Configuration** section, select the edit icon (pencil). -1. Select the **Use Floating IP** toggle to turn it on. -1. Add an IP address for the floating IP. -1. Select **Save**. -1. Complete the steps to [update the FQDN](#update-the-fqdn) to use the floating IP. - -{{< call-out "note" >}} -To set a floating IP using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a PATCH request to the `/platform/global` endpoint. -{{< /call-out>}} -  - ---- - -## Update the FQDN - -The Fully Qualified Domain Name (FQDN) should be a proper domain. The FQDN is used by Controller Agents to access NGINX Controller. It's also used to access the web interface. - -Updating the FQDN for NGINX Controller is a two-step process: - -1. Update the FQDN for NGINX Controller using the web interface or the REST API. -1. 
Update the Controller Agents to use the new FQDN. - -  - -### Update the FQDN for NGINX Controller - -To change the FQDN for NGINX Controller using the web interface, take the following steps: - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the Cluster overview page, in the **Cluster Configuration** section, select the edit icon (pencil). -1. In the FQDN box, type the new FQDN that you want to use. If you've [set a floating IP](#set-the-floating-ip), use that value for the FQDN. -1. Select the **Update API Gateway SSL Certificate** toggle. -1. Select an option for updating the API Gateway cert: - - - **Paste**: Paste the cert and key contents in the respective boxes. - - **File**: Browse for and upload the cert and key files. - -1. Select **Save**. The cluster services will restart. During this time, the web interface will be briefly unavailable. -1. Follow the steps to [update the FQDN for Controller Agents](#update-the-fqdn-for-controller-agents). - -{{< call-out "note" >}} -To change the FQDN for NGINX Controller using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a PATCH request to the `/platform/global` endpoint. -{{< /call-out>}} -  - -### Update the FQDN for Controller Agents - -To update the FQDN for Controller Agents, take the following steps on each instance where the Controller Agent is installed: - -1. Open the `/etc/controller-agent/agent.conf` file for editing. -1. Update the `api_url` value with the new FQDN: - - ```nginx - [cloud] - api_url = https://:8443/1.4 - ``` - -1. Save the changes. -1. Restart the Controller Agent: - - ```bash - sudo service controller-agent restart - ``` - - -  - ---- - -## Update the API Gateway SSL Certificate - -Take the following steps to update the API Gateway SSL certificate: - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the **Cluster** overview page, in the **Cluster Configuration** section, select the edit icon (pencil). -1. Select the **Update API Gateway SSL Certificate** toggle. -1. Select an option for updating the cert: - - - **Paste**: Paste the cert and key contents in the boxes. - - **File**: Browse for and upload the cert and key files. - -1. Select **Save**. - -{{< call-out "note" >}} -To update the API Gateway SSL certificate and key using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a PATCH request to the `/platform/global` endpoint. -{{< /call-out>}} - -  - ---- - -## View Node Status - -Take the following steps to view the status for a node: - -{{< call-out "note" >}} -To view a node's status using the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}), send a GET request to the Nodes endpoint. -{{< /call-out>}} - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the **Cluster** overview page, choose the node you want to view details for, then select **View** (eye icon). A panel opens and displays the node's current condition, including any errors. If you're adding a node to the cluster, the node status panel shows the command-line instructions to follow to complete setting up the node. 
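If you prefer to check node status from the command line, the following is a minimal sketch using the REST API. It assumes you have already authenticated and saved a session cookie to `cookie.txt` (see the login examples in the API reference), that `controller.example.com` stands in for your NGINX Controller FQDN, and that the nodes endpoint lives at `/api/v1/platform/nodes`, inferred from the `/platform/nodes` path referenced above.

```bash
# Sketch: list the cluster nodes and their current status via the REST API.
# Assumes a session cookie was previously saved to cookie.txt.
curl -b cookie.txt -X GET --url 'https://controller.example.com/api/v1/platform/nodes'

# If NGINX Controller uses a self-signed certificate, add -k to allow insecure connections.
```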
- -  - ---- - -## Delete a Node - -There might be situations when you need to delete a node, either temporarily for maintenance or permanently to decommission a node. - -If you need to remove a node temporarily, follow the steps in the [Add Nodes to the Cluster](#add-nodes-to-the-cluster) topic when you are ready to re-add it. Make sure to uninstall NGINX Controller from the node before re-installing NGINX Controller with the new join-key. - -{{< call-out "important" >}} -Deleting nodes can cause NGINX Controller to become momentarily unavailable while the cluster is being updated. For this reason, we recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. When deleting nodes, make sure that **at least two nodes are always operational**. If the cluster has fewer than two working nodes, NGINX Controller may become unresponsive, and you may not be able to add new nodes. -{{< /call-out >}} - -{{< call-out "note" >}} -To delete nodes from your cluster using the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}), send a DELETE request to the Nodes endpoint. -{{< /call-out>}} - -To delete a node from the cluster using the web interface: - -1. Open the NGINX Controller web interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Cluster**. -1. On the **Cluster** overview page, choose the node you want to delete, then select **Delete** (trash icon). -1. Select **Delete** to confirm. -1. To finish deleting a node from the cluster, uninstall NGINX Controller from the node: - - 1. SSH into the node that you're deleting from the cluster. - 1. Run the NGINX Controller uninstall script: - - ```bash - /opt/nginx-controller/uninstall.sh - ``` - -{{< call-out "note" >}} -To delete nodes from your cluster using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a DELETE request to the `/platform/nodes` endpoint. -{{< /call-out>}} - -  - ---- - -## Replace a Failed Node - -To be resilient, a cluster requires three working nodes. That's two nodes for a quorum and one node for failure tolerance. - -If one of the nodes in a multi-node cluster becomes degraded or fails, you must take action **as soon as possible** to recover or replace the failed node or risk losing resiliency. - -To replace a failed node: - -1. [Delete the failed node](#delete-a-node). -1. [Add a new node](#add-nodes-to-a-cluster). - -  - ---- - -## Updating a Cluster - -When updating NGINX Controller on a multi-node cluster, run the `update.sh` script on each node individually -- the order in which you update the nodes doesn't matter. - -{{< call-out "warning" >}}Do not update the nodes in a multi-node cluster in parallel. Doing so may result in race conditions for certain jobs, such as database migrations, and may cause the cluster to become unavailable.{{< /call-out >}} - -{{< call-out "important" >}} -Active users will be logged out from NGINX Controller during an update. We recommend updating NGINX Controller during a planned maintenance window to minimize disruptions. -{{< /call-out >}} - -To update your cluster to a newer version of NGINX Controller, take the following steps: - -1. Before updating the cluster, [check each node's status]({{< ref "/controller/platform/manage-cluster.md#view-node-status" >}}) to confirm the nodes are healthy. Resolve any degradations before updating. -1. 
Download the new installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). - -1. Extract the installer package and save the contents to each node: - - ```bash - tar xzf controller-installer-.tar.gz - ``` - -1. Run the update script on each node -- the order in which you update the nodes doesn't matter: - - ```bash - cd controller-installer - ./update.sh - ``` - -  - -{{< versions "3.12" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/try-nginx-controller-app-sec.md b/content/controller/admin-guides/install/try-nginx-controller-app-sec.md deleted file mode 100644 index 7d5bfee52..000000000 --- a/content/controller/admin-guides/install/try-nginx-controller-app-sec.md +++ /dev/null @@ -1,305 +0,0 @@ ---- -description: This quick-start tutorial shows you how to get started using F5 NGINX - Controller with the Application Security Add-on. -nd-docs: DOCS-259 -title: Trial NGINX Controller with App Security -toc: true -weight: 115 -type: -- tutorial ---- - -## Overview - -This quick-start tutorial shows you how to get started using F5 NGINX Controller with the Application Security Add-on ("App Security"). The App Security add-on to the NGINX Controller Application Delivery Module enables a web application firewall (WAF) that you can use to protect your apps. - -Take the steps in this guide to deploy NGINX Controller with App Security and deploy NGINX App Protect with NGINX Plus as a data plane instance for use with NGINX Controller. - -{{< call-out "caution" >}}In this tutorial, NGINX Controller will install an embedded, self-hosted PostgreSQL database suitable for demo and trial purposes only. **These instructions are not meant for use in production environments**.{{< /call-out >}} - -{{< call-out "note" >}}If you already have an active NGINX Controller trial and want to add App Security to it, you can start with the [Install NGINX App Protect with NGINX Plus](#install-nginx-app-protect-with-nginx-plus) section. {{< /call-out >}} - -  - ---- - -## Technical Requirements - -Be sure to review the [NGINX Controller Technical Specifications Guide]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) for additional requirements for your desired distribution and configuration. - -### Supported Distributions - -NGINX Controller with App Security supports the following distributions for deploying NGINX App Protect: - -- CentOS 7 (7.4+) -- Red Hat Enterprise Linux 7 (7.4+) -- Debian 9 -- Ubuntu 18.04 LTS, Ubuntu 20.04 LTS - -### Hardware Specs - -The following minimum hardware specifications are required for each node running NGINX Controller: - -- RAM: 8 GB RAM -- CPU: 8-Core CPU @ 2.40 GHz or similar -- Disk space: 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. - -### Supported NGINX Versions - -The App Security add-on for the NGINX Controller Application Delivery module is compatible with the versions of NGINX Plus and NGINX App Protect shown in the table below. New releases of NGINX Controller ADC support the last four versions of NGINX Plus at release time. 
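If you're not sure which versions an existing data plane instance is running, a quick check on that host can help you match it against the table below. This is an illustrative sketch that assumes a Debian or Ubuntu host and that NGINX App Protect was installed from the `app-protect` package; adjust the commands for your package manager on other distributions.

```bash
# NGINX Plus includes its release in the version string, for example "(nginx-plus-r30)".
nginx -v

# On Debian/Ubuntu, show the installed NGINX App Protect package version.
dpkg -s app-protect | grep -i '^version'
```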
- -{{< call-out "note" >}} -Refer to [Using NGINX App Protect with NGINX Controller]({{< ref "controller/admin-guides/install/install-for-controller.md" >}}) for installation instructions and additional information. -{{< /call-out>}} - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| NGINX Controller version | NGINX App Protect version(s) | NGINX Plus version(s) | -|-------------------------------------|-------------------------------------------------------------------------------------------------|--------------------------------| -| NGINX Controller ADC v3.22.9 | v4.5
v4.3, v4.4
v4.0, v4.1, v4.2
v3.12, v3.11 | R30
R29
R28
R27 | -| NGINX Controller ADC v3.22.8 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | -| NGINX Controller ADC v3.22.7 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | -| NGINX Controller ADC v3.22.6 | v4.0, v4.1
v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6 | R28
R27
R26
R25 | -| NGINX Controller ADC v3.22.5 | v3.12, v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | -| NGINX Controller ADC v3.22.4 | v3.11
v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2 | R27
R26
R25
R24 | -| NGINX Controller ADC v3.22.3 | v3.10.0, v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | -| NGINX Controller ADC v3.22.2 | v3.9.1, v3.9.0
v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3 | R26
R25
R24
R23 | -| NGINX Controller ADC v3.22, v3.22.1 | v3.8, v3.7, v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | -| NGINX Controller ADC v3.21 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | -| NGINX Controller ADC v3.20.1 | v3.6
v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R25
R24
R23
R22 | -| NGINX Controller ADC v3.20 | v3.5, v3.4, v3.3, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | -| NGINX Controller APIM v3.19.2 | v3.6
v3.5, v3.4 | R25
R24 | -| NGINX Controller APIM v3.19 | v3.5, v3.4 | R24 | -| NGINX Controller v3.18 | v3.5, v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | -| NGINX Controller v3.17 | v3.2
v3.1, v3.0, v2.3
v2.1.1 | R24
R23
R22 | -| NGINX Controller v3.16 | v3.1, v3.0, v2.3
v2.1.1 | R23
R22 | -| NGINX Controller v3.14, v3.15 | v3.0, v2.3
v2.1.1 | R23
R22 | -| NGINX Controller v3.13 | v2.3
v2.1.1 | R23
R22 | -| NGINX Controller v3.12 | v2.1.1 | R22 | - -{{< /bootstrap-table >}} - -  - ---- - -## Sign Up for a Trial License - -{{< call-out "note" >}}If you already have an active NGINX Controller trial instance that you want to add App Security to, you can skip this section.{{< /call-out >}} - -First, you need to sign up for a trial license for NGINX Controller. The trial includes access to NGINX Plus, the NGINX Controller Application Delivery module, and the Application Security add-on. - -1. Go to [MyF5](https://account.f5.com/myf5) and create a new account. -1. Verify your account and log in to MyF5. -1. On the MyF5 landing page, activate the NGINX Controller free trial. -1. On the MyF5 **Trials** page, select Launch Your Trial. -1. Download the NGINX Controller package. -1. Make note of your Association Token. You will use this to [license your NGINX Controller instance]({{< ref "/controller/platform/licensing-controller.md#add-a-license-to-nginx-controller" >}}). - - -  - ---- - -## Install NGINX Controller Prerequisites - -{{< call-out "note" >}}If you already have an active NGINX Controller trial instance that you want to add App Security to, you can skip this section.{{< /call-out >}} - -{{< include "controller/helper-script-prereqs.md" >}} - -  - ---- - -## Install NGINX Controller - -{{< call-out "note" >}}If you already have an active NGINX Controller trial instance that you want to add App Security to, you can skip this section.{{< /call-out >}} - -Install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. NGINX Controller does not support pre-configured Kubernetes implementations at this time. The installer for NGINX Controller will install and configure Kubernetes for you. - -{{< call-out "important" >}}Before installing NGINX Controller, you must **disable swap on the host**; this is required by Kubernetes in order for the kubelet to work properly. Refer to your Linux distribution documentation for specific instructions for disabling swap for your system. For more information about this requirement, see the AskF5 knowledge base article [K82655201](https://support.f5.com/csp/article/K82655201) and the [kubeadm installation guide](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) in the Kubernetes documentation.{{< /call-out >}} - -{{< call-out "caution" >}}**For RHEL 8 deployments**, complete the additional prerequisite steps in the [Installing NGINX on RHEL 8]({{< ref "/controller/admin-guides/install/install-nginx-controller-rhel-8.md" >}}) guide before installing NGINX Controller. RHEL 8 support is a **beta** feature.{{< /call-out >}} - -To install NGINX Controller, take the following steps: - -1. Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). -1. Extract the installer package files: - - ```bash - tar xzf controller-installer-.tar.gz - ``` - -1. Run the installation script: - - ```bash - cd controller-installer - ./install.sh - ``` - -1. When prompted to use an embedded config DB, type `y`. - -1. The installation script walks through a series of steps and asks for the following inputs: - - - **Config database volume type**: Specify the type of volume to use to store the config database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. 
- - {{< call-out "note" >}}Refer to the [NGINX Controller Technical Specifications Guide]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#local-or-external-storage" >}}) for more information about the volume options and requirements.{{< /call-out>}} - - - **Analytics database volume type**: Specify the type of volume to use to store the analytics database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. - - **EULA**: Read the end-user license agreement. Type either `y` to accept or `n` to exit. - - **SMTP** - - **SMTP Host**: Provide the host name or IP address of an SMTP server. This is used to send password recovery emails. For trial purposes, if you don't need to receive these communications, you can enter a value of "example.com" or something similar. - - **SMTP Port**: The port of the SMTP server. - - **SMTP Authentication**: Select `y` or `n` to authenticate when connecting to the SMTP server. - - **Use TLS for SMTP Communication**: Select `y` or `n` to use SSL for SMTP server connections. - - **Do not reply email address**: The sender's email address. For example, `donotreply@example.com`. - - **Admin** - - **First name**: The first name for the initial admin user. - - **Last name**: The last name for the initial admin user. - - **Email address**: The contact email address for the initial admin user. - - **Password**: The initial admin's password. Passwords must be 6-64 characters long and must include letters and digits. - - **FQDN**: Fully qualified domain name (FQDN) -- a resolvable domain name for the NGINX Controller server. You can use the FQDN to access the NGINX Controller web interface. - Additionally, the FQDN is used by Controller Agents when connecting to NGINX Controller. - - **SSL/TLS certificates**: Type `y` to generate and use self-signed certs for running NGINX Controller over HTTPS, or type `n` to provide your own certs. - - {{< call-out "important" >}} -If you provide your own SSL/TLS certificates, you'll need a complete certificate chain file, with the intermediate CA cert appended to the server cert; the server certificate must appear **before** the chained certificates in the combined file. - {{< /call-out >}} - -1. Log in to NGINX Controller at `https:///login`. Use the admin email address and password that you provided during the installation process. - -1. Once NGINX Controller is installed, you may safely delete the installer package that you downloaded and extracted. - -  - ---- - -## License NGINX Controller - -To add a license to NGINX Controller, take the following steps: - -1. Go to `https:///platform/license` and log in. -1. In the **Upload a license** section, select an upload option: - - - **Upload license file** -- Locate and select your license file in the file explorer. - - **Paste your Association Token or license file** -- Paste your customer Association Token or the contents of your NGINX Controller license file. These are available on the [MyF5 Customer Portal](https://account.f5.com/myf5). - -1. Select **Save license**. - -{{< call-out "note" >}} -To add a license using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a PUT request to the `/platform/license` endpoint. Provide your CAT or NGINX Controller license as a base64-encoded string in the JSON request body. 
-{{< /call-out>}} - - -  - ---- - -## Install NGINX App Protect with NGINX Plus - -[NGINX App Protect](https://www.nginx.com/products/nginx-app-protect/) is the security data plane for NGINX Controller App Security. Your NGINX App Protect installation will include NGINX Plus. - -{{< call-out "important" >}} -If you are adding App Security to an existing NGINX Controller trial, we recommend that you take the steps in this section to deploy a new NGINX App Protect instance, rather than adding the App Protect module to an existing NGINX Plus instance. - -NGINX Controller App Security is supported for use with a limited subset of the OS distributions that are supported by the NGINX Controller Agent and NGINX Plus. If you are planning to add NGINX App Protect to an existing NGINX Plus instance, be sure to check the [Supported Distributions](#supported-distributions) section above to verify that your NGINX Plus instance supports NGINX App Protect. -{{< /call-out >}} - -### Prerequisites - -- Be sure to review the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) for the requirements for your distribution and desired configuration. -- You'll need the NGINX Plus certificate and public key files (`nginx-repo.crt` and `nginx-repo.key`) when installing NGINX App Protect. If you don't have these files, you can use the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}) to download them. - -#### Download the NGINX App Protect Cert and Key - -Take the steps below to download the cert and key files by using the NGINX Controller REST API. - -The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. - -{{< call-out "tip" >}} -You can send a GET request to the login endpoint to find the status of the session token. -{{< /call-out >}} - -For example: - -- Login and capture the session cookie: - - ```curl - curl -c cookie.txt -X POST --url 'https:///api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "","password": ""}}' - ``` - -- Use the session cookie to authenticate and get the session status: - - ```curl - curl -b cookie.txt -c cookie.txt -X GET --url 'https:///api/v1/platform/login' - ``` - - -
- -To use the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}) to download your NGINX Plus certificate and key bundle as a gzip or JSON file, send a GET request to the `/platform/licenses/nginx-plus-licenses/controller-provided` endpoint. - -For example: - -- Download JSON file: - - ```bash - curl -b cookie.txt -c cookie.txt --header 'Content-Type: application/json' -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.json - ``` - -- Download GZIP file: - - ```bash - curl -b cookie.txt -c cookie.txt -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.gz - ``` - -{{< call-out "note" >}} -If you are using a self-signed certificate you will need to add `-k` (allow insecure connections) to your curl command to be able to download your NGINX Plus certificate and key bundle. -{{< /call-out >}} - - -Once you have downloaded your certificate and key bundle you will need to expand the `.gz` file to get your certificate and key pair. - -For example: - -```bash -gunzip nginx-plus-certs.gz -``` - -### Deploy NGINX App Protect - -
- -Install NGINX App Protect on a host accessible by your NGINX Controller instance by following the appropriate steps for your operating system in the [Using NGINX App Protect with NGINX Controller]({{< ref "controller/admin-guides/install/install-for-controller.md" >}}) guide. - -{{< call-out "note" >}} -If you install NGINX App Protect by using any of the OS-specific install guides, **do not make changes to the `nginx.conf` file**. -The NGINX Controller Agent manages `nginx.conf` settings and will make the appropriate adjustments for you. -{{< /call-out >}} - -
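As a rough outline of what the linked guide covers, installation on a Debian or Ubuntu host generally looks like the sketch below. Treat this as a non-authoritative summary: it assumes the NGINX App Protect repository has already been configured as described in the install guide and that `nginx-repo.crt` and `nginx-repo.key` are in your current directory.

```bash
# Illustrative outline only -- follow the OS-specific install guide for the
# authoritative steps, including repository setup.

# Place the repository certificate and key where the NGINX package repositories expect them.
sudo mkdir -p /etc/ssl/nginx
sudo cp nginx-repo.crt nginx-repo.key /etc/ssl/nginx/

# Install the NGINX App Protect package; this typically pulls in a compatible
# NGINX Plus release as a dependency.
sudo apt-get update
sudo apt-get install -y app-protect
```

After the packages are installed, continue with the next section to add the instance to NGINX Controller.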
- -  - ---- - -## Add the NGINX App Protect Instance to NGINX Controller - -{{< include "controller/add-existing-instance.md" >}} - -  - ---- - -## What's Next - -You should now be ready to start your NGINX Controller with App Security trial. Refer to the following topics to get started: - -- [Configure the NGINX Controller Agent]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md" >}}) -- [Set Up Metrics Collection]({{< ref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) -- [Forward Metrics Data to an External Service]({{< ref "/controller/analytics/forwarders/_index.md" >}}) -- [Set up NGINX Controller Services]({{< ref "/controller/services/overview.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/try-nginx-controller.md b/content/controller/admin-guides/install/try-nginx-controller.md deleted file mode 100644 index e77bf343f..000000000 --- a/content/controller/admin-guides/install/try-nginx-controller.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -description: This quick-start tutorial shows you how to get started using F5 NGINX - Controller with NGINX Plus. -nd-docs: DOCS-260 -title: Trial NGINX Controller with NGINX Plus -toc: true -weight: 110 -type: -- tutorial ---- - -## Overview - -This quick-start tutorial shows you how to get started using F5 NGINX Controller with NGINX Plus. - -{{< call-out "caution" >}}In this tutorial, NGINX Controller will install an embedded, self-hosted PostgreSQL database suitable for demo and trial purposes only. **These instructions are not meant for use in production environments**.{{< /call-out >}} - -{{< call-out "note" >}}If you want to try out NGINX Controller with the Application Security add-on, refer to [Trial NGINX Controller with App Security]({{< ref "/controller/admin-guides/install/try-nginx-controller-app-sec.md" >}}).{{< /call-out>}} - -  - ---- - -## Technical Requirements - -Make sure to review the [NGINX Controller Technical Specifications Guide]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md" >}}) for the requirements for your distribution and desired configuration. - -### Supported Distributions - -NGINX Controller, the NGINX Controller Agent, and the NGINX Controller Application Security Add-on support the following distributions and architectures. - -{{< call-out "note" >}}Refer to the [NGINX Plus Technical Specifications](https://docs.nginx.com/nginx/technical-specs/) guide for the distributions that NGINX Plus supports.{{< /call-out>}} - -{{< bootstrap-table "table table-striped table-bordered" >}} - -|Distribution
and Version|NGINX Controller
(Control Plane)|Agent
(Data Plane)|ADC App. Sec.
(Data Plane)|APIM Adv. Sec.
(Data Plane)|Notes| -|--- |--- |--- |--- |--- |--- | -|Amazon Linux
2
(x86_64)| Not supported|v3.0+ |Not supported|Not supported| | -|Amazon Linux
2017.09+
(x86_64)| Not supported |v3.0+|Not supported |Not supported| | -|CentOS
6.5+
(x86_64)| Not supported |v3.0+| Not supported |Not supported| • CentOS 6.5 and later versions in the CentOS 6 family are partially supported.
• This distribution does not support AVRD.| -|CentOS
7.4+
(x86_64)|v3.0+|v3.0+ | v3.12+ |v3.19+| • CentOS 7.4 and later versions in the CentOS 7 family are supported.| -|Debian
8
(x86_64)| Not supported |v3.0–3.21|Not supported|Not supported|• This distribution does not support AVRD.| -|Debian
9
(x86_64)|v3.0+|v3.0–3.21 | v3.12+ |v3.19+ | | -|Debian
10
(x86_64)| Not supported |v3.17+ | v3.17+ |v3.19+| See the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/) for requirements for Debian 10. | -|Red Hat Enterprise Linux
6.5+| Not supported |v3.0+| Not supported | Not supported| • RHEL 6.5 and later versions in the RHEL 6 family are partially supported.| -|Red Hat Enterprise Linux
7.4+
(x86_64)|v3.5+|v3.5+ | v3.12+|v3.19+| • RHEL 7.4 and later versions in the RHEL 7 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | -|Red Hat Enterprise Linux
8.0+
(x86_64)|v3.22+|v3.22+ | v3.22+| Not supported | • RHEL 8.0 and later versions in the RHEL 8 family are supported.
• SELinux may interfere with NGINX Controller installation and operation. If you do enable SELinux, it must use permissive mode. Use of enforcing mode is not supported. | -|Ubuntu
18.04 LTS
(x86_64)|v3.0+|v3.0+ |v3.13+|v3.19+| | -|Ubuntu
20.04 LTS
(x86_64)|v3.20+|v3.12+|v3.16.1+|v3.19+| | - -{{< /bootstrap-table >}} - - - -#### Analytics, Visibility, and Reporting Daemon (AVRD) - -NGINX Controller v3.1 and later use an Analytics, Visibility, and Reporting daemon (AVRD) to aggregate and report app-centric metrics, which you can use to track and check the health of your apps. To learn more about these metrics, see the [NGINX Metrics Catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) topic. - -### Hardware Specs - -The following minimum hardware specifications are required for each node running NGINX Controller: - -- RAM: 8 GB RAM -- CPU: 8-Core CPU @ 2.40 GHz or similar -- Disk space: 155–255 GB free disk space. 255 GB of free space is recommended if NGINX Controller App Security is enabled. See the [Storage Requirements]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#storage-requirements" >}}) section for a categorized list of the storage requirements. - -### Supported NGINX Plus Versions - -NGINX Controller supports the following [NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) versions: - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| NGINX Plus | NGINX Controller | NGINX Controller ADC | NGINX Controller APIM | -|------------|------------------|----------------------|-----------------------| -| R30 | Not supported | 3.22.9+ | Not supported | -| R29 | Not supported | 3.22.9+ | 3.19.6+ | -| R28 | Not supported | 3.22.6+ | 3.19.6+ | -| R27 | Not supported | 3.22.4+ | 3.19.6+ | -| R26 | Not supported | 3.22.2+ | 3.19.6+ | -| R25 | Not supported | 3.20.1+ | 3.19.2+ | -| R24 | 3.17+ | 3.20+ | 3.18+ | -| R23 | 3.12+ | 3.20.0 - 3.22.2 | 3.18+ | -| R22 | 3.5+ | 3.20.0 - 3.22.1 | 3.18+ | -| R21 | 3.5 - 3.12 | Not supported | Not supported | -| R20 | 3.0 - 3.12 | Not supported | Not supported | -| R19 | 2.6 - 3.5 | Not supported | Not supported | - -{{< /bootstrap-table >}} - ---- - -## Sign Up for a Trial License - -First, you need to sign up for a trial license for NGINX Controller. The trial includes access to NGINX Plus, the NGINX Controller Application Delivery module, and the Application Security add-on. - -1. Go to [MyF5](https://account.f5.com/myf5) and create a new account. -1. Verify your account and log in to MyF5. -1. On the MyF5 landing page, activate the NGINX Controller free trial. -1. On the MyF5 **Trials** page, select Launch Your Trial. -1. Download the NGINX Controller package. -1. Make note of your Association Token. You will use this to [license your NGINX Controller instance]({{< ref "/controller/platform/licensing-controller.md#add-a-license-to-nginx-controller" >}}). - -  - ---- - -## Install NGINX Controller Prerequisites - -{{< include "controller/helper-script-prereqs.md" >}} - -  - ---- - -## Install NGINX Controller - -Install NGINX Controller on a dedicated node that **does not** already have Kubernetes configured. NGINX Controller does not support pre-configured Kubernetes implementations at this time. The installer for NGINX Controller will install and configure Kubernetes for you. - -{{< call-out "important" >}}Before installing NGINX Controller, you must **disable swap on the host**; this is required by Kubernetes in order for the kubelet to work properly. Refer to your Linux distribution documentation for specific instructions for disabling swap for your system. 
For more information about this requirement, see the AskF5 knowledge base article [K82655201](https://support.f5.com/csp/article/K82655201) and the [kubeadm installation guide](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) in the Kubernetes documentation.{{< /call-out >}} - -{{< call-out "caution" >}}**For RHEL 8 deployments**, complete the additional prerequisite steps in the [Installing NGINX on RHEL 8]({{< ref "/controller/admin-guides/install/install-nginx-controller-rhel-8.md" >}}) guide before installing NGINX Controller. RHEL 8 support is a **beta** feature.{{< /call-out >}} - -To install NGINX Controller, take the following steps: - -1. Download the NGINX Controller installer package from the [MyF5 Customer Portal](https://my.f5.com/manage/s/downloads). -1. Extract the installer package files: - - ```bash - tar xzf controller-installer-.tar.gz - ``` - -1. Run the installation script: - - ```bash - cd controller-installer - ./install.sh - ``` - -1. When prompted to use an embedded config DB, type `y`. - -1. The installation script walks through a series of steps and asks for the following inputs: - - - **Config database volume type**: Specify the type of volume to use to store the config database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. - - {{< call-out "note" >}}Refer to the [NGINX Controller Technical Specifications Guide]({{< ref "/controller/admin-guides/install/nginx-controller-tech-specs.md#local-or-external-storage" >}}) for more information about the volume options and requirements.{{< /call-out>}} - - - **Analytics database volume type**: Specify the type of volume to use to store the analytics database: local, NFS, or AWS. We recommend choosing `local` for demo and trial purposes. - - **EULA**: Read the end-user license agreement. Type either `y` to accept or `n` to exit. - - **SMTP** - - **SMTP Host**: Provide the host name or IP address of an SMTP server. This is used to send password recovery emails. For trial purposes, if you don't need to receive these communications, you can enter a value of "example.com" or something similar. - - **SMTP Port**: The port of the SMTP server. - - **SMTP Authentication**: Select `y` or `n` to authenticate when connecting to the SMTP server. - - **Use TLS for SMTP Communication**: Select `y` or `n` to use SSL for SMTP server connections. - - **Do not reply email address**: The sender's email address. For example, `donotreply@example.com`. - - **Admin** - - **First name**: The first name for the initial admin user. - - **Last name**: The last name for the initial admin user. - - **Email address**: The contact email address for the initial admin user. - - **Password**: The initial admin's password. Passwords must be 6-64 characters long and must include letters and digits. - - **FQDN**: Fully qualified domain name (FQDN) -- a resolvable domain name for the NGINX Controller server. You can use the FQDN to access the NGINX Controller web interface. - Additionally, the FQDN is used by Controller Agents when connecting to NGINX Controller. - - **SSL/TLS certificates**: Type `y` to generate and use self-signed certs for running NGINX Controller over HTTPS, or type `n` to provide your own certs. 
- - {{< call-out "important" >}} -If you provide your own SSL/TLS certificates, you'll need a complete certificate chain file, with the intermediate CA cert appended to the server cert; the server certificate must appear **before** the chained certificates in the combined file. - {{< /call-out >}} - -1. Log in to NGINX Controller at `https:///login`. Use the admin email address and password that you provided during the installation process. - -1. Once NGINX Controller is installed, you may safely delete the installer package that you downloaded and extracted. - -  - ---- - -## License NGINX Controller - -To add a license to NGINX Controller, take the following steps: - -1. Go to `https:///platform/license` and log in. -1. In the **Upload a license** section, select an upload option: - - - **Upload license file** -- Locate and select your license file in the file explorer. - - **Paste your Association Token or license file** -- Paste your customer Association Token or the contents of your NGINX Controller license file. These are available on the [MyF5 Customer Portal](https://account.f5.com/myf5). - -1. Select **Save license**. - -{{< call-out "note" >}} -To add a license using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}), send a PUT request to the `/platform/license` endpoint. Provide your CAT or NGINX Controller license as a base64-encoded string in the JSON request body. -{{< /call-out>}} - - -  - ---- - -## Install NGINX Plus - -### Prerequisites - -- Make sure to review the [NGINX Plus Technical Specifications Guide](https://docs.nginx.com/nginx/technical-specs/) for the requirements for your distribution and desired configuration. -- You'll need the NGINX Plus certificate and public key files (`nginx-repo.crt` and `nginx-repo.key`) that were provided when you signed up for the trial license. If you don't have these files, you can use the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}) to download them. - -#### How to Download the NGINX Plus Cert and Key using the NGINX Controller API - -The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. - -{{< call-out "tip" >}} -You can send a GET request to the login endpoint to find the status of the session token. -{{< /call-out >}} - -For example: - -- Login and capture the session cookie: - - ```curl - curl -c cookie.txt -X POST --url 'https://198.51.100.10/api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "arthur@example.net","password": ""}}' - ``` - -- Use the session cookie to authenticate and get the session status: - - ```curl - curl -b cookie.txt -c cookie.txt -X GET --url 'https://198.51.100.10/api/v1/platform/login' - ``` - - -
- -To use the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}) to download your NGINX Plus certificate and key bundle as a gzip or JSON file, send a GET request to the `/platform/licenses/nginx-plus-licenses/controller-provided` endpoint. - -For example: - -- Download JSON file: - - ```bash - curl -b cookie.txt -c cookie.txt --header 'Content-Type: application/json' -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.json - ``` - -- Download GZIP file: - - ```bash - curl -b cookie.txt -c cookie.txt -X GET --url 'https://192.0.2.0/api/v1/platform/licenses/nginx-plus-licenses/controller-provided' --output nginx-plus-certs.gz - ``` - -{{< call-out "note" >}} -If you are using a self-signed certificate you will need to add `-k` (allow insecure connections) to your curl command to be able to download your NGINX Plus certificate and key bundle. -{{< /call-out >}} - - -Once you have downloaded your certificate and key bundle you will need to expand the `.gz` file to get your certificate and key pair. - -For example: - -```bash -gunzip nginx-plus-certs.gz -``` - -### Steps - -Take the following steps to install NGINX Plus: - -{{< call-out "important" >}} -You need the NGINX Plus certificate and public key files (`nginx-repo.crt` and `nginx-repo.key`) that were provided when you signed up for the trial license. -{{< /call-out >}} - -1. First, make sure to review the [NGINX Plus Technical Specifications Guide](https://docs.nginx.com/nginx/technical-specs/) for the requirements for your distribution and desired configuration. -2. To install NGINX Plus, follow the instructions in the [NGINX Plus Installation Guide](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/). Refer to the relevant section for your distribution. - -  - ---- - -## Add an NGINX Plus Instance to NGINX Controller - -{{< include "controller/add-existing-instance.md" >}} - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/admin-guides/install/using-helper-script.md b/content/controller/admin-guides/install/using-helper-script.md deleted file mode 100644 index 830602bb7..000000000 --- a/content/controller/admin-guides/install/using-helper-script.md +++ /dev/null @@ -1,448 +0,0 @@ ---- -description: Learn how to update F5 NGINX Controller installation settings and manage - the NGINX Controller service using the helper.sh script. -nd-docs: DOCS-261 -title: Update NGINX Controller Settings with helper.sh -toc: true -weight: 200 -type: -- how-to ---- - -## Overview - -You can use the F5 NGINX Controller `helper.sh` script to update NGINX Controller installation settings and manage the NGINX Controller process. 
This tutorial shows you how to use `helper.sh` to perform the following tasks: - -- Install the NGINX Controller prerequisites -- View the version of NGINX Controller that's installed and running -- Start, stop, and restart NGINX Controller -- Back up and restore the NGINX Controller config and encryption keys -- Restore the embedded config database -- Get the NGINX Plus repository key and certificate files (deprecated for `helper.sh` in NGINX Controller v3.9) -- Update the SMTP settings -- Update the database settings -- Update or replace the TLS certificates -- Print the NGINX Controller logs -- Create a support package - -## Install NGINX Controller Prerequisites - - - -{{< include "controller/helper-script-prereqs.md" >}} - - - -  - ---- - -## View the Installed NGINX Version - -To see which version of NGINX Controller is installed and running, type the following command: - -``` bash -/opt/nginx-controller/helper.sh version -``` - -The output looks similar to the following: - -``` bash -Installed version: 3.14.0 -Running version: 3.14.0 -``` - -  - ---- - -## Start, Stop, and Restart NGINX Controller - - -You can use the `helper.sh` script to start, stop, restart, and check the status of the NGINX Controller process. - -``` bash -/opt/nginx-controller/helper.sh controller start -/opt/nginx-controller/helper.sh controller stop -/opt/nginx-controller/helper.sh controller restart -/opt/nginx-controller/helper.sh controller status -``` - -  - ---- - -## Back Up and Restore Config and Encryption Keys - - - -After installing NGINX Controller, you should back up the cluster config and encryption keys. You'll need these if you ever need to restore the NGINX config database on top of a new NGINX Controller installation. - -- To back up the NGINX Controller cluster configuration and encryption keys: - - ```bash - /opt/nginx-controller/helper.sh cluster-config save - ``` - - The file is saved to `/opt/nginx-controller/cluster-config.tgz`. - -- To restore the cluster's config and encryption keys, take the following steps: - - ```bash - /opt/nginx-controller/helper.sh cluster-config load - ``` - - - -  - ---- - -## Restore Embedded Config Database - - - -This section explains how to restore the embedded config database from the latest backup file or a specific, timestamped file. - -{{< call-out "important" >}}If you restore the config database on top of a new installation of NGINX Controller, make sure to follow the steps to [restore your NGINX config and encryption keys]({{< ref "/controller/admin-guides/backup-restore/backup-restore-cluster-config.md" >}}) afterward. {{< /call-out >}} - -- To restore the embedded NGINX Controller config database **from the latest automated backup**, run the following command: - - ```bash - /opt/nginx-controller/helper.sh backup restore - ``` - -- To restore the embedded config database from **a specific backup file**: - - ```bash - /opt/nginx-controller/helper.sh backup restore - ``` - - - If you installed the embedded config database on a **local volume**, the backup files are located in `/opt/nginx-controller/postgres_data/`. - - - If you installed the embedded config database on an **NFS volume**, follow the steps in [(NFS) Copy Config Database Backup to Local Volume for Restoration]({{< ref "/controller/admin-guides/backup-restore/backup-restore-embedded-config-db.md#nfs-copy-config-database-backup-to-local-volume-for-restoration" >}}) to download the backup file to your local volume, and then use the `helper.sh` script to restore from it. 
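For example, restoring onto a fresh installation might look like the following sketch. The backup file name is a placeholder -- pick an actual file from your backup directory -- and the second command assumes you previously saved the cluster config and encryption keys with `cluster-config save`.

```bash
# Restore the embedded config database from a specific backup file.
# Local-volume backups are stored under /opt/nginx-controller/postgres_data/.
/opt/nginx-controller/helper.sh backup restore /opt/nginx-controller/postgres_data/<backup-file>

# On a new NGINX Controller installation, restore the previously saved cluster
# config and encryption keys afterward.
/opt/nginx-controller/helper.sh cluster-config load
```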
- -  - - - ---- - -## Get NGINX Plus Repository Key and Certificate - -To install NGINX Plus as a data plane for NGINX Controller, you need to have the NGINX repository key and certificate files. - -{{< deprecated >}}Using the helper.sh script to download your NGINX Plus certificate and key bundle is deprecated in in NGINX Controller v3.9.{{< /deprecated >}} - -{{< call-out "note" >}}If you're running NGINX Controller v3.10+, you can use the REST API to [Download the NGINX Plus Cert and Key Bundle]({{< ref "/controller/admin-guides/install/get-n-plus-cert-and-key.md" >}}). {{< /call-out>}}  - -If you're running NGINX Controller 3.9 or earlier, use the `helper.sh` script to extract the NGINX repository key and certificate files: - -```bash -/opt/nginx-controller/helper.sh repository-cred [-c|--cert ] [-k|--key ] -``` - -{{< call-out "important" >}} - -Make sure that you've [uploaded your license in NGINX Controller]({{< ref "licensing-controller.md" >}}) first before running the `helper.sh repository-cred` command to extract the repository files. - -{{< /call-out >}} - - - -| Options | Description | -|----------|-------------| -| `-c` \| `--cert` | Creates a certificate called ``. The default file name is `nginx-repo.crt` in the current directory.| -| `-k` \| `--key` | Creates a key called ``. The default file name is `nginx-repo.key` in the current directory. | - -  - ---- - -## Update SMTP Settings - -Use the `helper.sh` script to change the SMTP address; port; TLS; sender; and optionally, the username and password. - -``` bash -/opt/nginx-controller/helper.sh configsmtp
[auth] [username] [password] -``` - -For example: - -``` bash -/opt/nginx-controller/helper.sh configsmtp 192.0.2.0 25 false noreply@example.com true user1 -``` - - - -| Options | Description | -|----------|-------------| -| `address` | The host name or IP address of the SMTP server. | -| `port` | The port of the SMTP server. | -| `tls` | `true` or `false`. Set to `true` to require SSL for connections to the SMTP server. | -| `from` | Sender's email address. | -| `auth` | `true` or `false`. Set to `true` to authenticate when connecting to the SMTP server. | -| `username` | The username to use for access to the SMTP server. | -| `password` | The password to use for access to the SMTP server. | - -  - -### Environment Variables - -We strongly recommend that you use environment variables, especially for passwords, to prevent exposing sensitive information in system processes (for example, `ps`, `top`) and the bash history. - -You use these SMTP environment variables with NGINX Controller: - -| Environment Variables | Description | -|----------|-------------| -| `CTR_SMTP_HOST` | The host name or IP address of the SMTP server. | -| `CTR_SMTP_PORT` | The port of the SMTP server.| -| `CTR_SMTP_TLS` | `true` or `false`; Set to `true` to require SSL for connections to the SMTP server. | -| `CTR_SMTP_FROM` | Sender's email address. | -| `CTR_SMTP_AUTH` | `true` or `false`; Set to `true` to authenticate when connecting to the SMTP server. | -| `CTR_SMTP_USER` | The username to use for access to the SMTP server. | -| `CTR_SMTP_PASS` | The password to use for access to the SMTP server. | - -For example: - -``` bash -CTR_SMTP_HOST=192.0.2.0 \ -CTR_SMTP_PORT=25 \ -CTR_SMTP_TLS=false \ -CTR_SMTP_FROM=noreply@nginx.test \ -CTR_SMTP_AUTH=true CTR_SMTP_USER=user1 CTR_SMTP_PASS= \ -/opt/nginx-controller/helper.sh configsmtp -``` - -  - ---- - -## Update Database Settings - -Use the `helper.sh` script to change the external config database address; port; and optionally, the username, password, and certificate authentication. However, if your current installation uses an internal config database, then these settings are read-only and cannot be modified using the `helper.sh` script (password and certificates will be automatically rotated with each Controller update). - -``` bash -/opt/nginx-controller/helper.sh configdb
[username] [password] [ssl] [ca] [cert] [key] -``` - -For example: - -``` bash -/opt/nginx-controller/helper.sh configdb 192.0.2.1 5432 user1 false -``` - - - -| Options | Description | -|----------|-------------| -| `address` | The host name or IP address of config database. | -| `port` | The port of the database. | -| `username` | The username to use for access to the config database. | -| `password` | The password to use for access to the config database. | -| `ssl` | `true` or `false`. Set to 'true' to require SSL for connections to the config database. | -| `ca` | CA certificate file path. | -| `cert` | Certificate file path. | -| `key` | Key file path. | - -  - -### Environment Variables - -We strongly recommend that you use environment variables, especially for passwords, to prevent exposing sensitive information in system processes (for example, `ps`, `top`) and the bash history. - -You can use these database environment variables with NGINX Controller: - -| Environment Variables | Description | -|----------|-------------| -| `CTR_DB_HOST` | The host name or IP address of the config database. | -| `CTR_DB_PORT` | The port of the config database used for incoming connections. | -| `CTR_DB_USER` | The username for the account to use for access to the config database; must be provided with password. | -| `CTR_DB_PASS` | The password for the account to use for access to the config database; must be provided with username. | -| `CTR_DB_ENABLE_SSL` | `true` or `false`; Set to `true` to require SSL for connections to the config database. | -| `CTR_DB_CA` | CA certificate file path. | -| `CTR_DB_CLIENT_CERT` | Certificate file path. | -| `CTR_DB_CLIENT_KEY` | Key file path. | - -For example: - -```bash -CTR_DB_HOST=192.0.2.1 \ -CTR_DB_PORT=5432 \ -CTR_DB_USER=user1 \ -CTR_DB_PASS= \ -CTR_DB_ENABLE_SSL=false \ -/opt/nginx-controller/helper.sh configdb -``` - -  - ---- - -## Update or Replace TLS Certificates - -Use the `helper.sh` script to update or replace the TLS certificates that are used to connect to NGINX Controller. - -``` bash -/opt/nginx-controller/helper.sh configtls -``` - - - -| Options | Description | -|----------|-------------| -| `cert_file` | Certificate file path. | -| `key_file` | Key file path. | - -  - ---- - -## Print NGINX Controller Logs - -To print the NGINX Controller logs, enter the following command: - -``` bash -/opt/nginx-controller/helper.sh logs -``` - -  - ---- - -## Add a Custom Logo - -The NGINX Controller logo in the user interface is replaceable with a custom logo. The requirements being: - -- The logo file is in SVG format. -- The logo is square in shape. - -{{< call-out "note" >}} The above steps modify the logo in the top left corner and in the menu, not the favicon. {{< /call-out >}} - -Follow the steps below to replace the logo: - -1. Connect to the NGINX Controller host using 'ssh'. -1. Transfer the logo file to NGINX Controller using one of the following methods: - 1. Method 1: Download the file using curl after connecting to the host using the command `curl https://example.com/custom-logo.svg`. - 1. Method 2: Upload the logo to the host using SCP: `scp /local/path/custom-logo.svg user@controller-host:/remote/path`. - 1. Method 3: Copy/Paste the logo file. - 1. Copy the logo file to the clipboard before connecting to the host. - 1. After connecting to the host, paste the file. -1. Run `helper.sh setlogo ` ( is the name of the SVG file). -1. Wait for approximately five minutes for the cache to clear and the logo to appear in the user interface. -1. 
Re-run the `setlogo` command on each NGINX Controller node. This has to be done after an upgrade or reinstallation. - -  - ---- - -## Create a Support Package - -You can create a support package for NGINX Controller that you can use to diagnose issues. - -{{< call-out "note" >}} -You will need to provide a support package if you open a ticket with NGINX Support via the [MyF5 Customer Portal](https://account.f5.com/myf5). -{{< /call-out >}}  - -```bash -/opt/nginx-controller/helper.sh supportpkg [-o|--output ] [-s|--skip-db-dump] [-t|--timeseries-dump ] -``` - - - -| Options | Description | -|----------|-------------| -| `-o` \| `--output` | Save the support package file to ``. | -| `-s` \| `--skip-db-dump` | Don't include the database dump in the support package. | -| `-t` \| `--timeseries-dump ` | Include the last `` of timeseries data in the support package (default 12 hours). | - -Take the following steps to create a support package: - -1. Open a secure shell (SSH) connection to the NGINX Controller host and log in as an administrator. - -1. Run the `helper.sh` utility with the `supportpkg` option: - - ```bash - /opt/nginx-controller/helper.sh supportpkg - ``` - - The support package is saved to: - - `/var/tmp/supportpkg-.tar.gz` - - For example: - - `/var/tmp/supportpkg-20200127T063000PST.tar.gz` - -1. Run the following command on the machine where you want to download the support package to: - - ``` bash - scp @:/var/tmp/supportpkg-.tar.gz /local/path - ``` - -### Support Package Details - -{{< include "controller/helper-script-support-package-details.md" >}} - - - -  - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/_index.md b/content/controller/analytics/_index.md deleted file mode 100644 index 885c42874..000000000 --- a/content/controller/analytics/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn about the F5 NGINX Controller Analytics module. -title: Analytics -weight: 120 -url: /nginx-controller/analytics/ ---- - diff --git a/content/controller/analytics/alerts/_index.md b/content/controller/analytics/alerts/_index.md deleted file mode 100644 index 272f87c87..000000000 --- a/content/controller/analytics/alerts/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn about F5 NGINX Controller alerts and notifications. -title: Alerts -weight: 100 -url: /nginx-controller/analytics/alerts/ ---- - diff --git a/content/controller/analytics/alerts/about-alerts.md b/content/controller/analytics/alerts/about-alerts.md deleted file mode 100644 index 60a735d32..000000000 --- a/content/controller/analytics/alerts/about-alerts.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -description: Learn about NGINX Controller Alerts and Notifications. -nd-docs: DOCS-520 -title: About Alerts -toc: true -weight: 100 -type: -- concept ---- - -## Overview - -The F5 NGINX Controller Analytics module lets you configure alerts and notifications, so you can stay informed about your system and app performance. In this topic, you'll learn about [alerts](#alerts), [alert rules](#alert-rules), and [alert notifications](#alert-notifications). - -{{< call-out "note" >}} -Refer to [Manage Alerts]({{< ref "/controller/analytics/alerts/manage-alerts.md" >}}) to learn how to set up alerts. -{{< /call-out>}} - -## Alerts - -An *alert* is generated when the criteria for an alert rule are met. 
-All alerts contain the following information: - - - -| Name | Description | -|---|---| -| `started_timestamp` | The time at which the alert was triggered.| -| `last_checked_timestamp` | The time at which the last alert check occurred.| -| `started_value` | The value of the alert metric at the time the alert was triggered.| -| `last_checked_value` | The value of the alert metric when it was last checked.| -| `dimensions` | The list of dimension values for which the alert was triggered.| - -## Alert Rules - -An *Alert Rule* defines the conditions that will trigger an alert. NGINX Controller generates names for alert rules automatically. An alert rule consists of the following information: - - - -| Name | Description | -|---|---| -| `name` | A unique identifier for the alert rule.| -| `display name` | A human-friendly name that helps you identify what the alert rule does. | -| `metric` | The [metric]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) that you want to monitor.
{{< call-out "note" >}}An alert rule can monitor one metric.{{< /call-out >}}| -| `operator` | The operator that will be applied to the value of the metric to check if an alert should be triggered. There are two available operators: `le` - less or equal and `ge` - greater or equal.| -| `threshold` | Defines the value that, when exceeded, will trigger an alert.
{{< call-out "tip" >}}You can find the allowed threshold value(s) for each metric in the **unit** field of the metric's entry in the [Metrics Catalogs Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}). Select the "Index" button to access the list of all available metrics and jump directly to that item in the catalog.{{< /call-out >}} | -| `period` | Defines the time window in which you want to calculate the aggregated metric value.
- The maximum possible time window is `24h`.
- The minimum possible time window is `2m`.| -| `filter` | Lets you refine the alert rule for a more specific set of metric values, based on dimensions.
If no filter is provided, all collected data will be used when calculating the alert rule status.| -| `group by` | Groups results according to the specified dimension(s). A separate alert will be triggered for each result group. You can provide multiple dimension names as a comma-separated list.
{{< call-out "note" >}}Using a dimension with a high cardinality of values might result in a high volume of alerts.{{< /call-out >}}| -| `notification type` | Defines how you want to receive alert notifications. | -| `email addresses` | A comma-separated list of email addresses that should receive alert notifications.| -| `mute` | Boolean; turns alert notifications on and off. Set to 'on' to mute notifications. | - -If you leave any rule parameter blank, NGINX Controller will take all relevant data for the parameter into account in the alert rule calculation. - -Each Alert Rule has a status that describes the current state of the alert rule. It contains the following information: - - - - - -| Name | Description | -|---|---| -| `alerts count` | The total number of triggered alerts for the Alert Rule since its creation.| -| `status: ok` | The rule has not triggered any alerts, or all triggered alerts have expired.| -| `status: ongoing` | At least one alert for the alert rule is currently ongoing.| -| `lastCheckedTimestamp` | The time when the alert rule was last checked successfully.| -| `lastStartedTimestamp` | The time when the alert rule status changed from 'ok' to 'ongoing'.| -| `lastExpiredTimestamp` | The time when the alert rule status changed from 'ongoing' to 'ok'.| - -
- -Alert rules work in the following manner: - -1. Incoming metric updates are continuously monitored against the set of alert rules. -2. The most recent metric value is checked against the threshold defined in the alert rule. -3. If the threshold is met, an alert notification is generated and the rule will continue to be monitored. In the [Alerts Status]({{< ref "/controller/analytics/alerts/manage-alerts.md#view-alert-rule-status" >}}) pane, the alert instance's status will be displayed as "ongoing". -4. If subsequent metric updates show that the metric no longer violates the threshold for the configured period, the alert expires. - -## Alert Notifications - -An *Alert notification* is a message either displayed in the NGINX Controller user interface or sent via email. Alert notifications are sent when an alert is triggered or expired, depending on the alert rule criteria. - -- The **Notifications** feed contains information about all changes in the system, including alert changes. To access the Notifications feed, select the bell icon next to the **Account Settings** menu. -- A notification appears in the Notifications feed immediately when an alert is triggered or expires. -- Alert instance emails notify you when a single alert instance starts or expires. - -If you want to stop receiving notifications for an alert rule, but you don't want to delete it, you can [mute the alert rule]({{< ref "/controller/analytics/alerts/manage-alerts.md#mute-or-unmute-an-alert-rule" >}}). -Likewise, if you want to stop receiving emails for an alert rule, but you do want to continue receiving the user interface notifications, [edit the alert rule]({{< ref "/controller/analytics/alerts/manage-alerts.md#edit-an-alert-rule" >}}) and remove your email address. - -{{< call-out "note" >}}If you mute an alert rule while the alert rule status is "ongoing", you will not receive any further alert notifications, including when the alert rule status changes.{{< /call-out >}} - -### Email notifications - -{{< call-out "important" >}} -You must [verify your email address]({{< ref "/controller/analytics/alerts/manage-registered-emails.md" >}}) in order to receive alert notification emails. -{{< /call-out >}} - -When an alert rule's conditions are met, NGINX Controller sends an alert email with the subject "[controller-alert] Alert started: " to all of the email addresses that are specified in the alert rule. - -If multiple alerts are triggered in a single calculation period, NGINX Controller sends a summary email message that contains all of the alerts for the time period. - -When an alert instance expires, NGINX Controller sends a message with subject "[controller-alert] Alert expired: " to all of the email addresses that are specified in the alert rule. - -The notification uses the automatically-generated name that was assigned by the system when the rule was created. - -NGINX Controller sends summary emails once every hour. These emails contain alerts that have been triggered or expired since the last summary email was sent. If no alerts started or expired in that timeframe, then the summary will not be sent. - -### How Many Notifications to Expect - -As an example, let's say that you have three instances configured in the NGINX Controller. You want to monitor all three instances based on the `http.request.count` metric. - -Assuming that traffic is constantly flowing through all three instances, and the threshold is exceeded for all three, the system will return three alerts (one per instance). 
In this case, you would receive one email, containing three alert notices, and three user interface notifications. - -If the threshold is exceeded for one instance, then you will receive one alert email and one notification in the user interface. - -## How Alerts Work - -NGINX Controller checks the list of configured alert rules every 30 seconds. Then, it queries the [Metrics API]({{< ref "/controller/analytics/metrics/metrics-api.md" >}}) for the data defined in each alert rule. - -The API query uses the following template: - -`?names=<aggregation>(<metric name>)&startTime=now-<period>&endTime=now<&additional-alert-rule-parameters>` - -where - -- `<aggregation>` is the appropriate [aggregation function]({{< ref "/controller/analytics/metrics/metrics-api.md#aggregations" >}}) for the metric. You can find this information in the [Metrics Catalog Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}). - - `AVG` applies to `gauge` metrics. Gauges are averaged over the time period configured in the alert rule. - - `MAX` applies to `counter` metrics. - - `SUM` applies to `incremental` metrics. - -- The `<metric name>` and `<period>` parameters are read from the alert rule configuration. -- `<&additional-alert-rule-parameters>` stands for any additional parameters, such as `filter` or `groupBy`, that are read from the alert rule configuration. - -NGINX Controller checks the value returned by the Metrics API against the configured threshold, then takes the appropriate action: - - - -| Conditions | Action | -|---|---| -| - threshold is exceeded
- "ongoing" alert does not exist | Triggers new alert. | -| - threshold is exceeded
- "ongoing" alert exists | Updates existing alert's `last_checked_timestamp` and `last_checked_value`. | -| - threshold *is not* exceeded
- "ongoing" alert exists | Expires alert.| -| - threshold *is not* exceeded
- "ongoing" does not exist | No action.| - -Next, the alert rule status is updated. Each alert rule will be updated with a new `last_checked_timestamp` and new `status`, if applicable. - -Finally, the alert notifications for newly-created or expired alerts will be sent for any rules that are not muted. - -{{< call-out "important" >}} -If the [Metrics API]({{< ref "/controller/analytics/metrics/metrics-api.md" >}}) query does not return any data -- for example, if there was no traffic through the instance and therefore no metric value -- NGINX Controller assumes a value of `0`. In such cases, the threshold will be compared to `0`. -{{< /call-out >}} - -## Alert special cases - -### Alerts for the controller.agent.status metric - -The `controller.agent.status` is a special metric representing the heartbeat of the NGINX Agent running on the instance. -The metric is reported every 1 minute by the NGINX Agent to the NGINX Controller and may only have a value of 1 if the NGINX Agent is healthy. -If the NGINX Agent is unhealthy it is not reporting the heartbeat and effectively no values for the `controller.agent.status` are stored by the NGINX Controller. -Based on this metric it is possible to create an alert rule and receive notifications whenever the total number of heartbeats reported by a certain NGINX Agent in a recent period is below or equal (or above or equal) certain threshold. - -For example, you would like to receive notifications whenever the NGINX Agent availability at any instance is less or equal 70%. -To achieve that: - -1. Create an alert rule for the `controller.agent.status` metric. -2. Set the period to at least 10 minutes (recommended, to avoid flapping conditions). Heartbeats arrive every minute while the alert status is evaluated every 30 seconds. -3. Set the threshold to 7 of the NGINX Agent availability (7 heartbeats received in the last 10 min). -4. Set the operator to below or equal. -5. Break out by the instance dimension to get notified about the NGINX Agent availability per instance. - -## What's Next - -- [Create and Manage Alert Rules]({{< ref "/controller/analytics/alerts/manage-alerts.md" >}}) -- [Manage Registered Emails]({{< ref "/controller/analytics/alerts/manage-registered-emails.md" >}}) -- [NGINX Controller REST API Reference]({{< ref "/controller/api/_index.md" >}}) - -{{< versions "3.13" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/alerts/manage-alerts.md b/content/controller/analytics/alerts/manage-alerts.md deleted file mode 100644 index 56482ef12..000000000 --- a/content/controller/analytics/alerts/manage-alerts.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -description: Learn how to view, add, mute, and delete Alerts. -nd-docs: DOCS-521 -title: Manage Alerts -toc: true -weight: 200 -type: -- concept ---- - -## Overview - -[Alerts]({{< ref "/controller/analytics/alerts/about-alerts.md" >}}) are notifications about the F5 NGINX Controller system and your applications' performance. - -[Alert rules]({{< ref "/controller/analytics/alerts/about-alerts.md#alert-rules" >}}) let you specify what you want to be alerted about. This includes which metrics you want to monitor; the trigger conditions and threshold to meet; the instance(s) to monitor; and the email address(es) to use for notifications. - -## Add an Alert Rule - -To add an alert rule: - -1. Open the NGINX Controller user interface and log in. -1. 
On the Analytics menu, select **Alerts > Alert Rules**. -1. Select **Create Alert Rule**. -1. Define your alert rule by providing the following information: - - - Name - - (Optional) Display Name - - Metric - - Condition, Threshold, and Time Period - - Filter - - (Optional) Breakout - - Email Notification Address(es): - - - Select the desired address(es) from the list provided, or - - Select **Manage Email Addresses** to add a new address, then take the steps below: - - 1. Select **Add Email Address**. - 1. Provide the desired email address. - 1. Select the submit (plus sign) icon. - 1. Select **Done** to close the Manage Email Addresses panel. - - {{< call-out "note" >}}You will need to verify the email address before it can begin receiving alerts.{{< /call-out >}} - -1. (Optional) Select **Mute Alert Rule** if you want to create the alert rule but not receive any associated notifications. -1. Select **Create**. - -## View Alerts - -To view all **alerts** that are triggered by alert rules: - -1. Open the NGINX Controller user interface and log in. -1. On the Analytics menu, select **Alerts > Alerts**. - -All alert rules and triggered alerts are displayed on this page. You can use the search bar to filter the alerts that are displayed. - -## Edit an Alert Rule - -To edit an alert: - -1. Open the NGINX Controller user interface and log in. -1. On the Analytics menu, select **Alerts > Alert Rules**. -1. Select the alert rule that you want to edit. -1. Select the edit (pencil) icon for the alert rule. -1. Make the desired changes to the alert rule, then select **Save**. - -{{< call-out "important" >}} -When you edit an alert rule, any ongoing alerts which previously met that rule will expire immediately. - -If the threshold is still exceeded in the new alert rule configuration, new alerts will be triggered. -{{< /call-out >}} - -## Mute or Unmute an Alert Rule - -If you want to stop receiving notifications for an alert rule without deleting it, you can mute it. Likewise, you can unmute alert rules for which you want to resume receiving notifications. - -To mute or unmute an alert: - -1. Open the NGINX Controller user interface and log in. -1. On the Analytics menu, select **Alerts > Alert Rules**. -1. Select the alert rule that you want to mute or unmute. -1. Select the mute (volume) icon to mute or unmute the alert rule. - -## Delete an Alert Rule - -To delete an alert rule: - -1. Open the NGINX Controller user interface and log in. -1. On the Analytics menu, select **Alerts > Alert Rules**. -1. Select the alert rule that you want to delete. -1. Select the delete (trash can) icon to delete the alert rule. -1. Select **Delete** in the pop-up box to confirm that you want to proceed. 
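All of the steps above use the NGINX Controller user interface. Alert rules can also be created and updated through the NGINX Controller REST API. The sketch below is illustrative only: the endpoint path, the authentication step, and the payload field names are assumptions loosely based on the alert rule properties described in [About Alerts]({{< ref "/controller/analytics/alerts/about-alerts.md#alert-rules" >}}); consult the [NGINX Controller REST API Reference]({{< ref "/controller/api/_index.md" >}}) for the actual resource path and schema.

```bash
# Hypothetical example -- the path and payload shape are assumptions, not the documented API.
# <controller-fqdn> and <session-cookie> are placeholders for your own values.
curl -k -X POST "https://<controller-fqdn>/api/v1/analytics/alerts/rules" \
  --cookie "session=<session-cookie>" \
  -H "Content-Type: application/json" \
  -d '{
        "metadata": { "name": "high-5xx-count" },
        "desiredState": {
          "metric": "nginx.http.status.5xx",
          "operator": "ge",
          "threshold": 100,
          "period": "5m",
          "groupBy": "instance.name",
          "emailAddresses": ["ops@example.com"],
          "mute": false
        }
      }'
```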
- -## What's Next - -- Learn more [About Alerts]({{< ref "/controller/analytics/alerts/about-alerts.md" >}}) -- Learn more about [Metrics and Metadata]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) -- Learn more about [Traffic Metrics]({{< ref "/controller/analytics/metrics/overview-traffic-metrics.md" >}}) -- [Manage Registered Emails]({{< ref "/controller/analytics/alerts/manage-registered-emails.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/alerts/manage-registered-emails.md b/content/controller/analytics/alerts/manage-registered-emails.md deleted file mode 100644 index e864f4e77..000000000 --- a/content/controller/analytics/alerts/manage-registered-emails.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Learn how to manage the email addresses that receive automatic alert - notifications. -nd-docs: DOCS-522 -title: Manage Registered Email Addresses -toc: true -weight: 310 -type: -- concept ---- - -## Overview - -In order to receive email notifications for [Alerts]({{< ref "/controller/analytics/alerts/about-alerts.md" >}}), you need to provide a valid email address and complete the verification process. - -{{< call-out "important" >}} -You will not receive any alert notifications via email until you verify your email address. Any alert notification emails that were triggered by alert rules prior to the email address being verified will not be re-sent. -{{< /call-out >}} - -## List Registered Email Addresses - -To find the list of registered email addresses: - -1. Open the F5 NGINX Controller user interface and log in. -1. On the **Analytics** menu, select **Alerts**. -1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. -1. All registered email addresses are displayed in the Manage Email Addresses panel. To close the panel, select **Done**. - -{{< call-out "important" >}}The **Manage Email Addresses** button is not displayed if you don't have any Alerts configured. If this is the case, you can add a new email address when you [create an alert rule]({{< ref "/controller/analytics/alerts/manage-alerts.md#add-an-alert-rule" >}}).{{< /call-out >}} - -## Add a New Email Address - -To add a new email address: - -1. Open the NGINX Controller user interface and log in. -1. On the **Analytics** menu, select **Alerts**. -1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. -1. In the **Manage Email Addresses** panel: -1. Select **Add Email Address**. -1. Provide the desired email address. -1. Select the submit (plus sign) icon. -1. Select **Done** to close the Manage Email Addresses panel. -1. Check your email inbox for a message with the subject `[controller-team] Email verification`. -1. Click on the link provided in the email to complete the verification process. - -## Re-send a Verification Email - -To re-send a verification email to a newly-registered email address: - -1. Open the NGINX Controller user interface and log in. -1. On the **Analytics** menu, select **Alerts**. -1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. -1. Select the Resend verification (circular arrows) icon to the right of the email address. -1. Select **Done** to close the Manage Email Addresses panel. - -## Remove a Registered Email Address - -To remove a registered email address: - -1. Open the NGINX Controller user interface and log in. -1. 
On the **Analytics** menu, select **Alerts**. -1. On the **Alert Rules Overview** page, select **Manage Email Addresses**. -1. On the **Manage Email Addresses** panel, select the Delete email address (trash can) icon to the right of the email address that you want to remove. -1. In the **Delete Email Address** pop-up window, select **Delete** to confirm. -1. Select **Done** to close the Manage Email Addresses panel. - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/alerts/service-now-notifications.md b/content/controller/analytics/alerts/service-now-notifications.md deleted file mode 100644 index b3c96f2cc..000000000 --- a/content/controller/analytics/alerts/service-now-notifications.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -description: Set up Alerts Integration with ServiceNow. Deprecated in v3.13. -nd-docs: DOCS-523 -title: ServiceNow Alerts Integration -toc: true -weight: 600 -type: -- how-to ---- - -## ServiceNow Alert Integration - -{{< deprecated >}} -**The ServiceNow Alert Integration is deprecated in F5 NGINX Controller v3.13.** -{{< /deprecated >}} - -The ServiceNow integration sends all notifications from NGINX Controller to the Incidents table in your ServiceNow account. Follow the steps below to set up the integration. - -1. Install Python3 on your machine. -2. In your ServiceNow instance, go to **System OAuth > Application Registry** and create a new OAuth API endpoint for external clients. - - Fill out the form and specify a long refresh token lifespan. Consider aligning the token lifespan with the expiry date of your NGINX Controller license. - - {{< call-out "important" >}} The ServiceNow integration will fail once the refresh token expires.{{< /call-out >}} - -3. Select the **Configure ServiceNow** button. In the prompt, provide the requested information for the ServiceNow client and select **Save**. - - - **ServiceNow Instance** - The instance ID for your ServiceNow account. - - **Client ID** - Client ID from ServiceNow (from Step 2). - - **Client Secret** - Client Secret from ServiceNow (from Step 2). - - **Username** - Your ServiceNow username; this is used to generate the access token and will not be stored. - - **Password** - Your ServiceNow password; this is used to generate the access token and will not be stored. - - **Controller host** - The URL of your NGINX Controller instance. - - **Controller email** - The email that you use to log in to Controller. - - **Controller password** - The password that you use to log in to Controller. - - **Controller port** - The port on which NGINX Controller is running; the default is 80. - - **Company name** - The name of your company; this is used to create the ServiceNow transport. -
-4. Watch Controller alerts come through as incidents in ServiceNow. - - Mapping of Controller Alerts to ServiceNow Priority: - - - ('alerts', 'created') → 1 - - ('alerts', 'cleared') → 3 - - ('agent', 'nginx_not_found') → 1 - - ('agent', 'nginx_config_parsing_error') → 1 - - ('ssl_expiration', 'ssl_cert_has_expired') → 1 - - ('ssl_expiration', 'ssl_cert_will_expire') → 2 - - ('agent', 'agent_version_old') → 2 - - ('agent', 'agent_version_obsoleted') → 1 - - ('group_actions', 'group_action_completed') → 3 - -{{< versions "3.0" "3.13" "ctrlvers" >}} - diff --git a/content/controller/analytics/catalogs/_index.md b/content/controller/analytics/catalogs/_index.md deleted file mode 100644 index 6edb79a50..000000000 --- a/content/controller/analytics/catalogs/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Reference information for F5 NGINX Controller Catalogs. -title: Catalogs Reference -weight: 210 -url: /nginx-controller/analytics/catalogs/ ---- - diff --git a/content/controller/analytics/catalogs/dimensions.md b/content/controller/analytics/catalogs/dimensions.md deleted file mode 100644 index 4ed5ea5b4..000000000 --- a/content/controller/analytics/catalogs/dimensions.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: Information about all of the Dimensions collected by F5 NGINX Controller - Agent. -nd-docs: DOCS-524 -title: NGINX Controller Dimensions Catalog -toc: false -weight: 20 -type: -- reference ---- - -{{< dimensions path="/static/ctlr/catalogs/dimensions-catalog.json" >}} - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/catalogs/metrics.md b/content/controller/analytics/catalogs/metrics.md deleted file mode 100644 index ff4c9b25b..000000000 --- a/content/controller/analytics/catalogs/metrics.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: Information about all of the Metrics collected by F5 NGINX Controller - Agent. -nd-docs: DOCS-525 -title: NGINX Controller Metrics Catalog -toc: false -weight: 20 -type: -- reference ---- - -{{< metrics path="/static/ctlr/catalogs/metrics-catalog.json" >}} - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/dashboards/_index.md b/content/controller/analytics/dashboards/_index.md deleted file mode 100644 index 88cfef7a1..000000000 --- a/content/controller/analytics/dashboards/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn about F5 NGINX Controller Dashboards. -title: Dashboards -weight: 120 -url: /nginx-controller/analytics/dashboards/ ---- - diff --git a/content/controller/analytics/dashboards/application-health-score.md b/content/controller/analytics/dashboards/application-health-score.md deleted file mode 100644 index 9e27b8b3e..000000000 --- a/content/controller/analytics/dashboards/application-health-score.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -description: View and understand the Application Health Score for your application. -nd-docs: DOCS-526 -title: Understanding the Application Health Score -toc: true -weight: 20 -type: -- concept ---- - -## Overview - -When you log in to the F5 NGINX Controller user interface, you will see the **Analytics Dashboard Overview** page. This page contains an Application Health Score (AHS) that reflects your application's performance. 
- -The AHS is a customizable [Apdex-like numerical measure](https://www.apdex.org/) that can be used to estimate the quality of experience for your web application. It lets you configure service-level monitoring for your applications. - -You can select any combination of the following three service-level indicators (SLI) to create your AHS: - -- Successful requests (selected by default), -- (Optional) Request time (95th percentile), and -- (Optional) NGINX Controller Agent availability. - -Successful requests are determined according to the total observed average request time (P95) either below the low threshold (100% satisfying) or between the low and high threshold (partially satisfying). - -A simplified formula for AHS is as follows: - -`AHS = (Successful Requests %) * (Timely Requests %) * (Agent Availability %)` - -When you select the Request Time (95th percentile) for inclusion in the AHS, you can set two thresholds for the total observed average request time (P95): - -- Low threshold for satisfying requests. -- High threshold for partially satisfying requests. - -If the average request time (P95) for the selected time period is below the low threshold, this is considered as a "100% satisfying" state of requests. - -If the request time is above the low threshold and below the high threshold, a "satisfaction ratio" is calculated accordingly. -Requests above the high threshold are considered to be "0%", or "unsatisfying". - -For example: If the low threshold is 0.2s and the high threshold is 1s, a request time greater than 1s would be considered unsatisfying and the resulting score would be 0%. - -The algorithm for calculating the AHS is as follows. Here, `T1` represents the low threshold and `T2` represents the high threshold. - -```nginx -successful_req_pct = (nginx.http.request.count - nginx.http.status.5xx) / nginx.http.request.count - -if (nginx.http.request.time.pctl95 < T1) - timely_req_pct = 1 -else - if (nginx.http.request.time.pctl95 < T2) - timely_req_pct = 1 - (nginx.http.request.time.pctl95 - T1) / (T2 - T1) - else - timely_req_pct = 0 - -m1 = successful_req_pct -m2 = timely_req_pct -m3 = agent_up_pct - -app_health_score = m1 * m2 * m3 -``` - -## Customize the Application Health Score - -Take the steps below to customize the Application Health Score (AHS) that displays on the Overview page. - -{{< call-out "note" >}} -By default, the AHS and other metrics on the **Overview** page are calculated for all of the Instances monitored by the Controller Agent. -{{< /call-out >}} - -1. Open the NGINX Controller user interface and log in. -2. On the **Overview** page, select the Settings (gear) icon in the Application Health Score panel. -3. In the **Service Level Monitoring** window, define the following: - - - (Optional) Create a custom name for the monitor (replaces "Application Health Score"). - - (Optional) Select tags to narrow the data source(s) to a specific Instance or set of Instances. - - Select the Service Indicators that you want to include in the score calculation. - - - Successful requests (selected by default). - - Request time (95th percentile): Set a low threshold and a high threshold, in seconds. - - Agent availability. - -4. Select **Save**. 
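As a quick check of the scoring algorithm above, here is a worked example with illustrative numbers, assuming a low threshold `T1` of 0.2s and a high threshold `T2` of 1s:

```bash
# Illustrative values only: 98% of requests succeeded, the request time (P95)
# was 0.4s, and the Controller Agent was available for the whole period.
successful_req_pct=0.98
agent_up_pct=1.00

# 0.4s falls between T1 (0.2s) and T2 (1s), so the satisfaction ratio is
# 1 - (0.4 - 0.2) / (1 - 0.2) = 0.75
timely_req_pct=$(awk 'BEGIN { printf "%.2f", 1 - (0.4 - 0.2) / (1 - 0.2) }')

# AHS = 0.98 * 0.75 * 1.00 = 0.735, which displays as a health score of 73.5%
awk -v m1="$successful_req_pct" -v m2="$timely_req_pct" -v m3="$agent_up_pct" \
  'BEGIN { printf "app_health_score = %.1f%%\n", m1 * m2 * m3 * 100 }'
```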
- -## What's Next - -- [Overview of metrics and metadata]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) -- [Set up Metrics Collection]({{< ref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) -- [Metrics Catalog Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) -- [Dimensions Catalog Reference]({{< ref "/controller/analytics/catalogs/dimensions.md" >}}) -- [Custom Dashboards]({{< ref "/controller/analytics/dashboards/custom-dashboards.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/dashboards/custom-dashboards.md b/content/controller/analytics/dashboards/custom-dashboards.md deleted file mode 100644 index a4cc10730..000000000 --- a/content/controller/analytics/dashboards/custom-dashboards.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -description: Create custom dashboards to view custom graphs. -nd-docs: DOCS-527 -title: Create Custom Dashboards -toc: true -weight: 20 -type: -- how-to ---- - -## Overview - -You can use the F5 NGINX Controller user interface to create your own Dashboards populated with customizable graphs of NGINX and system-level metrics. - -{{< call-out "note" >}} - -- You can add up to 30 Elements to Dashboard. -- Dashboards are accessible by all Users. - -{{< /call-out >}} - -## Before You Begin - -- [Install the NGINX Controller Agent on instances that you want to monitor]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) -- [Configure Metrics collection on your NGINX instances]({{< ref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) - -## Dashboards - -In NGINX Controller, you can create dashboards to display custom graphs. Some use cases for custom graphs include the following: - -- Checking NGINX performance for a particular application or microservice, for example, based on the URI path -- Displaying metrics per virtual server -- Visualizing the performance of a group of NGINX servers, for example, front-end load-balancers or an NGINX edge caching layer -- Analyzing a detailed breakdown of HTTP status codes per application - -When building a custom graph, metrics can be summed or averaged across NGINX servers. By using metric filters, it is possible to create additional "metric dimensions", for example, reporting the number of POST requests for a specific URI. - - {{< call-out "note" >}} - -The functionality of user-defined dashboards recently changed in NGINX Controller. Some of the functionalities that were present in the -previous version might not be currently present or work differently. Your old dashboards were not migrated to the new version. - - {{< /call-out >}} - -## Create a Custom Dashboard - -To create a custom dashboard: - -1. Open the NGINX Controller user interface and log in. -2. The first page you will see is the **Analytics Overview** page. -3. Select the Dashboards tab to see the **My Dashboards** list page. -4. To create a new dashboard - use **Create** button and provide required information. - -### Add a Dashboard Element - -To add an Element to a Dashboard: - -1. Create a new Dashboard or select the edit icon to edit an existing Dashboard. -2. Select **Add element** button. -3. Provide a title. -4. (Optional) Enter a description of the Element. -5. 
Select the type of Element to add: - - - **Line chart** displays data for a specific time period - - **Stat** displays the metric's most recent value - -6. Select a metric from the drop-down menu. -7. Select the aggregation method for the selected metric. - {{< call-out "note" >}} -For more information about metrics and supported aggregation methods, see the [Metrics Catalog Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}). - {{< /call-out>}} -8. (Optional) Add a filter to refine the data. For example, you can limit the data to a specific App or Environment. -9. (Optional) Select **Add metrics** to add more metrics. - {{< call-out "note" >}} -Additional metrics can only be added to a **Line chart** Element. - {{< /call-out >}} -10. (Optional) Select the **Override Default Time Settings** option to select a time range for the Element. - - - The default time range is the last seven days. - - You can select a new pre-defined time range or select **Custom time range** to define a new time range. - -11. Select **Create** or **Edit** to save your Element settings. - -## Filter Metrics - -You can use the filtering functionality for NGINX metrics. If you select **Add filter**, you can add multiple criteria to define specific "metric dimensions". - -The filter consists of one or more expressions in a form of: - -`dimensionName operator value` - -where: - -- `dimensionName` is a name of the dimension from the dimensions catalog -- `operator` is a comparison rule (equality, likeliness, etc.) -- `value` is a value to which we want compare the dimensions value - -Filters can be used in conjunction using `AND` or `OR` logical operators. There is no possibility of nesting these expressions. - -Filters are used to narrow down the data set presented on the chart/stat. For example, you may not want to display the data for all of your applications, but only for the particular one. - -## Limitations - -- You are not able to add more than 30 elements to the single dashboard. -- All dashboards are accessible for all users. -- Dashboards defined in the old custom dashboards view are not migrated to the new dashboards view. - -## Clone a Custom Dashboard - -To clone an existing dashboard from the Dashboards page, select the **Clone** icon on a dashboard's row, or select **Clone** from the toolbar above the table (you need to select a dashboard first). - -You can also clone a dashboard from the elements view using the **Clone Dashboard** button. This button is not available in "edit" mode, so make sure you finish editing a dashboard before cloning it. - -When you clone a dashboard, the new one will have the same display name as the original dashboard + the current date. For example, if you clone the "My system graphs" dashboard, the cloned dashboard's display name will be something like "My system graphs Aug 24, 2021, 14:37:32". You can change the display name later on the Edit Config page. - -## Predefined Dashboards - -You can find predefined dashboards on the Dashboards page. Predefined dashboards have a special "Read Only" tag, include elements to show the most common metrics, and cover some common cases. The predefined dashboards might be helpful in learning how custom dashboards work. You can clone any of the predefined dashboards and then modify them as needed. - -Predefined dashboards cannot be deleted or modified. - -{{< call-out "note" >}} - -- Predefined dashboards were introduced in NGINX Controller 3.21. 
-- If you already have custom dashboards, the predefined ones should appear at the end of the list when default sorting is applied. - -{{< /call-out >}} - -## What's Next - -- [Overview Dashboard]({{< ref "/controller/analytics/dashboards/overview-dashboard.md" >}}) -- [Overview of Metrics and Metadata]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) -- [Set up Metrics Collection]({{< ref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) -- [Metrics Catalog Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) -- [Dimensions Catalog Reference]({{< ref "/controller/analytics/catalogs/dimensions.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/dashboards/overview-dashboard.md b/content/controller/analytics/dashboards/overview-dashboard.md deleted file mode 100644 index 2693c0f2b..000000000 --- a/content/controller/analytics/dashboards/overview-dashboard.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: Learn about the Dashboards that displays cumulative metrics for your - NGINX Instances. -nd-docs: DOCS-528 -title: Analytics Overview -toc: true -weight: 10 -type: -- how-to ---- - -## Overview - -The **Analytics Dashboards** provides an at-a-glance summary of the state of your F5 NGINX infrastructure and your application performance. - -## Before You Begin - -- [Install the NGINX Controller Agent on Instances that you want to monitor]({{< ref "/controller/admin-guides/install/install-nginx-controller-agent.md" >}}) - -## Overview Dashboard - -When you log in to the NGINX Controller user interface, the **Analytics Overview** page displays first by default. Select the Dashboards tab to see the **My Dashboards** list page. On the **Dashboard Overview** page, you can view the key indicators noted below. By default, the graphs display metrics for the last hour. You can select any of the default time periods -- one hour, four hours, one day, two days, or one week -- to get a better idea of your apps' overall health and performance. To view metrics over longer time periods, you can create a [custom dashboard]({{< ref "/controller/analytics/dashboards/custom-dashboards.md" >}}). - -The cumulative [metrics]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) displayed on the **Analytics Overview** page are: - -### System Metrics - -- [Application Health Score]({{< ref "/controller/analytics/dashboards/application-health-score.md" >}}): the health score for your application. -- Average CPU: 100 - AVG of the system.cpu.idle (CPU spent in an idle state) -- Average Memory: AVG of the `system.mem.used` metric - -### Application Metrics - -- Time to First Byte: AVG of the `client.ttfb.latency.max` metric -- Bytes In/s (Bytes In per second): RATE of the `http.request.bytes_rcvd` metric -- Bytes Out/s (Bytes Out per second): RATE of the `http.request.bytes_sent` metric - -- Total Requests: SUM of the `nginx.http.request.count` metric. -- HTTP 5XX Errors: SUM of the `nginx.http.status.5xx` metric. -- HTTP 4XX Errors: SUM of the `nginx.http.status.4xx` metric. -- Request time (P95): AVG of the `nginx.http.request.time.pctl95` metric. - -- Avg Client Response Latency: AVG of the `client.response.latency.max` metric -- Avg Upstream Response Latency: AVG of the `upstream.response.latency.max` metric -- Avg Client Network Latency: AVG of the `client.network.latency.max` metric. 
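If you want to pull one of these values directly, for example to cross-check a dashboard panel, you can query the Metrics API yourself. The following is a minimal sketch that assumes the `/api/v1/analytics/metrics` endpoint and an already-authenticated session; see the [Metrics API]({{< ref "/controller/analytics/metrics/metrics-api.md" >}}) documentation for the exact URL and parameters.

```bash
# Illustrative only -- the endpoint path and authentication method are assumptions.
# <controller-fqdn> and <session-cookie> are placeholders for your own values.
curl -k --cookie "session=<session-cookie>" \
  "https://<controller-fqdn>/api/v1/analytics/metrics?names=SUM(nginx.http.request.count)&startTime=now-1h&endTime=now"
```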
- -{{< call-out "note" >}} - -By default, the metrics are calculated for **all** of your Controller Agent-monitored Instances. - -To display metrics for a specific set of hosts (for example, only for "production"), select the gear icon on the Application Health Score panel, then add a tag or tags by which you want to filter the results. - -{{< /call-out >}} - -## What's Next - -- [Overview of metrics and metadata]({{< ref "/controller/analytics/metrics/overview-metrics-metadata.md" >}}) -- [Metrics Catalog Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) -- [Dimensions Catalog Reference]({{< ref "/controller/analytics/catalogs/dimensions.md" >}}) -- [Application Health Score]({{< ref "/controller/analytics/dashboards/application-health-score.md" >}}) -- [Custom Dashboards]({{< ref "/controller/analytics/dashboards/custom-dashboards.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/data-explorer/_index.md b/content/controller/analytics/data-explorer/_index.md deleted file mode 100644 index ce3a033f6..000000000 --- a/content/controller/analytics/data-explorer/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn about F5 NGINX Controller Data Explorer. -title: Data Explorer -weight: 120 -url: /nginx-controller/analytics/data-explorer/ ---- - diff --git a/content/controller/analytics/data-explorer/how-to-use.md b/content/controller/analytics/data-explorer/how-to-use.md deleted file mode 100644 index bc9471d26..000000000 --- a/content/controller/analytics/data-explorer/how-to-use.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -description: Use the Data Explorer to examine the metrics that F5 NGINX Controller - collects. -nd-docs: DOCS-529 -title: How To Use the Data Explorer -toc: true -weight: 20 -type: -- how-to ---- - -## Overview - -This topic explains how to use the Data Explorer to view the metrics that F5 NGINX Controller collects. - -The Data Explorer lets you perform these following tasks: - -- Easily switch between contexts, metrics, and dimensions -- Specify a time range of interest -- Set the aggregation mode -- Compare results to previous periods -- Export the query that's used to generate the charts as a URL, which you can use outside of NGINX Controller - -  - -## Select the Context - -To get started with the Data Explorer, you need to select the context for the data you want to view. - -1. Open the NGINX Controller user interface and log in. -1. Select the NGINX Controller menu icon, then select **Analytics > Explorer**. -1. On the Data Explorer detail page, select a context area -- **Instances**, **Environments**, **Gateways**, or **Apps** -- for which you want to view data. - -{{< call-out "note" >}} -When you access the Data Explorer from other areas of the browser interface, the context is already defined. So, for example, if you select Data Explorer from within the Instances module (**Infrastructure > Instances > Data Explorer**), the data for your instances is displayed. When you switch between contexts, the metrics options, such as `system.cpu.idle` or `system.load.5`, are updated. -{{< /call-out >}} - -  - -## Select a Resource - -When you [select the context](#select-the-context) for the Data Explorer, a list of related resources is shown. If there aren't any related resources for the selected context, you'll see the message "No Data" on the Data Explorer detail page. 
- -{{< call-out "note" >}} - -If you don't see a resource in the list, but you expect it to be there, check the [selected metric](#metrics) and the [selected time range](#time-range). When a resource doesn't have the data for the [selected time range](#time-range) it won't be added to the resources list. - -{{< /call-out >}} - -To view data for a resource, select the resource's name from the resource list. - -{{< img src="/ctlr/img/data-explorer_resource.png">}} - -## Metrics - -The [list of metrics]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) is sorted alphabetically, and you can use the search feature to filter the list. As previously mentioned, the list of metrics depends on the context you've selected for the Data Explorer. For example, if you've chosen Instances for the context, then the list of metrics will be for instances. - -{{< img src="/ctlr/img/data-explorer_metric.png">}} - -When the selected metric changes, the **Aggregation** and **Group By** selectors are updated correspondingly (as well as the [list of resources](#select-a-resource) and the [Dimensions panel](#dimensions-panel)). Some metrics have different lists of **Aggregation** and **Group By** values. For example, the `http.response_code` dimension, which is a valid **Group By** value for the `http.request.count` metric, is not available for the `nginx.workers.cpu.user` metric because these items are from different contexts and aren't related to each other. - -## Aggregation Mode - -Use the Aggregation selector -- the Σ symbol with possible values of `AVG`, `MAX`, `MIN`, `RATE`, and `SUM` -- to [aggregate the data]({{< ref "/controller/analytics/metrics/metrics-api.md#aggregations" >}}). The list of possible aggregation values depends on the metrics that's selected. - -{{< img src="/ctlr/img/data-explorer_aggregation.png">}} - -## Group by Dimension - -Use the **Group By** selector to [group the data by a chosen dimension]({{< ref "/controller/analytics/metrics/metrics-api.md#groupby" >}}). - -In the following example image, the data for the `bytes_rcvd` metric is grouped by the dimension `http.request_method`, which displays a data series for the HTTP methods `DELETE`, `GET`, `LINK`, and so on. - -{{< img src="/ctlr/img/data-explorer_group-by.png">}} - -When a **Group By** selection is applied, the chart displays a top-10 data series. For example, let's say you want to check disk usage, so you select the metric `system.disk.total` and `file_path` as the dimension to group by. The chart would then display the top-10 mount points with the highest values. If you have more than 10 mount points, you'll see the top-10 mount points plus an 11th data series that's an aggregation of the rest of the data using the same selection criteria. In other words, you'll see a chart of the 10 most used mount points plus a chart of all the other mount points aggregated into one data series. When a **Group By** dimension is selected, and there are more than 10 dimensions, the 11th data series is named "Other." - -{{< call-out "note" >}} When MIN is selected as the aggregation method, the top-10 series are sorted ascending, lowest-to-highest. For all of the other aggregation methods, the top-10 values are sorted descending, highest-to-lowest. {{< /call-out >}} - -  - -## Time Range - -Use the time range selector above the chart to select the time range you want to examine. You can specify a custom time range if the predefined options aren't what you need. 
- -The granularity of the data series is based on the selected time range and can vary from 30 seconds to five days to make the chart easier to read. - -When you change the time range, the [list of resources](#select-a-resource) is updated correspondingly and it only includes the resources which have the data for the selected time range. - -## Compare To - -Next to the [time range](#time-range) selector, you'll find the `Compare To` list of options. This list allows you to compare data for the selected time frame with data from an earlier period. For example, you may want to view CPU usage for the last hour and compare the results to the same time from yesterday, last week, or even the previous year. - -{{< img src="/ctlr/img/data-explorer_comparison.png">}} - -{{< call-out "note" >}} - -- When comparison is turned on for a data series, the data have the suffix "Compare" in their names. -- If there is no data available for a comparison period, the comparison data series is not shown. -- When a Group By dimension is applied, data comparisons are made only with the top-10 data series and not with the "Other" series, if there is one. See the [Group By](#group-by) section for a discussion of the top-10 and "Other" series. -{{< /call-out >}} - -  - -## Show Query - -On the Data Explorer details page, you can select the **Show Query** button (eye icon) to view the URL that's used to query the Metrics API to get the data you see in the chart. If you copy the query and use it outside of NGINX Controller, you'll get the same data but in JSON format. - -The query updates whenever the selection options change. The query doesn't include requests for comparison data. - -{{< call-out "note" >}} -For instructions on how to understand the Metrics API response, refer to the topic [Using the Metrics API]({{< ref "/controller/analytics/metrics/metrics-api#understanding-the-metrics-api-response" >}}). -{{< /call-out>}} - -  - -## Dimensions panel - -On the right of the screen there is a panel with the list of dimensions available for the [selected metric](#metrics). - -{{< img src="/ctlr/img/data-explorer_dimensions-drawer.png">}} - -Each dimension is presented as a section in which you can expand and see the values for it. The values are aggregated with the [selected aggregation method](#aggregation-mode) for the [selected time range](#time-range). They depend on the following selected parameters: - -- [context](#select-the-context) -- [resource](#select-a-resource) -- [metric](#metrics) -- [aggregation](#aggregation-mode) -- [time range](#time-range) - -When one of the parameters changes, you'll see the values for expanded dimensions are also updated. - -You can see only top-10 values for each dimension, and based on the [selected aggregation](#aggregation-mode), they are sorted in following ways: - -- When MIN is selected as the aggregation method, the top-10 series are sorted ascending, lowest-to-highest. -- For all of the other aggregation methods, the top-10 values are sorted descending, highest-to-lowest. - -{{< call-out "note" >}} - -- When the selected metric changes, the list of dimensions may change as well, and some of the dimensions you recently explored may disappear from the panel. -- This panel was added in NGINX Controller v3.18. 
- -{{< /call-out >}} - -  - -{{< versions "3.17" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/events/_index.md b/content/controller/analytics/events/_index.md deleted file mode 100644 index 5a1be006d..000000000 --- a/content/controller/analytics/events/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: View system events and audit logs. -title: Events -weight: 140 -url: /nginx-controller/analytics/events/ ---- - diff --git a/content/controller/analytics/events/view-events.md b/content/controller/analytics/events/view-events.md deleted file mode 100644 index a2baabfbe..000000000 --- a/content/controller/analytics/events/view-events.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: View the audit log of system and user actions. -nd-docs: DOCS-530 -title: View Events -toc: true -weight: 20 -type: -- how-to ---- - -## Overview - -The Events page shows a log of the system and user actions for F5 NGINX Controller. The logs are organized by event categories and levels, making it easy to identify and review issues. - -## View Events - -Take the steps below to view NGINX Controller events: - -1. Open the NGINX Controller user interface and log in. -1. On the Analytics menu, select **Events**. -1. To view additional information about a particular event, select the event from the list to open the details pane. - -You can filter the events by typing a keyword in the search box and/or by selecting a time period. You can filter the results further by [Event Categories](#event-categories) or [Event Levels](#event-levels). - -## Event Categories - -You can select from the following Event Categories: - -- Agent Events; -- Agent Status Events; -- Controller Events; -- Audit Events -- a log of all actions performed by NGINX Controller users; -- Forwarder Notifications -- events emitted by [Data Forwarders]({{< ref "/controller/analytics/forwarders/_index.md" >}}) -- Workload Health Events -- events emitted by the Controller Agent when the health of an upstream server changes; - -To view the logs for a specific category, select the category name from the **Event Categories** list. - -## Event Levels - -Event levels sort events according to their information level: Debug, Info, Error, Warning, and Critical. - -To view the logs for a specific level, select the level name from the **Event Levels** list. - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/_index.md b/content/controller/analytics/forwarders/_index.md deleted file mode 100644 index 4fa75a092..000000000 --- a/content/controller/analytics/forwarders/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn how to forward data from F5 NGINX Controller to external services. -title: Data Forwarders -weight: 130 -url: /nginx-controller/analytics/forwarders/ ---- - diff --git a/content/controller/analytics/forwarders/forward-analytics-to-datadog.md b/content/controller/analytics/forwarders/forward-analytics-to-datadog.md deleted file mode 100644 index 660e8eeb6..000000000 --- a/content/controller/analytics/forwarders/forward-analytics-to-datadog.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -description: How to forward Analytics data to Datadog. 
-nd-docs: DOCS-531 -title: Forward Analytics Data to Datadog -toc: true -weight: 100 -type: -- tutorial ---- - -## Overview - -Follow the steps in this guide to set up an F5 NGINX Controller Integration that forwards data to [Datadog](https://www.datadoghq.com/). - -## Before You Begin - -This guide assumes that you are already an active Datadog user. If you haven't already done so, you will need to [install and configure Datadog](https://docs.datadoghq.com/) before you proceed. - -You will also need to [Create an Integration]({{< ref "/controller/platform/integrations/datadog-integration.md" >}}) for your Datadog forwarder. - -## Create a Forwarder - -Take the following steps to create a Forwarder for Datadog: - -1. Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select **Platform**. -3. On the **Platform** menu, select **Data Forwarders**. -4. On the **Data Forwarders** menu, select the **Create Data Forwarder** quick action. -5. Add a name. -6. (Optional) Add a display name. -7. (Optional) Add a description. -8. Select your **Integration Reference** from the dropdown menu or select **Create New** to create a new Integration. -9. In the **Collector Type** list, select `DATADOG`. -10. In the **Source** list, select the type of data to forward: `metrics` or `events`. -11. In the **Output Format** list, select `DATADOG`. -12. The **Selector** field consists of the following query parameters (optional): - -- `names` (inapplicable for `EVENTS`): The list of metrics names that you want to forward. -- `excluded_names` (inapplicable for `EVENTS`): The list of metric names that you don't want to forward. -- `filter`: The conditions to use to refine the metrics or events data. -- Example usage when selecting metrics: `"names=nginx.*&excluded_names=nginx.upstream.*filter=app='myapp'"` -- Example usage when selecting events: `"filter=type='security violation' AND app='my-app'"` - -13. (Optional) Add additional **Streams** as required using the **Add Stream** button. - -{{< call-out "important" >}} - -Each metric will be prefixed with a common namespace -- such as "nginx-controller" -- before it is sent to Datadog. This prefix is used by Datadog only and is not applied to any of the internal NGINX Controller metrics. Refer to the [metrics catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) for the full list of valid metric names. - -For events, the "nginx-controller" namespace is added to the ["ddsource" key](https://docs.datadoghq.com/api/v1/logs/#send-logs). - -{{< /call-out >}} - -NGINX Controller events are sent to Datadog as logs and NGINX Controller dimensions are sent as tags. The Forwarder converts the dimension data to comply with the Datadog [tags format](https://docs.datadoghq.com/getting_started/tagging/#defining-tags) prior to forwarding it. In some cases, the original dimension value may be transformed to fit the tag requirements. This includes replacing comma characters (`,`) with semicolons (`;`) to ensure that Datadog will properly handle the incoming payload. - -{{< call-out "note" >}} - -See the [NGINX Controller Metrics]({{< ref "/controller/analytics/metrics/_index.md" >}}) docs for more information. - -{{< /call-out>}} - -## Verification - -Soon after you create the Datadog forwarder, you can view the selected metrics in Datadog. - -1. Log into the [Datadog web interface](https://app.datadoghq.com/). -2. On the navigation menu, select **Metrics** > **Summary**. 
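You can also verify from the command line by querying Datadog's timeseries API for one of the forwarded metrics. The query below is a sketch: it assumes the default "nginx-controller" namespace prefix described above and Datadog's v1 `query` endpoint, and the API and application keys are placeholders for your own credentials.

```bash
# Placeholders: <datadog-api-key> and <datadog-app-key> are your Datadog credentials.
# Uses GNU date to build a one-hour time window.
curl -G "https://api.datadoghq.com/api/v1/query" \
  -H "DD-API-KEY: <datadog-api-key>" \
  -H "DD-APPLICATION-KEY: <datadog-app-key>" \
  --data-urlencode "from=$(date -d '1 hour ago' +%s)" \
  --data-urlencode "to=$(date +%s)" \
  --data-urlencode "query=avg:nginx-controller.nginx.http.request.count{*}"
```

If the forwarder is working, the response should include a timeseries for the queried metric.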
- -## What's Next - -- Refer to [Troubleshooting Forwarders]({{< ref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues. - -{{< versions "3.8" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/forward-analytics-to-otlp.md b/content/controller/analytics/forwarders/forward-analytics-to-otlp.md deleted file mode 100644 index e8dcdd18e..000000000 --- a/content/controller/analytics/forwarders/forward-analytics-to-otlp.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: How to forward Analytics Metrics to OpenTelemetry Collector. -nd-docs: DOCS-532 -title: Forward Analytics Metrics to OpenTelemetry Collector -toc: true -weight: 201 -type: -- tutorial ---- - -## Overview - -Follow the steps in this guide to set up an F5 NGINX Controller integration that forwards metrics to OpenTelemetry Collector. - -## Before You Begin - -This guide assumes that you already have a working OpenTelemetry Collector instance. - -You will also need to [Create an Integration]({{< ref "/controller/platform/integrations/otlp-integration.md" >}}) for your OpenTelemetry Collector forwarder. - -## Create a Forwarder - -Take the following steps to create a forwarder for OpenTelemetry Collector: - -1. Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select **Platform**. -3. On the **Platform** menu, select **Data Forwarders**. -4. On the **Data Forwarders** menu, select **Create Data Forwarder**. -5. Add a name. -6. (Optional) Add a display name. -7. (Optional) Add a description. -8. Select your **Integration Reference** from the dropdown list, or select **Create New** to create a new integration. -9. In the **Collector Type** list, select `OTLP_HTTP` or `OTLP_GRPC`. -10. In the **Source** list, select the type of data to forward: `METRICS`. -11. In the **Output Format** list, select `OTLP`. -12. The **Selector** field consists of the following query parameters (optional): - -- `names`: The list of metrics names that you want to forward. -- `excluded_names`: The list of metric names that you don't want to forward. -- `filter`: The conditions to use to refine the metrics data. -- Example usage when selecting metrics: `"names=nginx.*&excluded_names=nginx.upstream.*&filter=app='myapp'"` - -13. (Optional) Select **Add Stream** to add additional streams, as needed. - -{{< call-out "important" >}} - -Each metric is prefixed with a common namespace -- for example, "nginx-controller" -- before it's sent to OpenTelemetry Collector. This prefix is used only by OpenTelemetry Collector and is not applied to any internal NGINX Controller metrics. Refer to the [metrics catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) for the full list of valid metric names. - -We have tested compatibility with OTLP collector v0.33.0. Later versions will most likely work as well, assuming backward compatibility from OTLP. - -{{< /call-out >}} - -{{< call-out "note" >}} - -See the [NGINX Controller Metrics]({{< ref "/controller/analytics/metrics/_index.md" >}}) docs for more information. - -{{< /call-out>}} - -## What's Next - -- Refer to [Troubleshooting Forwarders]({{< ref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues.
- -{{< versions "3.16" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/forward-analytics-to-splunk.md b/content/controller/analytics/forwarders/forward-analytics-to-splunk.md deleted file mode 100644 index fc62eccf5..000000000 --- a/content/controller/analytics/forwarders/forward-analytics-to-splunk.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: How to forward Analytics data to Splunk. -nd-docs: DOCS-533 -title: Forward Analytics Data to Splunk -toc: true -weight: 200 -type: -- tutorial ---- - -## Overview - -Follow the steps in this guide to set up an F5 NGINX Controller Integration that forwards data to [Splunk](https://www.splunk.com/). - -## Before You Begin - -This guide assumes that you are already an active Splunk user. If you haven't already done so, you will need to [install and configure Splunk](https://docs.splunk.com/Documentation) before you proceed. - -You will also need to [Create an Integration]({{< ref "/controller/platform/integrations/splunk-integration.md" >}}) for your Splunk forwarder. - -## Create a Forwarder - -Take the following steps to create a Forwarder for Splunk: - -1. Open the NGINX Controller user interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Data Forwarders**. -1. On the **Data Forwarders** menu, select the **Create Data Forwarder** quick action. -1. Add a name. -1. (Optional) Add a display name. -1. (Optional) Add a description. -1. Select your **Integration Reference** from the dropdown menu or select **Create New** to create a new Integration. -1. In the **Collector Type** list, select `SPLUNK`. -1. In the **Source** list, select the type of data to forward: `metrics` or `events`. -1. In the **Output Format** list, select `SPLUNK`. -1. The **Selector** field consists of the following query parameters (optional): - - - `names` (inapplicable for `EVENTS`): The list of metrics names that you want to forward. - - `excluded_names` (inapplicable for `EVENTS`): The list of metric names that you don't want to forward. - - `filter`: The conditions to use to refine the metrics or events data. - - Example usage when selecting metrics: `"names=nginx.*&excluded_names=nginx.upstream.*filter=app='myapp'"` - - Example usage when selecting events: `"filter=type='security violation' AND app='my-app'"` - -1. (Optional) Add additional **Streams** as required using the **Add Stream** button. - -{{< call-out "important" >}} - -Each metric will be prefixed with a common namespace -- such as `nginx-controller` -- before it is sent to Splunk. This prefix is used by Splunk only and is not applied to any of the internal NGINX Controller metrics. Refer to the [metrics catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) for the full list of valid metric names. - -In case of events, the "nginx-controller" namespace will be placed in the ["source" key](https://docs.splunk.com/Documentation/Splunk/8.1.1/Data/FormateventsforHTTPEventCollector#Event_metadata) and sent with each event. - -{{< /call-out >}} - -{{< call-out "note" >}} - -See the [NGINX Controller Metrics]({{< ref "/controller/analytics/metrics/_index.md" >}}) docs for more information. - -{{< /call-out>}} - -## What's Next - -- Refer to [Troubleshooting Forwaders]({{< ref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues. 
- -{{< versions "3.6" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/forwarders/forward-analytics-to-syslog.md b/content/controller/analytics/forwarders/forward-analytics-to-syslog.md deleted file mode 100644 index f135c3322..000000000 --- a/content/controller/analytics/forwarders/forward-analytics-to-syslog.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -description: How to forward Analytics Events to Syslog. -nd-docs: DOCS-534 -title: Forward Analytics Events to Syslog -toc: true -weight: 201 -type: -- tutorial ---- - -## Overview - -Follow the steps in this guide to set up a F5 NGINX Controller Integration that forwards events to a syslog server. - -## Before You Begin - -This guide assumes that you already have a working instance of any syslog server. - -If you haven't already done so, you can use an open-source version of [Syslog-NG](https://www.syslog-ng.com/products/open-source-log-management/). - -You will also need to [Create an Integration]({{< ref "/controller/platform/integrations/syslog-integration.md" >}}) for your Syslog forwarder. - -## Create a Forwarder - -Take the following steps to create a Forwarder for Splunk: - -1. Open the NGINX Controller user interface and log in. -1. Select the NGINX Controller menu icon, then select **Platform**. -1. On the **Platform** menu, select **Data Forwarders**. -1. On the **Data Forwarders** menu, select the **Create Data Forwarder** quick action. -1. Add a name. -1. (Optional) Add a display name. -1. (Optional) Add a description. -1. Select your **Integration Reference** from the dropdown menu or select **Create New** to create a new Integration. -1. In the **Collector Type** list, select `SYSLOG`. -1. In the **Source** list, select the type of data to forward: `events`. NGINX Controller can forward only `EVENTS` data to syslog. -1. In the **Output Format** list, select `SYSLOG`. -1. The **Selector** field consists of the following query parameters (optional): - - - `filter`: The conditions to use to refine the metrics or events data. - - Example usage: `"filter=type='security violation' AND app='my-app'"` - -1. (Optional) Add additional **Streams** as required using the **Add Stream** button. - -## What's Next - -- Refer to [Troubleshooting Forwaders]({{< ref "/controller/support/troubleshooting-forwarders.md" >}}) for tips on resolving common issues. - -{{< versions "3.16" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/metrics/_index.md b/content/controller/analytics/metrics/_index.md deleted file mode 100644 index 841c2b5b5..000000000 --- a/content/controller/analytics/metrics/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn about F5 NGINX Controller Metrics. -title: Metrics -weight: 150 -url: /nginx-controller/analytics/metrics/ ---- - diff --git a/content/controller/analytics/metrics/metrics-api.md b/content/controller/analytics/metrics/metrics-api.md deleted file mode 100644 index 900da4362..000000000 --- a/content/controller/analytics/metrics/metrics-api.md +++ /dev/null @@ -1,479 +0,0 @@ ---- -description: Tips and tricks for using the Metrics API query parameters to refine - your data. 
-nd-docs: DOCS-535 -title: Using the Metrics API -toc: true -weight: 50 -type: -- tutorial ---- - -## Overview - -You can use the F5 NGINX Controller Analytics module to monitor your NGINX instances and evaluate your applications' performance. The [Metrics API]({{< ref "/controller/api/_index.md" >}}) query parameters let you fine-tune your system data based on parameters such as time window, aggregation, time resolution, and filter. - -By using different combinations of these query parameters, you can gather information that lets you: - -- Identify which of your Apps receives the most traffic -- query for the highest number of requests among all apps. -- Understand the behavior of your back-end server(s) -- query for upstream latency by instance or location. -- Monitor your application performance -- filter on HTTP response codes to track the number of successful or failed requests by app and environment. -- Understand how your App behavior and/or usage changes across version releases -- compare data like the examples above across different versions of your application. - -## Usage - -You can use the NGINX Controller [Metrics API]({{< ref "/controller/api/_index.md" >}}) to query for desired metric names and fine-tune the data returned based on the following parameters: - -- time window (`startTime` and `endTime`) -- `filter` -- `resolution` -- `groupBy` -- `seriesLimit` -- `orderSeriesBy` -- `dimensions` - -{{< call-out "note" >}} -Because NGINX Controller is constantly evolving, these example metrics and dimensions may differ from what you see with your NGINX Controller instance. Some metrics may require pre-configured applications to be visible in the API. -{{< /call-out >}} - -### Understanding the Metrics API Response - -The [Metrics API]({{< ref "/controller/api/_index.md" >}}) response consists of query metadata and an array of `metrics` -- one array element for each queried metric. - -- The **metric** object includes the queried metric name and an array of data series associated with the metric. -- The **series** object groups metrics data according to dimension values. The series consists of dimensions (key-value map), timestamps, and the timestamps' metric values. - -```json -{ - "metrics":[ - { - "name":"http.request.count", - "series":[ - { - "dimensions":{ - "app":"app-name", - "component":"component-name", - "environment":"environment-name", - "gateway":"gateway-name", - "instance":"instance-name" - }, - "timestamps":[ - "2020-07-01T12:00:00Z" - ], - "values":[ - 1000 - ] - }, - { - "dimensions":{ - "app":"app-name-2", - "component":"component-name", - "environment":"environment-name", - "gateway":"gateway-name", - "instance":"instance-name" - }, - "timestamps":[ - "2020-07-01T12:00:00Z" - ], - "values":[ - 2000 - ] - } - ] - } - ], - "queryMetadata":{ - "endTime":"2020-07-01T12:00:00.970106672Z" - } -} -``` - -In the preceding example, there are two data series for the queried metric. The differentiator between the two series is the "app" name. This name is what makes NGINX metrics app-centric: you can easily distinguish metrics based on their dimensions' values, such as an App, Environment, or Gateway name. 
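-
-For example, if you only want the series for one of those Apps, you can narrow the response with the `filter` query parameter (described later in this topic). The following request is a sketch that assumes an App named `app-name` exists in your environment:
-
-```curl
-# 'app-name' is an example App name; replace it with one of your own Apps
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&filter=app='app-name'&startTime=now-1h"
-```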
- -You can view the full list of the supported metrics and dimensions, with detailed descriptions, by querying the Catalog API: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/catalogs/metrics" -``` - -Likewise, you can get a full list of the available dimensions by querying the Catalogs API: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/catalogs/dimensions" -``` - -This information is also provided in the [Catalogs Reference]({{< ref "/controller/analytics/catalogs/_index.md" >}})). - -### Querying the Metrics API - -This section provides an overview of each query parameter and examples of using the parameters together to refine your data. - -The examples progress from basic usage to more advanced API queries. - -#### Names - -The `names` parameter is the only required parameter in the [Metrics API]({{< ref "/controller/api/_index.md" >}}). - -The following example query returns a response with the last recorded value for the queried metric: `http.request.count`: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count" -``` - -If the dimension values differ, the `series` array in the response will contain multiple items. - -It is possible to query the API for several metrics simultaneously. To do so, provide the metric names as a comma-separated list: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count,http.request.bytes_rcvd" -``` - -#### Time Window - -To get more than the last recorded value for the queried metric, use the following time window parameters: - -- `startTime` indicates the start of the time window to include metrics from (inclusive). -- `endTime` means the end of the time window to include metrics from (non-inclusive). - -There are a few rules to remember when working with time window parameters: - -- If you provide an `endTime`, you must also provide a `startTime`; -- `endTime` must be greater than `startTime`; -- If you give a `startTime` but don't give an `endTime`, the `endTime` defaults to the current time. - -You can define time using the `ISO 8601` format or as an offset (for example, `2020-07-14T13:07:11Z`). An offset is a string that starts with `+` or `-`, followed by a number and a unit of time: `y`, `M`, `w`, `d`, `h`, `m`, or `s`. You can also use `now` to indicate the current timestamp. - -The following example request returns all the recorded metric values for the last three hours. - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&startTime=now-3h" -``` - -The following example query contains a fully defined time window: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&startTime=now-5h&endTime=2020-07-01T09:00:00Z" -``` - -In this case, the response contains metrics from 05:00:00 to 09:00:00 on the 1st of July 2020. - -#### Aggregations - -Using only `names` and time window parameters will give you the raw data points of metrics values. - -To get a more organized response, you can provide an aggregate function for each queried metric: `AVG`, `SUM`, `COUNT`, `MAX`, `MIN`, or `RATE`. - -{{< call-out "note" >}} -In the following definitions, `time period` refers to the `resolution` (if provided) or the difference between the `endTime` and `startTime` (when `resolution` is not provided). 
-{{< /call-out >}} - -- `AVG` - calculates the average value of the metric data samples over the period -- `SUM` - calculates the total value of the metric data samples over the period -- `COUNT` - returns the number of collected data samples of the metric over the period -- `MIN`/`MAX` - returns the minimal/maximal data sample of the metric from the given period -- `RATE` - returns an average value of the metric calculated per second (always *per second*, regardless of the provided `resolution`), based on the data available in the given period - -{{< call-out "note" >}} -You must define a `startTime` when using aggregate functions. -{{< /call-out >}} - -{{< call-out "note" >}} -The list of supported aggregate functions for any particular metric is available in the [Metrics Catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}})). -{{< /call-out>}} - -For example, the following query returns a single value (per dimension set), which is the sum of the metric values for the last three hours. To get proper values, ensure that the `endTime` is greater than the `startTime`. - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-3h" -``` - -It is possible to use aggregated and non-aggregated metrics in a single query. For this query, the [Metrics API]({{< ref "/controller/api/_index.md" >}}) returns a single value per dimension set. That value is the sum of all of the metric's values for the last three hours. - -For example: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count),http.request.bytes_rcvd&startTime=now-3h" -``` - -{{< call-out "important" >}} -Using AVG aggregation with traffic metrics with the `.total` suffix may cause confusion because traffic metrics are already aggregated. To learn more, refer to the [Overview: Traffic Metrics]({{< ref "/controller/analytics/metrics/overview-traffic-metrics.md" >}})) topics. -{{< /call-out >}} - -#### Resolution - -If you want to change the returned data's granularity, you can use `resolution` parameter. This parameter must be used in conjunction with an aggregation function and a time window (at least `startTime` must be provided). - -The `resolution` parameter must be a valid duration. The duration is a string that starts with a number, followed by a unit of time: `y`, `M`, `w`, `d`, `h`, `m`, or `s`. - -The following example query returns three aggregated metric values. Here, we're asking for the data from last three hours with one-hour granularity: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count),&startTime=now-3h&resolution=1h" -``` - -There may be situations when the returned resolution is lower than that requested in the query. This has to do with metrics retention periods—the older the metric, the lower the resolution. - -If the time window contains metrics with a lower resolution than was queried for, the API downsizes the granularity to the lowest possible value. You will see a warning in the `responseMetadata`: - -```json -"responseMetadata": { - "warning": "Time window is above 8 days, Resolution is downsized to 300 seconds" -} -``` - -If no `resolution` is provided, the maximum available resolution is returned. This is calculated as `endTime` - `startTime`. - -#### Filter - -This parameter, as the name indicates, filters results based on the value of dimensions. 
Filtering by dimension value can help to refine the data that's returned into a more specific set.
-
-The `filter` query consists of one or more predicates in the form of `<dimension> <operator> <value>`, where:
-
-- `<dimension>` is the name of the dimension;
-- `<operator>` is one of the supported operators (`=`, `!=`, `<`, `<=`, `>=`, `>`, `in`, or `not`);
-- `<value>` is the value of the dimension(s) that you want to filter on.
-
-For example, the following query includes a simple filter on the app name. The query returns data for the application named `app1` for the last three hours.
-
-```curl
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&filter=app='app1'&startTime=now-3h"
-```
-
-{{< call-out "tip" >}}
-
-- Predicates can be combined into logical expressions using `OR`, `AND`, and `(` `)`.
-- For matching values, wildcard (`*`) use is supported.
-- We recommend wrapping predicates in single quotes to ensure that the full query string is processed correctly.
-
-{{< /call-out >}}
-
-The following example request uses `filter` with logical expressions:
-
-```curl
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=http.request.count&filter=app='ap*' and environment='prod'&startTime=now-3h"
-```
-
-#### GroupBy
-
-Filters and aggregation functions alone may not give you comprehensive information about a specific application or environment.
-
-The `groupBy` parameter gathers results according to the specified dimension(s). You can provide multiple dimension names as a comma-separated list.
-
-{{< call-out "note" >}}
-
-- When using `groupBy`, you must use an aggregate function and a time window (`startTime` must be defined; `endTime` is optional).
-- If a request contains aggregated and non-aggregated metrics, the `groupBy` parameter will apply only to the aggregated metrics.
-
-{{< /call-out >}}
-
-For example, the following query returns the request count for the last three hours, grouped by the `app` and `alias` dimensions:
-
-```curl
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&groupBy=app,alias&startTime=now-3h"
-```
-
-The API response for the query looks similar to the following:
-
-```json
-{
-  "metrics":[
-    {
-      "aggr": "SUM",
-      "name":"http.request.count",
-      "series":[
-        {
-          "dimensions":{
-            "app":"app-name",
-            "alias": "alias1"
-          },
-          "timestamps":[
-            "2020-07-01T12:00:00Z"
-          ],
-          "values":[
-            1000
-          ]
-        },
-        {
-          "dimensions":{
-            "app":"app-name-2",
-            "alias": "alias1"
-          },
-          "timestamps":[
-            "2020-07-01T12:00:00Z"
-          ],
-          "values":[
-            2000
-          ]
-        }
-      ]
-    }
-  ],
-  "queryMetadata":{
-    "endTime":"2020-07-01T12:00:00.970106672Z"
-  }
-}
-```
-
-The API returns the data for the last three hours grouped by the `app` and `alias` dimensions. Unlike other queries, the API returns only those dimensions that have been selected in `groupBy`. However, series with different dimension values are still returned as separate items.
-
-#### SeriesLimit and OrderSeriesBy
-
-There are cases when you might want to view only a specific data series (for example, "Top-5"). To query the API for a particular series of data, you can define the `seriesLimit` and `orderSeriesBy` query parameters.
-
-- `seriesLimit` sets an upper limit on the number of series returned.
-- `orderSeriesBy` sorts the series values according to the order specified:
-
-  - Must consist of two tokens -- an aggregate function and a sort order. For example, `SUM DESC`, `MIN ASC`, and so on.
-  - Can be used only in combination with `seriesLimit`.
-
-When you specify a `seriesLimit`, the response always includes one other series with an `all` metric. This series aggregates the metric values of all the series that are not included in the result. If the total number of series returned is greater than the limit specified in the query parameter, an additional series named `other` is returned. This series aggregates the metric values of the series outside of the specified limit.
-
-{{< call-out "note" >}}
-When using `seriesLimit`, you can only specify one metric name in the `names` parameter and one `groupBy` parameter.
-{{< /call-out >}}
-
-**Example 1**
-The following example request uses `seriesLimit` to restrict the data returned to five series:
-
-```curl
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&groupBy=app&seriesLimit=5&startTime=now-3h&resolution=5m"
-```
-
-The response contains data for the last three hours, grouped by the `app` dimension and limited to five series. As with other `groupBy` queries, the API returns only the dimensions that have been selected in `groupBy`; series with different dimension values are returned as distinct items.
-
-**Example 2**
-The following example query uses both `seriesLimit` and `orderSeriesBy`:
-
-```curl
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(some.metric.name)&groupBy=someDimension&seriesLimit=5&orderSeriesBy=MAX DESC&startTime=now-1d&endTime=now&resolution=5m"
-```
-
-**Example 3**
-Building on the previous examples, here we use `seriesLimit` and `orderSeriesBy` to get the top-5 URIs with the highest number of bytes received for a specific App and Environment:
-
-```curl
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.bytes_rcvd)&startTime=now-1h&filter=app='app1' AND environment='qa'&groupBy=http.uri&seriesLimit=5&orderSeriesBy=MAX DESC"
-```
-
-In this case, the API returns five data series for the last hour, ordered by MAX value in descending order for bytes received per URI, for the application `app1` deployed in the `qa` environment.
-
-Together, these parameters are particularly useful for refining data: `seriesLimit` sets how many series are returned, and `orderSeriesBy` defines the criteria for ordering them.
-
-#### Dimensions
-
-You can use the `dimensions` query parameter to specify which dimension(s) should be included in each metric series' response.
-
-Dimensions not specified in the query parameter are not included in the response. This may result in some series having the same dimension set but being returned as separate list items.
- -The following example returns results for the specified metric, where `dimensions=environment`: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&dimensions=environment&startTime=now-3h -``` - -If you have multiple Apps, the response looks similar to the following example: - -```json -{ - "metrics":[ - { - "aggr": "SUM", - "name":"http.request.count", - "series":[ - { - "dimensions":{ - "environment":"prod" - }, - "timestamps":[ - "2020-07-01T12:00:00Z" - ], - "values":[ - 1000 - ] - }, - { - "dimensions":{ - "environment":"prod" - }, - "timestamps":[ - "2020-07-01T12:00:00Z" - ], - "values":[ - 2000 - ] - } - ] - } - ], - "queryMetadata":{ - "endTime":"2020-07-01T12:00:00.970106672Z" - } -} -``` - -If `dimensions` and `groupBy` parameters are both used, the list of provided `dimensions` must be a subset of the list provided in `groupBy`. - -The following example uses `dimensions` with `groupBy`: - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&groupBy=app,location&dimensions=app&startTime=now-3h&resolution=5m -``` - -The `dimensions` parameter also lets you omit the dimensions from the response altogether. To do so, define `dimensions` as an empty list (`dimensions=`). - -This results in several data series for the `http.request.count` metric without any dimensions being visible. That is not useful on its own; however, if you combine the empty `dimensions` parameter with metric aggregation, you will receive a single series with aggregated values. - -For example, the following example query sums all the values in all of the series of the `http.request.count` metric for the past three hours using the default `resolution`. - -```curl -curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-3h&dimensions= -``` - -The response looks similar to the following example: - -```json -{ - "metrics":[ - { - "aggr": "SUM", - "name":"http.request.count", - "series":[ - { - "dimensions":{}, - "timestamps":[ - "2020-07-01T12:00:00Z", - "2020-07-01T12:00:30Z", - "2020-07-01T12:01:00Z", - "2020-07-01T12:01:30Z", - ... - ], - "values":[ - 3000, - 2500, - 2800, - 1900, - ... - ] - } - ] - } - ], - "queryMetadata":{ - "endTime":"2020-07-01T15:00:00Z" - } -} -``` - -{{< call-out "important" >}} -You cannot use `dimensions` with the `seriesLimit` parameter. -{{< /call-out >}} - -## What's Next - -- [Metrics Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}})) -- [Dimensions Reference]({{< ref "/controller/analytics/catalogs/dimensions.md" >}})) -- [Create Custom Dashboards]({{< ref "/controller/analytics/dashboards/custom-dashboards.md" >}})) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/metrics/overview-metrics-metadata.md b/content/controller/analytics/metrics/overview-metrics-metadata.md deleted file mode 100644 index 5f22c4722..000000000 --- a/content/controller/analytics/metrics/overview-metrics-metadata.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -description: Understanding how the F5 NGINX Controller Agent collects and reports - metrics and metadata. 
-nd-docs: DOCS-536 -title: 'Overview: Metrics and Metadata' -toc: true -weight: 20 -type: -- reference ---- - -## Overview - -The data that F5 NGINX Controller collects can be divided into two categories: - -- **System metrics**: Data collected from the NGINX Plus API, the NGINX log files, and NGINX process state. -- **Traffic metrics**: Data related to processed traffic, with the ability to distinguish the Application, API endpoint, or Environment that traffic is directed through. - -{{< call-out "note" >}} -The key difference between system and traffic metrics is that traffic metrics are pre-aggregated for each time period. -{{< /call-out >}} - -Metrics are published at a regular interval of 60 or 30 seconds for system and traffic metrics, respectively. - -This topic gives an overview of the traffic metrics. Also known as "app-centric" metrics, traffic metrics contain information that lets you easily identify the App to which the data applies. - -{{< call-out "note" >}} -Refer to [View traffic metrics]({{< ref "/controller/analytics/metrics/view-traffic-metrics.md" >}}) for instructions on how to view traffic metrics using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}). -{{< /call-out>}} -## Metadata and Metrics That Are Reported - -The NGINX Controller Agent collects the following types of data: - -- **NGINX metrics.** The Agent collects NGINX-related metrics using the NGINX Plus API, and by monitoring the NGINX log files and NGINX process state. -- **AVRD metrics.** AVRD sends app-centric data, so each metric has assigned dimensions like "application name" or "gateway". These metrics are related to processed traffic (for example, the number of bytes sent to a particular URL/endpoint). -- **NGINX configuration.** After the initial installation, the NGINX configuration is uploaded to the NGINX Controller server. Configuration updates are also uploaded to the NGINX Controller server. -- **System metrics.** These are key metrics describing the system. For example: CPU usage, memory usage, network traffic, etc. -- **NGINX metadata.** These describe your NGINX instances, and include package data, build information, the path to the binary, build configuration options, and so on. NGINX metadata also includes the NGINX configuration elements. -- **System metadata.** These are the basic information about the OS environment where the Agent runs. For example, the hostname, uptime, OS flavor, and other data. - -For the full list of metrics, see the [Metrics Catalog Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) - -## Metrics Collection and Reporting Process - -The Agent mostly uses Golang's [gopsutil](https://github.com/shirou/gopsutil) to collect OS metrics. - -While the Agent is running on the host, it collects metrics at regular 20-second intervals. Metrics then are downsampled and sent to the Controller server once per minute. The Agent reports metadata to the NGINX Controller server every minute. Changes to the metadata can be examined using the Controller user interface. - -NGINX Controller stores historical metrics data in an analytics database. Metrics are aggregated and rolled-up as follows: - -- Data not older than 8 days are stored with best possible resolution (usually 1 min). -- Data older than 8 days but not older than 30 days are stored with 5 min resolution. -- Data older than 30 days but not older than 15 months are stored with 1 hour resolution. -- Data older than 15 months are stored with 1 day resolution. 
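-
-These rollups affect the granularity that the Metrics API can return. For example, the following sketch of a Metrics API query (using the query parameters described in [Using the Metrics API]({{< ref "/controller/analytics/metrics/metrics-api.md" >}})) asks for one-minute resolution over a 30-day window; because data older than 8 days is stored at 5-minute resolution, the API downsizes the granularity and reports this in the response's `responseMetadata` warning:
-
-```curl
-# Requests 1m resolution over 30 days; the API returns the coarser stored resolution instead
-curl -X GET --cookie "session=" --url "{controller-IP}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-30d&resolution=1m"
-```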
- -### Parsing and Analyzing NGINX Configuration Files - -NGINX configuration updates are reported only when a configuration change is detected. - -The Agent checks the Controller server every 30 seconds for pending NGINX configuration changes. When changes are pending, the changes are applied and the NGINX is reloaded. Because the configuration is managed in the Controller server, the entire configuration is written to a single `nginx.conf` file. - -If the Agent cannot reach the Controller server to send the accumulated metrics, it continues to collect metrics and sends them to the Controller server as soon as connectivity is re-established. The maximum amount of data that can be buffered by the Agent is about 2 hour's worth of data. - -The Agent is able to automatically find all relevant NGINX configuration files, parse them, extract their logical structure, and send the associated JSON data to the Controller Server for further analysis and reporting. - -To parse SSL certificate metadata, the NGINX Controller Agent uses standard `openssl`(1) functions. SSL certificates are parsed and analyzed only when the corresponding [Agent settings]({{< ref "/controller/admin-guides/config-agent/configure-the-agent.md#default-agent-settings" >}}) are turned on. SSL certificate analysis is `off` by default. - -## Troubleshooting - -Most metrics are collected by the Agent without requiring the user to perform any additional setup. For troubleshooting instructions, see [Troubleshooting NGINX Controller Metrics]({{< ref "/controller/support/troubleshooting-controller.md" >}}). - -## What's Next - -- [Set up Metrics Collection]({{< ref "/controller/admin-guides/config-agent/configure-metrics-collection.md" >}}) -- [Metrics Reference]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/metrics/overview-traffic-metrics.md b/content/controller/analytics/metrics/overview-traffic-metrics.md deleted file mode 100644 index 4a6aad116..000000000 --- a/content/controller/analytics/metrics/overview-traffic-metrics.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -description: Understanding how traffic metrics are collected, aggregated, and reported. -nd-docs: DOCS-537 -title: 'Overview: Traffic Metrics' -toc: true -weight: 100 -type: -- concept -- reference ---- - -## Overview - -The data that F5 NGINX Controller collects can be divided into two categories: - -- **System metrics**: Data collected from the NGINX Plus API, the NGINX log files, and NGINX process state. -- **Traffic metrics**: Data related to processed traffic, with the ability to distinguish the Application, API endpoint, or Environment that traffic is directed through. - -{{< call-out "note" >}} -The key difference between system and traffic metrics is that traffic metrics are pre-aggregated for each time period. -{{< /call-out >}} - -Metrics are published at a regular interval of 60 or 30 seconds for system and traffic metrics, respectively. - -This topic gives an overview of the traffic metrics. Also known as "app-centric" metrics, traffic metrics contain information that lets you easily identify the App to which the data applies. 
- -{{< call-out "note" >}} -Refer to [View traffic metrics]({{< ref "/controller/analytics/metrics/view-traffic-metrics.md" >}}) for instructions on how to view traffic metrics using the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}). -{{< /call-out>}} - -## Available traffic metrics - -- `client.latency.{total | max | min | count}` -- `client.network.latency.{total | max | min | count}` -- `client.request.latency.{total | max | min | count}` -- `client.ttfb.latency.{total | max | min | count}` -- `client.response.latency.{total | max | min | count}` -- `upstream.network.latency.{total | max | min | count}` -- `upstream.header.latency.{total | max | min | count}` -- `upstream.response.latency.{total | max | min | count}` -- `http.request.bytes_rcvd` -- `http.request.bytes_sent` -- `http.request.count` - -{{< call-out "note" >}} -Refer to the [NGINX Controller Metrics Catalog]({{< ref "/controller/analytics/catalogs/metrics.md" >}}) for details about these and the other metrics that NGINX Controller reports. -{{< /call-out>}} - -## Calculating traffic metrics - -As traffic flows through a configured application, NGINX Controller collects the traffic-related data. With heavy traffic, the number of single, distinguishable metrics can be challenging to discern. For this reason, the metric values are aggregated. - -The aggregation happens every publish period -- this period is stored in the `aggregation_duration` dimension, and is usually 30 seconds -- and is based on metric dimensions. - -Metrics are aggregated using four aggregation functions: - -- **SUM** for `http.request.bytes_rcvd`, `http.request.bytes_sent` and all metrics with `.total` suffix. -- **MAX** for metrics with `.max` suffix. -- **MIN** for metrics with `.min` suffix. -- **COUNT** for metrics with `.count` suffix. - -### Example - -To better understand how metrics are aggregated, consider the following example: - -Imagine you have one application configured with one URI (recorded in the `http.uri` dimension of each traffic-related metric). In the last 30 seconds, a user queried that URI five times. The `client.request.latency` values for the requests were: 1 ms, 2 ms, 3 ms, 4 ms, and 5 ms. - -The final metric values returned by the Metrics API will be: - -- `http.request.count` = 5 -- `client.request.latency.total` = 15 ms -- `client.request.latency.max` = 5 ms -- `client.request.latency.min` = 1 ms -- `client.request.latency.count` = 5 - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/metrics/view-traffic-metrics.md b/content/controller/analytics/metrics/view-traffic-metrics.md deleted file mode 100644 index d7f0acbe2..000000000 --- a/content/controller/analytics/metrics/view-traffic-metrics.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -description: How to view the traffic metrics gathered by NGINX Controller Analytics. -nd-docs: DOCS-538 -title: View Traffic Metrics -toc: true -weight: 150 -type: -- how-to -- tutorial ---- - -## Overview - -This topic explains how to use the [NGINX Controller REST API]({{< ref "/controller/api/_index.md" >}}) - to view traffic metrics. - -{{< call-out "note" >}} -Refer to [Overview: Traffic Metrics]({{< ref "/controller/analytics/metrics/overview-traffic-metrics.md" >}}) to learn how NGINX Controller collects, aggregates, and reports traffic metrics. 
-{{< /call-out>}} - -## Before You Begin - -To view traffic metrics, first confirm that you've correctly configured NGINX Controller. - -The following resources should have the status `Configured`: - -- [Environment]({{< ref "/controller/services/manage-environments.md" >}}) -- [Gateway]({{< ref "/controller/services/manage-gateways.md" >}}) -- [App and Component]({{< ref "/controller/app-delivery/manage-apps.md" >}}) - -Initially, the graphs will display `No data yet`, and querying the Metrics API for traffic metrics will result in an empty response. As soon as the Component starts to receive traffic, the traffic-related data will be displayed in the graphs and the [Dashboards]({{< ref "/controller/analytics/dashboards/overview-dashboard.md" >}}) in the NGINX Controller user interface and will be returned in API responses. - -{{< call-out "note" >}} -If traffic stops flowing to a resource (for example, an Application or Component), then no traffic metrics will be available for the resource. -{{< /call-out >}} - -## View Traffic Metrics Using the REST API - -- To view the full list of metrics and dimensions, send a GET request to the `/analytics/catalogs/metrics` endpoint: - - ```curl - curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/catalogs/metrics" - ``` - -- To view a detailed description for a metric, send a GET request to the `/analytics/catalogs/metrics/{metricName}` endpoint: - - ```curl - curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/catalogs/metrics/client.latency.total" - ``` - -- Likewise, to view the full list of available dimensions, send a GET request to the `/analytics/catalogs/dimensions` endpoint: - - ```curl - curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/catalogs/dimensions" - ``` - -{{< call-out "note" >}} -Refer to the [Catalogs Reference]({{< ref "/controller/analytics/catalogs/_index.md" >}}) for information about all of the dimensions and metrics collected by NGINX Controller. -{{< /call-out>}} - -## Example REST API Queries for Traffic Metrics - -Because traffic metrics are already aggregated, you should be careful about using the Metrics API for aggregations. - -### Example 1 - -Goal: Retrieve the total number of requests for the last 3 hours: - -```curl -curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/metrics?names=SUM(http.request.count)&startTime=now-3h" -``` - -The Metrics API returns a single value per dimension set. That value is the sum of the aggregated values (in 30s intervals) for the last 3 hours. - -### Example 2 - -Goal: Retrieve an average value of max client latencies for my app -- let's call it `app1` -- for the last day: - -```curl -curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/metrics?names=AVG(client.latency.max)&startTime=now-24h&filter=app='app1'" -``` - -### Example 3 - -{{< call-out "important" >}} -Because traffic metrics are pre-aggregated, using AVG aggregation with these metrics isn't recommended. -{{< /call-out >}} - -Imagine you have one application configured with one URI (recorded in the `http.uri` dimension of each traffic-related metric). In the last 30 seconds, a user queried that URI 5 times. The `client.request.latency` values for each request were: 1 ms, 2 ms, 3 ms, 4 ms, 5 ms. 
- -The final metric values returned by the Metrics API will be: - -- `client.request.latency.total` = 15 ms -- `client.request.latency.count` = 5 - -The following query returns the average `client.request.latency.total = 15`, as you have one aggregated sample with value 15. - -```curl -curl -X GET --cookie "session=" --url "{Controller-FQDN}/api/v1/analytics/metrics?names=AVG(client.request.latency.total)&startTime=now-24h" -``` - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/analytics/view-app-security-analytics.md b/content/controller/analytics/view-app-security-analytics.md deleted file mode 100644 index ee33ecfaa..000000000 --- a/content/controller/analytics/view-app-security-analytics.md +++ /dev/null @@ -1,272 +0,0 @@ ---- -description: How to view App Security Analytics. -nd-docs: DOCS-539 -title: View App Security Analytics -toc: true -weight: 500 -type: -- concept -- reference ---- - -## Overview - -When App Security flags or blocks a request made to an App Component as a security violation, it generates an App Security event. -You can use the F5 NGINX Controller web interface or the REST API to view these events or their related statistics (measures). Metrics reflect the number of requests and bytes flagged or blocked. You can use the Security Violation Dimensions to help understand and interpret the analytics data. - -For descriptions of Security Metrics and Events Dimensions, refer to [About App Security]({{< ref "/controller/app-delivery/security/concepts/what-is-waf.md" >}}) page. - -## View App Security Analytics - -You can use the NGINX Controller user interface or the REST API to view App Security Analytics. You can use this data to get a quick, high-level understanding of how the App Security module processes requests to an App. - -1. Open the NGINX Controller user interface and log in. -2. On the Navigation Bar, select **Services**. -3. On the Services Menu, select **Apps**. -4. On the Apps Overview page, select the App name link. -5. Select **Security Analytics** under the Analytics sub-menu. - -## View Security Analytics for Components - -To view Security Analytics for individual Components, take the steps below. - -1. Open the NGINX Controller user interface and log in. -2. On the Navigation Bar, select **Services**. -3. On the Services Menu, select **Apps**. -4. On the Apps Overview page, select the App name link. -5. Select **Components** from the menu. Select the Component name link. -6. Select **Security Analytics** under the Analytics sub-menu. - -### View App Security Events - -To view app security events: - -1. Open the NGINX Controller user interface and log in. -2. On the Navigation Bar, select **Services**. -3. On the Services Menu, select **Apps**. -4. On the Apps Overview page, select the App name link. -5. Select **Security Events** under the Analytics sub-menu. - -### View Security Events for Components - -To view the security events for components, take the following steps: - -1. Open the NGINX Controller user interface and log in. -2. On the Navigation Bar, select **Services**. -3. On the Services Menu, select **Apps**. -4. On the Apps Overview page, select the App name link. -5. Select **Components** from the sub-menu. Select the Component name link. -6. Select **Security Events** under the Analytics sub-menu. 
- -## Example REST API Queries for App Security Metrics - -Requests which App Security has rejected or allowed: - -```curl -https://{{host}}/api/v1/analytics/metrics? - startTime=0& - endTime=now& - names=sum(http.request.count)& - groupBy=request_outcome& - resolution=30m -``` - -Possible request outcomes are: - -- Passed: WAF allowed the request -- Rejected: WAF blocked the request - -To get request counts based on how App Security processed the traffic: - -```curl -https://{{host}}/api/v1/analytics/metrics? - startTime=0& - endTime=now& - resolution=5m& - names=sum(http.request.count)& - groupBy=request_outcome_reason& - filter=( - app='shopping' and - environment='prod' and - component='app-component') -``` - -| **request_outcome_reason values** | **Description** | -|--------------------------------|-----------------| -| \ | App Security did not process the traffic (in other words, App Security is not enabled). All events with this request_outcome_reason value should have a request_outcome `PASSED`.| -| SECURITY_WAF_OK | App Security processed the traffic and no violations are found. All events with this request_outcome_reason value should have a request_outcome of `PASSED`.| -| SECURITY_WAF_FLAGGED | App Security allowed the request, but it was flagged for review. All events with this request_outcome_reason value should have a request_outcome of `PASSED`.| -| SECURITY_WAF_VIOLATION | App Security identified one or more violations and rejected the request. All events with this request_outcome_reason value should have a request_outcome of `REJECTED`.| - -If you feel App Security is blocking too many requests, you can turn on monitor-only mode. - -### Security Violation Events - -You can use Security Violation Events to investigate violations identified by App Security for requests made to an App Component. Follow the steps below to view the Security Events: - -1. Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select **Analytics**. -3. On the **Analytics Menu**, select **Component**. - -You can use the following example Events requests to collect App Security Analytics data by using the NGINX Controller REST API: - -- To view ‘security violation’ Events: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation') - ``` - -- To get security violation details based on the Support ID seen on the request blocking page: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - waf.support_id='1880765231147185611') - ``` - -- To get all events where WAF rejected to investigate: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - request_outcome='REJECTED') - ``` - -- To get all events where WAF flagged to investigate: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - request_outcome_reason='SECURITY_WAF_FLAGGED') - ``` - -- To get all events where WAF has rejected or flagged to review: - - ```curl - GET https://{{host}}/api/v1/analytics/events? 
- startTime=0& - endTime=now& - filter=( - category='security violation' and - request_outcome_reason in ('SECURITY_WAF_VIOLATION','SECURITY_WAF_FLAGGED')) - ``` - -- To get all events where WAF has rejected or flagged for a specific App Component: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - request_outcome_reason in ('SECURITY_WAF_VIOLATION','SECURITY_WAF_FLAGGED') and - app='shopping' and - environment='prod' and - component='app-component') - ``` - - {{< call-out "tip" >}} -To get all Events, remove the Environment, App, and Component filters from the request call. - {{< /call-out >}} - -- To find requests flagged by App Security’s violation rating algorithm as a possible or likely threat: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - request_outcome_reason = 'SECURITY_WAF_FLAGGED' and - waf.violation_rating in ('POSSIBLE_ATTACK','MOST_LIKELY_ATTACK') and - app='shopping' and - environment='prod' and - component='app-component') - ``` - - {{< call-out "important" >}} -This is important if you are using App Security WAF monitoring only mode. You can use it to understand the type of threats WAF believes should be blocked. - {{< /call-out >}} - -- To get Events that have triggered a specific signature-based violation by signature id: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - waf.signature_ids ='*200000098*' and - app='shopping' and - environment='prod' and - component='app-component') - ``` - - The substring search using wildcards or ‘IN’ operand should be used because each signature might be part of various combinations of signatures triggered by App Security per request. - -- To get Events that have triggered a specific a signature-based violation by signature id: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - waf.signature_names IN ('DIRECTORY_TRAVERSAL') and - app='shopping' and - environment='prod' and - component='app-component') - ``` - - The substring search using wildcards or ‘IN’ operand should be used because each signature might be part of various combinations of signatures triggered by App Security per request. - -- To get Events that triggered a particular attack type: - - ```curl - GET https://{{host}}/api/v1/analytics/events? - startTime=0& - endTime=now& - filter=( - category='security violation' and - waf.attack_types='*Non-browser Client, Abuse of Functionality*' and - app='shopping' and - environment='prod' and - component='app-component') - ``` - - The substring search using wildcards or ‘IN’ operand should be used because each signature might be part of various combinations of attack types triggered by App Security per request. - -- To get Events from a remote address (client IP) - - ```curl - GET https://{{host}}/api/v1/analytics/events? 
- startTime=0& - endTime=now& - filter=( - category='security violation' and - http.remote_addr='172.18.71.147' and - app='shopping' and - environment='prod' and - component='app-component') - ``` - -## Related Pages - -- [About App Security]({{< ref "/controller/app-delivery/security/concepts/what-is-waf.md" >}}) - -{{< versions "3.11" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/api-management/_index.md b/content/controller/api-management/_index.md deleted file mode 100644 index 91312bebd..000000000 --- a/content/controller/api-management/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Tasks for deploying and managing your APIs. -title: API Management -weight: 155 -url: /nginx-controller/api-management/ ---- - diff --git a/content/controller/api-management/manage-apis.md b/content/controller/api-management/manage-apis.md deleted file mode 100644 index 5c7a389aa..000000000 --- a/content/controller/api-management/manage-apis.md +++ /dev/null @@ -1,505 +0,0 @@ ---- -description: Use the F5 NGINX Controller API Manager to add APIs and control how your - APIs are exposed and consumed. -nd-docs: DOCS-569 -title: Manage Your APIs -toc: true -weight: 110 -type: -- tutorial ---- - -## Overview - -The F5 NGINX Controller API Management module provides full life cycle management for your APIs. This document provides a walkthrough of the steps needed to create, version, and publish your API using the NGINX Controller API Management module. When you have completed this guide, you should have the following resources: - -- An **API Definition**, which stores a collection of related API resources. It can be thought of as a folder. -- An **API Version**, which describes a particular API and serves as the data contract. It describes available endpoints and operations on each endpoint and can also include API documentation. -- A **Published API**, which represents an API Version that has been deployed to an NGINX Plus instance serving as an API Gateway. -- (Optional) API documentation available via the Developer Portal. - -{{< call-out "note" >}} - -- You must have an API Management module license installed to complete the steps in this guide. -- The API Management module is available to users with the predefined [Admin or User Roles]({{< ref "/controller/platform/access-management/manage-roles.md#predefined-roles-and-role-groups" >}}). - -{{< /call-out >}} - -## Create an API Definition - -An API Definition functions as a collection of related API resources. - -1. Open the NGINX Controller user interface and log in. - -2. Select the NGINX Controller menu icon, then select **Services**. - -3. On the **Services** menu, select **APIs**. - -4. On the **All APIs** page, select **Create** and choose **API Definition**. Alternatively, you can also select **Create API Definition** from the Quick Actions list. - -## Create an API Version - -An API Version describes a particular API. It can be thought of as an API specification. - -1. Open the NGINX Controller user interface and log in. - -2. Select the NGINX Controller menu icon, then select **Services**. - -3. On the **Services** menu, select **APIs**. - -4. On the **All APIs** page, select **Create** and choose **API Version**. Alternatively, you can also select **Create API Version** from the Quick Actions list. - -5. Select an existing **API Definition** under which to group the API Version or select **Create New** to add a new **API Definition**. 
- -6. Choose how you would like to describe the API: - - 1. [OpenAPI specification](#import-an-openapi-specification) - - 2. [Configure manually](#define-api-resources-manually) - - 3. [WSDL file](#import-a-web-services-description-language-wsdl-file) (Currently only supports unauthenticated, unencrypted traffic) - -7. Provide a version. If a version isn't provided, the default version `unspecified` is used. - -8. (Optional) Provide a display name. - -9. (Optional) Provide a description. - - {{< call-out "note" >}} - - If your API specification includes a description, that text populates this field automatically when you [add your OpenAPI spec](#import-an-openapi-specification). - - {{< /call-out >}} - -10. (Optional) Add tags. - -### Import an OpenAPI Specification - -The APIM module supports import of a valid OpenAPI v3 specification formatted as valid JSON or YAML. - -{{< call-out "note" >}} - -If your API spec includes documentation elements, the "Enable documentation" option is selected automatically. You do not need to take any additional steps to document your API. - -{{< /call-out >}} - -**To import your spec by uploading a file:** - -1. Select **OpenAPI Specification**. - -2. Select **Import file**. - -3. Drag and drop your file into the upload box, or select **Browse** to find and upload a file. - -**To import your spec by copying and pasting:** - -1. Select **OpenAPI Specification**. - -2. Select **Copy and paste specification text**. - -3. Paste or type your API in the space provided. - -Once you have imported your API spec, select **Next** to continue to the **Resources** page. - -### Define API Resources Manually - -Take the steps below to manually add your API resources. - -1. Select **Configure Manually**. - -2. Select **Next** to continue to the **Resources** page. - -3. Select **Add API Resource**. - -4. Select the **Match Type** to use for the API resource path. - -5. Specify the **Path** for the API resource. -**Tip**: Path should start with `/`, for example, `/userlookup/{userid}/attributes/{surname}`. - -6. Select the HTTP method(s). - -7. (Optional) [Document Your API](#document-your-api). - -8. Review the API spec that will be submitted to create the **API Version**. - -9. Select **Submit** to save the **API Version**. - -### Document Your API - -Follow the steps below to document your API. - -{{< call-out "note" >}} - -API documentation must follow the OpenAPI 2.0/3.0 Specification. - -If you uploaded an API spec that contains documentation, you don't need take any further steps to document your API. - -{{< /call-out >}} - -{{< call-out "tip" >}} - -Skip to step 6 if you're continuing from the [Define API Resources Manually](#define-api-resources-manually) section. - -{{< /call-out >}} - -1. Open the NGINX Controller user interface and log in. - -2. Select the NGINX Controller menu icon, then select **Services**. - -3. On the **Services** menu, select **APIs**. - -4. On the **All APIs** page, select the **API Version** for which you want to create documentation. Click the pencil (edit) button to edit the API Version. - -5. Select **Resources**. - -6. Select the pencil (edit) icon next to the method or methods that you want to document. - -7. Select **Enable Documentation**. - -8. Add a summary. - -9. (Optional) Add a description. - -10. (Optional) Add a request body description. - -11. (Optional) Add a sample request. - -12. Specify whether the request body is required. - -13. To add a parameter, select **Add Parameter**. - -14. Provide the parameter name. - -15. 
(Optional) Provide a parameter description. - -16. Select the parameter type. - -17. Select the parameter value. - -18. (Optional) Specify whether the parameter is required. - -19. To add a response, select **Add Response**. - -20. Provide the HTTP Response status code. - -21. Provide a description. - -22. (Optional) Provide a sample response in JSON format. - -23. Select **Next** to review the API spec that will be submitted to update the **API Version**. - -24. Select **Submit** to save the **API Version**. - -### Import a Web Services Description Language (WSDL) file - - {{< call-out "caution" >}} - -Currently, only HTTP is supported for SOAP-REST proxy traffic. Traffic will be unauthenticated and unencrypted, and as a result will be vulnerable to several security risks. It should be treated as a beta/preview feature. - - {{< /call-out >}} - -The APIM module supports importing a WSDL file that describes a SOAP service. - -**To import your spec by uploading a file:** - -1. Select **WSDL File**. - -2. Select **Import file**. - -3. Drag and drop your file into the upload box, or select **Browse** to find and upload a file. - -**To import your spec by copying and pasting:** - -1. Select **WSDL file**. - -2. Select **Copy and paste WSDL text**. - -3. Paste or type your API in the space provided. - -Once you've imported your API spec, select **Next** to continue to the **Resources** page. Note that you need to submit the API spec before you can modify the **Resources** and **Schema**. Select **Submit** to save the **API Version.** - -### Modify Schema and Resources for an API Version created from a WSDL file - -Take the following steps to **Edit** add your API Version: - -1. On the **All APIs** page, select the **API Version** that was created from a WSDL - -2. Select **Next** to continue to the **Resources** page. - -3. For each **SOAP operation**, choose the appropriate equivalent **REST Method**. - -4. (optional) Modify the **Path** for the API resource as desired. - - {{< call-out "tip" >}} - - Path should start with `/`, for example, `/userlookup/{userid}/attributes/{surname}`. - - {{< /call-out >}} - -5. Select **Next** to continue to the **Schema** page - -6. (Optional) For each JSON schema, modify the **Property** as desired - -7. Review the API spec that will be submitted to create the **API Version**. - -8. Select **Submit** to save the **API Version**. - -## Publish an API - -You need at least one of each of the resources listed below to complete this section. If you haven't already created the required resources, you can do so while configuring the Published API. - -- [Environment]({{< ref "/controller/services/manage-environments.md" >}}) - -- [Gateway]({{< ref "/controller/services/manage-gateways.md" >}}) - -- [App]({{< ref "/controller/app-delivery/manage-apps.md" >}}) - -- [Identity Provider]({{< ref "/controller/services/manage-identity-providers.md" >}}) - - (required to add Authentication to the Published API Component). - -{{< call-out "tip" >}} -You can connect one or more [Developer Portals]({{< ref "/controller/api-management/manage-dev-portals.md" >}}) to your Published API to host your API documentation. This can be done either when creating or editing your Published API, or independently via the API Quick Actions menu. -{{< /call-out >}} - -### Add a Published API - -1. Open the NGINX Controller user interface and log in. - -2. Select the NGINX Controller menu icon, then select **Services**. - -3. On the **Services** menu, select **APIs**. - -4. 
On the **All APIs** page, select the **API Version** that you want to publish. - -5. Select **Add Published API**. - -#### Configure the Published API - -On the **Create Published API** *Configuration* page: - -1. Select the **API Definition Version** that you want to publish. - -2. (Optional) Provide a **Base Path** for the Published API. - -3. Specify whether the **Strip Base Path** parameter is required. - - {{< call-out "note" >}} - - The `Strip Base Path` option modifies the path that is passed from the Gateway to the upstream host. When the option is selected, the base path will be removed from the original request when the request is passed to the upstream host. If the option is not selected, the original request -- including the base path -- is passed from the Gateway to the upstream host. - - {{< /call-out >}} - -4. Provide a Name and/or Display Name for the Published API. - -5. (Optional) Provide a description for the Published API. - -6. (Optional) Add tags. - -7. Select **Next**. - -#### Define the Published API Deployment - -For each of the steps below, you can create a new resource for the Published API by selecting the **Create New** link. - -On the **Create Published API** *Deployment* page: - -1. Select the **Environment** that the Published API belongs to. - -2. Select the **App** that the Published API represents. - -3. Select the **Gateway(s)** that will expose the Published API. - -4. Select the **Dev Portal(s)** that will host documentation for the Published API. - -5. Select **Next**. - -#### Define the Routing Rules - -On the **Create Published API** *Routing* page: - -1. Select the **Add New** link to create a new App Component resource for the Published API. The **Create App Component** page has multiple sections. - -2. On the **Create App Component** *Configuration* page: - - 1. Provide the name for your Component. - - 2. (Optional) Provide a Display Name. - - 3. (Optional) Provide a Description. - - 4. (Optional) Add any desired tags. - - 5. (Optional) Select the error response format. - - 6. Select **Next**. - -3. On the **Create App Component** *Workload Groups* page: - - 1. Provide a Workload Group Name. - - 2. (Optional) Select a Location. The default Location is 'Unspecified'. This value is applied automatically to "bring your own" (BYO) NGINX Plus instances that were not deployed by NGINX Controller. - - 3. Define the backend workload URIs. - - 4. (Optional) Define the DNS Server. - - 5. (Optional) Select the Load Balancing Method. The default value is `Round Robin`. - - 6. (Optional) Select the Session Persistence Type (applicable only to Web Components). - - 7. (Optional) Select the Desired Proxy Settings (applicable only to Web Components). - - 8. Select **Next**. - {{< call-out "note" >}} - - - Refer to the [Manage Locations]({{< ref "/controller/infrastructure/locations/manage-locations.md" >}}) topic for more information. - - - Refer to the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/) for more information about the available options. - - {{< /call-out>}} - {{< call-out "tip" >}} - Hover your pointer over the info icon for each setting to learn about the expected values and requirements. - {{< /call-out >}} - - -4. On the **Create App Component** *Rate Limiting* page: - - 1. Enable Rate Limiting and select a **Key**. - - 2. Select options for Rate and Units. - - 3. (Optional) Select options for Excess Request Processing and Ignore Initial N Requests. - - 4. Select options for Reject Status Code. 
- - 5. Select **Next**. - -5. On the **Create App Component** *Authentication* page: - - 1. Select **Add Authentication**. - - 2. Select an [**Identity Provider**]({{< ref "/controller/services/manage-identity-providers.md" >}}). - - 3. Select a **Credential Location**. - - 1. (Optional) Enable [**Conditional Access**]({{< ref "/controller/services/available-policies.md#conditional-access" >}}). - - 4. Select **Next**. - -{{< call-out "important" >}} - -The **Advanced Security** features require an *NGINX Controller API Management Advanced Security* license. - -{{< /call-out >}} - -6. On the **Create App Components** *Advanced Security* page: - - 1. (Optional) Select **Enable Web Application Firewall (WAF)** to monitor and block suspicious requests or attacks. - - 2. (Optional) Select **Monitor Only** to allow traffic to pass without being rejected. Security events are still generated and metrics are still collected. Refer to [About App Security Analytics]({{< ref "/controller/analytics/view-app-security-analytics.md" >}}) for more information. - - 3. (Optional) Add the signature(s) that you want WAF to ignore. You can specify multiple signatures as a comma-separated list. - - 4. Select **Next** - - {{< call-out "note" >}} Refer to the [Default WAF Policy]({{< ref "/controller/app-delivery/security/concepts/app-sec-default-policy-original.md" >}}) topics to learn more about the default protection provided by NGINX App Protect. {{< /call-out>}} - - -7. On the **Create App Component** *Ingress* page: - - 1. (Optional) Set the desired **Client Max Body Size**. - 2. Select **Next**. - - {{< call-out "note" >}} - - Refer to the [NGINX module docs](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) for more information about this option. - - {{< /call-out>}} - - -8. On the **Create App Component** *Monitoring* page: - - 1. (Optional) Enable **Health Monitoring** and define the desired Monitoring Request and Response. Health Monitoring is disabled by default. - - 2. (Optional) Specify the URI to use in health check requests (applicable only to Web Components). The default is `/`. For TCP/UDP Components, specify the Send string. - - 3. (Optional) Specify the port to use when connecting to a server to perform a health check. The server port is used by default. - - 4. (Optional) Set the interval to wait between two consecutive health checks. The default is 5 seconds. - - 5. (Optional) Specify the number of consecutive passed health checks that must occur for a server to be considered healthy. The default is `1`. - - 6. (Optional) Specify the number of consecutive failed health checks that must occur for a server to be considered unhealthy. The default is `1`. - - 7. (Optional) Specify the default state for the server. The default state is `HEALTHY`. - - 8. (Optional) Specify the starting HTTP status code to match against (applicable only to Web components). - - 9. (Optional) Specify the ending HTTP status code to match against (applicable only to Web components). - - 10. (Optional) Select whether a response should pass in order for the health check to pass (applicable only to Web components). By default, the response should have status code `2xx` or `3xx`. - - 11. Select **Next**. - - {{< call-out "note" >}} - - Refer to the [NGINX module docs](http://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) for more information about these options. - - {{< /call-out>}} - -9. On the **Create App Component** *Logs* page: - - 1. 
(Optional) Select the logs to enable:

      - Error Log

      - Access Log

   2. (Optional) Specify the log format to use.

   3. Select **Next**.

   {{< call-out "note" >}}

   Refer to the [NGINX docs](http://nginx.org/en/docs/http/ngx_http_log_module.html) for more information about these options.

   {{< /call-out>}}

10. On the **Create App Component** *Programmability* page:

    The following settings are applicable **only to Web components**.

    1. (Optional) Select **Add URI Redirects** and define the desired redirect condition(s).

    2. (Optional) Select **Add URI Rewrite** and define the desired rewrite pattern(s).

    3. (Optional) Select **Add Request Header Modification** and define how to modify the request header.

    4. (Optional) Select **Add Response Header Modification** and define how to modify the response header.

    5. Select **Next**.

    {{< call-out "note" >}}

    Refer to the [NGINX module docs](http://nginx.org/en/docs/http/ngx_http_rewrite_module.html) for more information about these options.

    {{< /call-out>}}

11. Select **Next** to review the API spec that will be sent to create the App Component.

12. Drag and drop resources one at a time, or move multiple resources by selecting the checkboxes next to the desired resources, from the **Unrouted** column to the desired Component in the **Components** list. You can use the search bar to narrow down the list.
    **Note:** Resources can be dragged between **Components** and back to the **Unrouted** section either one at a time or by multi-select.

13. Select **Next** to review the API spec that will be sent to create the Published API.

14. Select **Submit** to create the Published API.

## Create a Developer Portal

Once you have created an API Definition and a Published API, you can host your API in a Developer Portal.

From the **API Definitions** page, select **Create Dev Portal** from the Quick Actions menu. Then, follow the steps in [Create a Developer Portal]({{< ref "/controller/api-management/manage-dev-portals.md" >}}) to create, customize, and publish your Dev Portal.

{{< versions "3.0" "3.18" "ctrlvers" >}}
{{< versions "3.18" "latest" "apimvers" >}} diff --git a/content/controller/api-management/manage-dev-portals.md b/content/controller/api-management/manage-dev-portals.md deleted file mode 100644 index 3f3f1aa5d..000000000 --- a/content/controller/api-management/manage-dev-portals.md +++ /dev/null @@ -1,138 +0,0 @@ ---
description: Learn how to create and manage Developer Portals for your API documentation.
nd-docs: DOCS-570
title: Manage Developer Portals
toc: true
weight: 120
type:
- tutorial
---

## Overview

You can use F5 NGINX Controller Developer Portals (also called 'Dev Portals') to create and manage beautiful, easy-to-use API reference documentation to support your [Published APIs]({{< ref "/controller/api-management/manage-apis.md#publish-an-api" >}}).

## About Developer Portals

In NGINX Controller, each Dev Portal sits within an Environment. An Environment can contain multiple Dev Portals. You can use the same Dev Portal names across different Environments, which means you can create "test", "dev", and "production" versions of your Dev Portal across the corresponding Environments.

Each Dev Portal is associated with a Gateway, which defines the URI at which users can access the Dev Portal -- for example, `developer.acme.com`.
A Gateway for a Developer Portal can be placed on a dedicated Instance, or share an Instance with other Gateway resources. - -## Before You Begin - -You must complete the steps below before you can create a Developer Portal. - -1. [Create an Environment]({{< ref "/controller/services/manage-environments.md" >}}). -1. [Create a Gateway]({{< ref "/controller/services/manage-gateways.md" >}}) for the Dev Portal. - - {{< call-out "tip" >}} -You can create multiple Dev Portal Gateways on the same Instance. If you do so, be sure to use a unique hostname and port for each. For example: - -- Gateway 1's ingress URI is `https://dev-developer.acme.com`. -- Gateway 2's ingress URI is `https://test-developer.acme.com`. These resources might both have IP addresses and ports that are accessible only from within your private network. -- Gateway 3's ingress URI is `https://developer.acme.com`. This resource would have a public IP address and be accessible via the internet. - -If you create multiple Dev Portal Gateways on the same Instance using the same hostname and port, the Dev Portal configuration will fail. - {{< /call-out >}} - -1. [Create an API Definition]({{< ref "/controller/api-management/manage-apis.md#create-an-api-definition" >}}). - - {{< call-out "tip" >}} -If you choose to [define your API manually]({{< ref "/controller/api-management/manage-apis.md#define-resources-manually" >}}), be sure to [document your API]({{< ref "/controller/api-management/manage-apis.md#document-your-api" >}}). - {{< /call-out >}} - -1. [Create a Published API]({{< ref "/controller/api-management/manage-apis.md#publish-an-api" >}}). - - {{< call-out "important" >}} -You must create an App Component when creating a Published API. You'll [assign routes]({{< ref "/controller/api-management/manage-apis.md#define-the-routing-rules" >}}) from the API Definition to this Component. - -Both the Published API and the associated App Component must be successfully created before you can create a Dev Portal. - -See [Manage Your APIs]({{< ref "/controller/api-management/manage-apis.md" >}}) and the [troubleshooting](#troubleshoot-dev-portal-publication) section below for more information. - -You also have the option to associate Dev Portal(s) in the *Deployment* page when you [Add a Published API]({{< ref "/controller/api-management/manage-apis.md#add-a-published-api" >}}). If you already have a Published API and you want to create a new Dev Portal to host it, complete the tasks described in this guide. - - {{< /call-out >}} - -## Create a Developer Portal - -To create a Dev Portal, take the steps below: - -1. Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select Services. -3. On the **Services** menu, select APIs. -4. On the APIs page, select **Create Dev Portal** from the Quick Actions menu. - - {{< call-out "tip" >}} -If you want to connect one or more Dev Portals to an existing Published API, you should select the **Edit Published API** option. The API Documentation will be published to the selected Dev Portal(s). Refer to the [Define the Published API Deployment]({{< ref "/controller/api-management/manage-apis.md#define-the-published-api-deployment" >}}) section for more information and instructions. - {{< /call-out >}} - -### Configure the Developer Portal - -On the **Create Dev Portal** *Configuration* page: - -1. Provide a resource name for the Dev Portal. -2. (Optional) Provide a display name, description, and tags. -3. 
Select the desired Environment, or select Create to create a new resource. -4. Select a Gateway, or select Create to create a new resource. -5. Select the Published API(s) that you want to host in the Dev Portal. -6. Select **Next** to move to the **Themes** page. - -### Define the Dev Portal Theme - -On the **Create Dev Portal** *Themes* page: - -1. Select **Brand** to define the following elements: - - - **Brand Name**, - - **Logo**, and - - **Favicon** - -2. Select **Next**. -3. Set the **Colors** for theme elements. Then, select **Next**. -4. Set the **Fonts** for the theme. Then, select **Next**. -5. Review the **API Spec**, then select **Submit**. - -> You should now be able to access the Dev Portal via the hostname and port that you assigned to the Dev Portal Gateway. - -## View, Edit, or Delete a Developer Portal - -To view, edit, or delete a Dev Portal, take the steps below: - -1. Open the NGINX Controller user interface and log in. -2. Select the NGINX Controller menu icon, then select Services. -3. On the **Services** menu, select APIs. -4. On the APIs menu, select **Dev Portals**. - -To **edit** a Dev Portal: - -1. Select the **Edit** icon for the Dev Portal. -2. Edit the Dev Portal as desired. - - - Select **Configure** to update the Dev Portal configurations, including the Environment, Gateway, and Published API. - - Select **Brand** to customize the **Brand Name** and to upload a **Logo** and **Favicon**. - - Select **Color** to customize the Dev Portal theme colors. - - Select **Fonts** to customize the Dev Portal theme fonts. - -3. Select **Submit** to save your changes. - -To **delete** a Dev Portal, select the **Delete** icon. Then, select **Delete** in the confirmation prompt window. - -## Troubleshoot Dev Portal Publication - -If the Gateway that the Dev Portal is associated with is in an error state, publishing your Dev Portal will fail. You won't necessarily see an error in the Dev Portals section of the user interface when this happens, but configuration errors in these resources will impact Dev Portal functionality. - -- App Component configuration errors are displayed only in the App Component section of the user interface. -- Published API configuration errors are displayed in the Published APIs section of the user interface, as well as in the Dev Portal. -- Dev Portal configuration errors are not displayed in the NGINX Controller user interface. - -If your Dev Portal failed to publish, check the status of the Gateway first; resolve any issues with the Gateway, then try publishing the Dev Portal again. -If the issue persists, check the other resources for configuration errors. - -## What's Next - -- [Learn about Policies]({{< ref "/controller/services/available-policies.md" >}}) -- [Manage Your APIs]({{< relref "./manage-apis.md" >}}) - -{{< versions "3.7" "3.18" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} diff --git a/content/controller/api/_index.md b/content/controller/api/_index.md deleted file mode 100644 index 100d0ce46..000000000 --- a/content/controller/api/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Learn how to use the F5 NGINX Controller REST API. -title: API Reference -weight: 210 -url: /nginx-controller/api/reference/ ---- - diff --git a/content/controller/api/overview.md b/content/controller/api/overview.md deleted file mode 100644 index d37acb406..000000000 --- a/content/controller/api/overview.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -description: Provides information about the F5 NGINX Controller API. 
-nd-docs: DOCS-343 -layout: docs -title: API Overview -toc: true -weight: 10 -type: -- concept ---- - -## Introduction - -The F5 NGINX Controller API is a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) API that allows you to programmatically manage your NGINX Plus data planes. - -NGINX Controller follows an "API-first" approach, which means that all NGINX Controller functionality is exclusively exposed through declarative and resource-oriented APIs. Even the user interface (user interface) uses our REST API! You'll find examples of REST request bodies in the user interface. You can rest assured that the example you see is correct, because that is the call that the user interface is going to make to apply your requested configuration. - -## Encoding - -All NGINX Controller API endpoints expect and return JSON-formatted data by default. -When appropriate, the API accepts and returns other media types, such as file uploads or downloads. - -All JSON-formatted data is expected to be encoded using UTF-8 as described by the [IETF JSON Spec](https://tools.ietf.org/html/rfc8259). -If you do not specify a specific media type in an API call, then the API defaults to `"application/json"`. If you specify multiple acceptable media types, the first type that the API supports is chosen for the response. In the event of a request for a media type that the API doesn't support, it returns a "415 Unsupported Media Type" response. - -## Object Model - -The NGINX Controller API -- as well as the user interface and the product documentation -- is organized into four top-level areas: - -- **Analytics**: Enables data visualization for NGINX Controller. -- **Infrastructure**: Lets you manage your NGINX Plus instances and certain aspects of the host machines on which NGINX Controller and NGINX Plus instances run. -- **Platform**: Lets you manage NGINX Controller options and configurations, including Users, Roles, Licenses, and Global Settings. -- **Services**: Lets you manage your applications and APIs. - -The diagrams below demonstrate how the different objects at the Service level relate to each other: - -1. All Service objects are part of an Environment. -1. Gateways and Certs can be defined at the Environment level --or-- at the Component Level. The diagram below shows an example of how traffic flows through a Gateway to an App. -1. Components are child objects that represent the back-end server(s) that host your App or API. - {{< call-out "note" >}}A Component can represent an application **or** an API. The same Component cannot be used for both App Delivery and API Management.{{< /call-out >}} -1. Certs can be added to a Gateway or to an individual Component. - -{{< img src="/ctlr/img/services-object-model-example.png" alt="Diagram showing the relationship of objects in an Environment within the Services area." >}} -{{< img src="/ctlr/img/traffic-flow-example-1.png" alt="Example traffic flow through a gateway to app components that represent a back-end application. Certs can be configured at the gateway or at the app component level." >}} - -### Permissions - -Access to each of these areas is determined by your User Role. Roles grant Users access to specific Environments; Role permission levels define what content you can see ("Read" access) and interact with ("Write" access). Users with Roles that contain "Full" access can interact with all areas. - -The diagram below shows a sample System Administrator (or, "SysAdmin") workflow. 
The SysAdmin user has full administrator permissions, which allows creation of objects in all areas. In this workflow, the SysAdmin user creates an Environment; then creates a Role that has permission to interact with objects in that Environment; and, finally, creates a User. The Role grants the User access to objects in the Environment. - -{{< img src="/ctlr/img/netops-workflow.png" alt="Example System Admin workflow" >}} - -The diagram below shows a sample deployment workflow. In this workflow, the user - a Deployment Manager - has read and write access to objects in one specific Environment, but no access to other Environments. Within the allowed Environment, the user can create objects or select from objects that were added by a system administrator. In this workflow, the Deployment Manager creates an App and an App Component. Associated objects like Certs and Gateways can be added -- or selected from a list -- when adding the App Component. The configs for load balancing, monitoring, and URI redirects are defined as part of the App Component as well. - -{{< img src="/ctlr/img/devops-workflow-simple.png" alt="Example deployment workflow" >}} - -{{< call-out "note" >}} - -- [Managing Roles & Users]({{< ref "/controller/platform/access-management/manage-users.md" >}}) - -{{< /call-out>}} - -## Authentication - -The NGINX Controller API uses session cookies to authenticate requests. The session cookie is returned in response to a `GET /api/v1/platform/login` request. See the Login endpoint in the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}) documentation for information about session cookie timeouts and invalidation. - -{{< call-out "tip" >}} -You can send a GET request to the login endpoint to find the status of the session token. -{{< /call-out >}} - -For example: - -- Login and capture the session cookie: - - ```curl - curl -c cookie.txt -X POST --url 'https://198.51.100.10/api/v1/platform/login' --header 'Content-Type: application/json' --data '{"credentials": {"type": "BASIC","username": "arthur@example.net","password": ""}}' - ``` - -- Use the session cookie to authenticate and get the session status: - - ```curl - curl -b cookie.txt -c cookie.txt -X GET --url 'https://198.51.100.10/api/v1/platform/login' - ``` - - -## Resource Types - -The NGINX Controller API contains two types of resources: immediately consistent and eventually consistent. - -Immediately consistent resources are synchronous. For these resources, any changes you make will be applied at the time the request is received. Requests to modify state using an API write operation (POST, PUT, PATCH or DELETE) result in the transmitted data being stored by the server as state. There is no need to check for progress, success, or failure using an API read operation (GET) for these resources. The original response should communicate if the request was successful. - -Eventually consistent resources are asynchronous. For these resources, any changes you request will be applied over time. Requests to modify state using an API write operation (POST, PUT, PATCH or DELETE) result in the transmitted data being stored by the server and messages or events being generated to eventually apply this state. You may check for progress, success, or failure using an API read operation (GET). The original response communicates that the data resulting in instructions was understood by the system. 
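For example, the sketch below reuses the session cookie from the login example above to update an eventually consistent resource and then read it back to check progress. The environment and gateway names (`env1`, `gw1`) and the `gateway.json` payload are placeholders for illustration only; see the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}) for the exact resource paths and request schemas.

```curl
# Request a configuration change on an eventually consistent resource.
# The resource path and the gateway.json payload are illustrative placeholders.
curl -b cookie.txt -X PUT --url 'https://198.51.100.10/api/v1/services/environments/env1/gateways/gw1' --header 'Content-Type: application/json' --data @gateway.json

# Read the same resource back; the currentStatus property (described below)
# reports progress toward the requested desiredState.
curl -b cookie.txt -X GET --url 'https://198.51.100.10/api/v1/services/environments/env1/gateways/gw1'
```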
- -## Resource Properties - -All NGINX Controller API resources contain the following properties: - -```json -{ - "metadata": { - }, - "desiredState": { - }, - "currentStatus": { - } -} -``` - -The `desiredState` property is a representation of the state that you want to apply to the system. The properties within `desiredState` are the API representation of data. While changes to `desiredState` may trigger eventually consistent operations, the object itself is "immediately consistent". Consumers of the API can "read their own writes" and should always be able to retrieve the current desired state, no matter where the system is in the process of applying the state change. - -The `currentStatus` property represents the current state of the system. Its purpose is to communicate the progress of achieving eventual consistency to the API consumer. As such, `currentStatus` is a read-only property. - -## Versioning - -The introduction of backwards-incompatible changes to the NGINX Controller API constitutes a major version change. This will be represented in the NGINX Controller API version string. For example, to use a `v2` API, you would use `https:///api/v2`. - -When any NGINX Controller component requires a version change, we will release a new version of the entire API. In other words, you won't see a mix of `v1` and `v2` objects in the same API. - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.18" "latest" "apimvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/api/reference/ctlr-adc-api.md b/content/controller/api/reference/ctlr-adc-api.md deleted file mode 100644 index 04ead674a..000000000 --- a/content/controller/api/reference/ctlr-adc-api.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: Represents the state of the F5 NGINX Controller Application Delivery - REST API. -nd-docs: DOCS-1280 -type: -- reference -doctypes: - - reference -type: redoc -tags: - - api -title: ADC API -toc: false -weight: 300 -nd-api-reference: "./nginx-controller/api/reference/ctlr-adc-openapi.json" ---- diff --git a/content/controller/api/reference/ctlr-analytics-api.md b/content/controller/api/reference/ctlr-analytics-api.md deleted file mode 100644 index 70e1214f6..000000000 --- a/content/controller/api/reference/ctlr-analytics-api.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: Represents the state of the F5 NGINX Controller Analytics REST API. -nd-docs: DOCS-1279 -type: -- reference -doctypes: - - reference -type: redoc -tags: - - api -title: Analytics API -toc: false -weight: 200 -nd-api-reference: "./nginx-controller/api/reference/ctlr-analytics-openapi.json" ---- diff --git a/content/controller/api/reference/ctlr-apim-api.md b/content/controller/api/reference/ctlr-apim-api.md deleted file mode 100644 index 5add27db2..000000000 --- a/content/controller/api/reference/ctlr-apim-api.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: Represents the state of the F5 NGINX Controller API Management REST API. 
-nd-docs: DOCS-1281 -type: -- reference -doctypes: - - reference -type: redoc -tags: - - api -title: APIM API -toc: false -weight: 400 -nd-api-reference: "./nginx-controller/api/reference/ctlr-apim-openapi.json" ---- diff --git a/content/controller/api/reference/ctlr-platform-api.md b/content/controller/api/reference/ctlr-platform-api.md deleted file mode 100644 index 562a3db72..000000000 --- a/content/controller/api/reference/ctlr-platform-api.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: Represents the state of the F5 NGINX Controller Platform REST API. -nd-docs: DOCS-1278 -doctypes: - - reference -type: redoc -tags: - - api -title: Platform API -toc: false -nd-api-reference: "./nginx-controller/api/reference/ctlr-platform-openapi.json" ---- diff --git a/content/controller/app-delivery/_index.md b/content/controller/app-delivery/_index.md deleted file mode 100644 index 021e9ba97..000000000 --- a/content/controller/app-delivery/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Tasks for deploying and managing your applications. -title: Application Delivery -weight: 152 -url: /nginx-controller/app-delivery/ ---- - diff --git a/content/controller/app-delivery/about-app-delivery.md b/content/controller/app-delivery/about-app-delivery.md deleted file mode 100644 index 544e54607..000000000 --- a/content/controller/app-delivery/about-app-delivery.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -description: Learn about F5 NGINX Controller Application Delivery concepts. -nd-docs: DOCS-474 -title: About Application Delivery -toc: true -weight: 100 ---- - -## Apps - -In F5 NGINX Controller, an App serves as a container for one or more Components. Components represent the backend services that comprise your application. Together, an App and its Components represent the logical partitioning of your application into its composite parts. For example, a Component might correspond to a particular microservice within your application. Each Component you add to an App represents one or more paths via which traffic can reach that microservice. - -All Apps and Components live within an [Environment]({{< ref "/controller/services/manage-environments.md" >}}). This means that in order to have access to a particular App, a User needs to have permission to access its Environment. If you need access to an Environment or App, contact your administrator. - -## Components - -A Component is a child object of an App. Components let you partition an App into smaller, self-contained pieces that are each responsible for a particular function of the overall application. For example, a Component could correspond to a microservice that, together with several other microservices, comprises a complete application. - -Each Component contains an ingress definition that includes the fully-qualified domain names (FQDNs) and URIs from clients. These ingress definitions associate incoming requests with a particular path; the certificates that are used for decryption/encryption of HTTPS requests and responses that traverse that path; the backend servers that host the App to which the path delivers the requests; and the rewrites, redirects, and modifications on the requests/responses that occur along the path. - -Components can be instantiated on multiple paths corresponding to the placements associated with the Component; these placements are defined within the [Gateway(s)]({{< ref "/controller/services/manage-gateways.md" >}}) referenced in the Component. 
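For illustration, the sketch below shows the general shape of a Component definition: an ingress that lists the Component's URIs and references a Gateway, and a backend workload group that lists the servers hosting the service. It follows the same object structure used in the Snippets examples later in this documentation; the environment, gateway, workload addresses, and names are placeholders, not a complete or validated configuration.

```json
{
  "metadata": {
    "name": "checkout-component"
  },
  "desiredState": {
    "ingress": {
      "uris": {
        "/checkout/": {}
      },
      "gatewayRefs": [
        {
          "ref": "/services/environments/prod/gateways/shop-gateway"
        }
      ]
    },
    "backend": {
      "workloadGroups": {
        "checkout-servers": {
          "uris": {
            "http://10.0.0.11:8080": {},
            "http://10.0.0.12:8080": {}
          }
        }
      }
    }
  }
}
```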
- -## Inherited or Independent Resources - -When you configure a Component, you can choose to: - -- inherit resources and configurations from the Gateway; -- create and define new resources and configurations specific to the Component; or -- use a combination of inherited and Component-specific configurations. - -For example, a Gateway's ingress definition might include the URIs for a Service's FQDN(s) and the associated TLS [certificates]({{< ref "/controller/services/manage-certs.md" >}}), while the Component's ingress definition would contain relative URIs for the FQDN defined in the Gateway: - -- Gateway Ingress URIs: `www.example.com` -- Component Ingress URIs: `/about/`, `/docs/`, `/contact/` - -Together, the Component's relative paths and the Gateway's FQDN results form the absolute URI for each path (`www.example.com/about/`, `www.example.com/docs/`, and `www.example.com/contact/`). - -Likewise, you can configure a Component with its own FQDN and paths, but inherit the TLS certificates from the Gateway. Or, you can configure a Component that doesn't inherit any resources or configurations from the Gateway and uses its own set of definitions. - -{{< call-out "note" >}}The ability to add resources, like Certificates, is determined by your account permissions. If you don't have the ability to add new Certs, contact your administrator. {{< /call-out >}} - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/about-caching.md b/content/controller/app-delivery/about-caching.md deleted file mode 100644 index 6e5f9c005..000000000 --- a/content/controller/app-delivery/about-caching.md +++ /dev/null @@ -1,387 +0,0 @@ ---- -description: Learn how F5 NGINX Controller handles caching configurations and what - NGINX cache directives are supported. -nd-docs: DOCS-339 -title: About Caching -toc: true -weight: 200 -type: -- concept ---- - -## Overview - -The F5 NGINX Controller Application Delivery (AD) module lets you configure content caching by using either the user interface (UI) or the [Components REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#tag/Components). - -## Basic Caching - -NGINX Controller Caching supports [basic caching](https://www.nginx.com/blog/nginx-caching-guide/#How-to-Set-Up-and-Configure-Basic-Caching) via the *disk store* resource. - -When you add a disk store to a component, you define the location of the cache on the hard disk. The path you specify for the disk store is the base path under which you want to store the cache files for the component. - -{{< call-out "important" >}} -The directory that you want to use as the cache must already exist and the NGINX process must have read and write permissions to it. Otherwise, NGINX Controller can't create the cached folders and files. - -If NGINX Controller can't create the desired cache directory and/or write files to it, the user interface will display an error for the component. -{{< /call-out >}} - -When you use the UI or the REST API to create a single disk store, NGINX Controller adds the following directives to the auto-generated `nginx.conf` file: - -- [`proxy_cache_path`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path), in the top-level `http` context; -- [`proxy_cache`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache), added to the component's `location` block. 
- -You can include NGINX Controller Caching data when creating [custom dashboards]({{< ref "/controller/analytics/dashboards/custom-dashboards" >}}) and [alerts]({{< ref "/controller/analytics/alerts/manage-alerts" >}}) for your applications. - -## Cache Splitting - -NGINX Controller Caching also supports splitting the cache across multiple directories, which can reside on different hard drives. To split the cache, you need to create a disk store for each desired cache location. The Caching *split config* settings let you determine how NGINX Controller should split the data between the disk stores -- either by percentage or by pattern matching. - -The percentage option lets you set the percentage of the cache to store in each location. Pattern matching lets you define where to store cache contents -- like certain file types -- and which cache location should send a response based on the request. - -{{< call-out "note" >}} -Read the [NGINX Caching Guide](https://www.nginx.com/blog/nginx-caching-guide/#Splitting-the-Cache-Across-Multiple-Hard-Drives) to learn more about splitting the cache across multiple hard drives. -{{< /call-out>}} - -When you define a split cache, NGINX Controller adds a `split_clients` configuration block with percentage split or a `map` configuration block with string split to the `http` context of the generated `nginx.conf` file. - -## Advanced Caching - -As noted earlier in this topic, you can use Caching to manage basic caching use cases. -To add any of the [`ngx_http_proxy_module`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html) cache directives listed below, use NGINX Controller **Snippets**. - -- [`proxy_cache_background_update`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_background_update) -- [`proxy_cache_bypass`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_bypass) -- [`proxy_cache_convert_head`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_convert_head) -- [`proxy_cache_key`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key) -- [`proxy_cache_lock`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock) -- [`proxy_cache_lock_age`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_age) -- [`proxy_cache_lock_timeout`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_timeout) -- [`proxy_cache_max_range_offset`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_max_range_offset) -- [`proxy_cache_methods`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_methods) -- [`proxy_cache_min_uses`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_min_uses) -- [`proxy_cache_purge`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_purge) -- [`proxy_cache_revalidate`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_revalidate) -- [`proxy_cache_use_stale`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_use_stale) -- [`proxy_cache_valid`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid) -- [`proxy_no_cache`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_no_cache) -- [`proxy_temp_path`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_path) - -In order to enable the collection of app centric caching metrics, NGINX Controller has added a minimal set of APIs to enable and control caching. 
For more advanced caching features, you can make use of `configSnippets` to configure the directives above. - -{{< call-out "note" >}} -When you enable the temporary path for disk store with `tempPath:ENABLED`, you need to set the temporary path `proxy_temp_path` using the snippets API. -{{< /call-out >}} - - -{{< call-out "note" >}} -NGINX Controller does not collect or report metrics for directives configured using Snippets. -{{< /call-out >}} - -## Usage Examples - -Each of the examples provided here shows a sample API request and the resulting NGINX config file. These examples are for learning purposes only and are not intended for use in production settings. - -### Basic Caching {#basic-caching-example} - -The example below shows an excerpt of a REST API request that sets up basic caching. This example defines one server as the cache location. - -```json -"desiredState": { - "caching": { - "diskStores": [ - { - "path": "/tmp/cache-1", - "maxSize": "5G", - "minFree": "10k", - "inMemoryStoreSize": "500M", - "inactiveTime": "2s" - } - ] - } -} -``` - -The above request modifies the NGINX Controller-generated `nginx.conf` file as follows: - -- Adds a `proxy_cache_path` directive for the disk store to the `http` context; -- Adds a new `proxy_cache` directive to the `location` block for the component. - -```Nginx configuration file {hl_lines=[1,14]} -proxy_cache_path /tmp/cache-1/app_centric_example-env|example-app-1|example-app-component| max_size=5G min_free=10k keys_zone=app_centric_example-env|example-app-1|example-app-component|/tmp/cache-1:500M purger=off; - -server { - server_name test.example.com; - listen 80; - status_zone server_5ae404e8-005d-38e8-b355-6d54cb219730; - set $f5_gateway example-gw; - f5_metrics_marker gateway $f5_gateway; - set $f5_environment example-env; - f5_metrics_marker environment $f5_environment; - location / { - error_log /dev/null; - access_log off; - proxy_cache app_centric_example-env|example-app-1|example-app-component|/tmp/cache-1; - set $f5_app example-app-1; - f5_metrics_marker app $f5_app; - set $f5_component example-app-component; - f5_metrics_marker component $f5_component; - proxy_set_header X-Forwarded-For $remote_addr; - proxy_set_header Host $host; - proxy_set_header Connection ''; - proxy_http_version 1.1; - proxy_pass http://wg-example_http_b4859463-b3bd-4ccb-8442-e21253a50da7; - } -} -``` - -### Cache Splitting using Percentage and Snippets {#split-percentage-example} - -You can set up cache splitting using the Percentage criteria to define the percent of the cache to store in each location. - -The example request excerpt below does the following: - -- splits the cache across three different storage paths; -- sets one of the stores -- `/tmp/default` -- as the default; -- uses the Component `configSnippets.uriSnippets` API to configure the [`add_header`](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_header) directive, to include `Cache` header with "HIT/MISS/EXPIRED/BYPASS" in the response; -- uses the Component `configSnippets.uriSnippets` API to set a cache duration time of 1m for all requests using [`proxy_cache_valid`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid). 
- -```json -{ - "desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "directives": [ - { - "directive": "proxy_cache_valid", - "args": [ - "any", - "1m" - ] - } - ] - } - ] - }, - "programmability": { - "responseHeaderModifications": [ - { - "action": "ADD", - "headerName": "X-Cache-Status", - "headerValue": "$upstream_cache_status" - } - ] - }, - "caching": { - "splitConfig": { - "criteriaType": "PERCENTAGE", - "key": "$request_uri" - }, - "diskStores": [ - { - "inMemoryStoreSize": "100m", - "inactiveTime": "1m", - "isDefault": false, - "maxSize": "5G", - "minFree": "10k", - "path": "/tmp/hdd1", - "percentCriteria": "20%" - }, - { - "inMemoryStoreSize": "100m", - "inactiveTime": "10s", - "isDefault": false, - "maxSize": "5g", - "minFree": "10k", - "path": "/tmp/hdd2", - "percentCriteria": "50%" - }, - { - "inMemoryStoreSize": "100m", - "inactiveTime": "15s", - "isDefault": true, - "maxSize": "2g", - "minFree": "10k", - "path": "/tmp/default" - } - ] - } - } -} -``` - -The above request modifies the `nginx.conf` file as follows: - -- Adds the `split_clients` directive to the `http` context, reflecting the criteria defined for `diskStores`; -- Adds a `proxy_cache_path` directive for each disk store to the `http` context; -- Adds a new `proxy_cache` variable -- `$cache_` -- to the `location` block for the component; -- Adds the `proxy_cache_valid` and `add_header` directives to the `location` block for the component. - -```Nginx configuration file {hl_lines=["1-8",27,36,37]} -split_clients $request_uri $cache_bdfa5d91f97d37dbb97a42dde6a5f4ff { - 20% app_centric_env|app|split_cache_percentage|/tmp/hdd1; - 50% app_centric_env|app|split_cache_percentage|/tmp/hdd2; - * app_centric_env|app|split_cache_percentage|/tmp/default; -} -proxy_cache_path /tmp/hdd1/app_centric_env|app|split_cache_percentage| max_size=5G min_free=10k keys_zone=app_centric_env|app|split_cache_percentage|/tmp/hdd1: 100m purger=off inactive=1m; -proxy_cache_path /tmp/hdd2/app_centric_env|app|split_cache_percentage| max_size=5g min_free=10k keys_zone=app_centric_env|app|split_cache_percentage|/tmp/hdd2: 100m purger=off inactive=10s; -proxy_cache_path /tmp/default/app_centric_env|app|split_cache_percentage| max_size=2g min_free=10k keys_zone=app_centric_env|app|split_cache_percentage|/tmp/default: 100m purger=off inactive=15s; -upstream split_p_http_7ec84d9e-373e-4d90-bcaa-0e33dcc4b906 { - zone split_p_http_7ec84d9e-373e-4d90-bcaa-0e33dcc4b906 160k; - server 10.146.187.154: 80; - keepalive 64; - keepalive_requests 100; - keepalive_timeout 60s; -} -server { - server_name test.example.com; - listen 80 reuseport; - status_zone server_4d1ee345-cf08-354e-93dc-1c3a844a04e3; - set $f5_gateway gw; - f5_metrics_marker gateway $f5_gateway; - set $f5_environment env; - f5_metrics_marker environment $f5_environment; - location /aaa { - error_log /dev/null; - access_log off; - proxy_cache $cache_bdfa5d91f97d37dbb97a42dde6a5f4ff; - set $f5_app app; - f5_metrics_marker app $f5_app; - set $f5_component split_cache_percentage; - f5_metrics_marker component $f5_component; - proxy_set_header X-Forwarded-For $remote_addr; - proxy_set_header Host $host; - proxy_set_header Connection ''; - proxy_http_version 1.1; - add_header Cache $upstream_cache_status; - proxy_cache_valid any 1m; - proxy_pass http: //split_p_http_7ec84d9e-373e-4d90-bcaa-0e33dcc4b906; -} -``` - -### Cache Splitting using Pattern Matching and Snippets {#split-string-example} - -You can also use pattern matching to cache based on a certain string (`stringCriteria`) for 
each store. For example, you can define the string criteria as a list of file formats, as shown in the request excerpt below. As in the [percentage example](#split-percentage-example), we're also using the Components `configSnippets` API here to set the `add_header` and `proxy_cache_valid` directives. - -The request below splits the cache into three different stores. - -- One store is the default location and has no string criteria defined. -- One store is the location for all `.html`files. -- Ones store is the location for all `.mp4` files. - -```json -"desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "directives": [ - { - "directive": "proxy_cache_valid", - "args": [ - "any", - "1m" - ] - } - ] - } - ] - }, - "programmability": { - "responseHeaderModifications": [ - { - "action": "ADD", - "headerName": "X-Cache-Status", - "headerValue": "$upstream_cache_status" - } - ] - }, - "caching": { - "splitConfig": { - "criteriaType": "STRING", - "key": "$request_uri" - }, - "diskStores": [ - { - "inMemoryStoreSize": "10m", - "inactiveTime": "1m", - "isDefault": false, - "maxSize": "2G", - "minFree": "1m", - "path": "/tmp/hdd1", - "stringCriteria": ["~.html$"] - }, - { - "inMemoryStoreSize": "50m", - "inactiveTime": "1m", - "isDefault": false, - "maxSize": "1g", - "minFree": "10k", - "path": "/tmp/hdd2", - "stringCriteria": ["~.mp4$"] - }, - { - "inMemoryStoreSize": "30m", - "inactiveTime": "1m", - "isDefault": true, - "maxSize": "2g", - "minFree": "10k", - "path": "/tmp/default" - } - ] - } -} -``` - -The above request modifies the `nginx.conf` file as follows: - -- Adds a `map` directive to the `http` context, reflecting the string criteria defined for the disk stores. -- Adds a `proxy_cache_path` directive to the `http` context for each disk store. -- Adds a new variable `$cache_` to the `location` block for the component. 
- -```Nginx configuration file {hl_lines=["1-8",30,39,40]} -map $request_uri $cache_8de5273e13f731e283acbc999760c3e3 { - ~.html$ app_centric_env|app|split_string|/tmp/hdd1; - ~.mp4$ app_centric_env|app|split_string|/tmp/hdd2; - default app_centric_env|app|split_string|/tmp/default; -} -proxy_cache_path /tmp/hdd1/app_centric_env|app|split_string| max_size=2G min_free=1m keys_zone=app_centric_env|app|split_string|/tmp/hdd1:10m purger=off inactive=1m; -proxy_cache_path /tmp/hdd2/app_centric_env|app|split_string| max_size=1g min_free=10k keys_zone=app_centric_env|app|split_string|/tmp/hdd2:50m purger=off inactive=1m; -proxy_cache_path /tmp/default/app_centric_env|app|split_string| max_size=2g min_free=10k keys_zone=app_centric_env|app|split_string|/tmp/default:30m purger=off inactive=1m; -upstream wg_http_0ace772a-0c68-4d01-a443-6e377d4f6133 { - zone wg_http_0ace772a-0c68-4d01-a443-6e377d4f6133 160k; - server 10.146.187.154:80; - keepalive 64; - keepalive_requests 100; - keepalive_timeout 60s; -} -map $host $f5_published_api { - default -; -} -server { - server_name test.example.com; - listen 80 reuseport; - status_zone server_4d1ee345-cf08-354e-93dc-1c3a844a04e3; - set $f5_gateway gw; - f5_metrics_marker gateway $f5_gateway; - set $f5_environment env; - f5_metrics_marker environment $f5_environment; - location / { - error_log /dev/null; - access_log off; - proxy_cache $cache_8de5273e13f731e283acbc999760c3e3; - set $f5_app app; - f5_metrics_marker app $f5_app; - set $f5_component split_string; - f5_metrics_marker component $f5_component; - proxy_set_header X-Forwarded-For $remote_addr; - proxy_set_header Host $host; - proxy_set_header Connection ''; - proxy_http_version 1.1; - add_header Cache $upstream_cache_status; - proxy_cache_valid any 1m; - proxy_pass - } -} -``` - -{{< versions "3.22" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/about-snippets.md b/content/controller/app-delivery/about-snippets.md deleted file mode 100644 index 83c3c063f..000000000 --- a/content/controller/app-delivery/about-snippets.md +++ /dev/null @@ -1,564 +0,0 @@ ---- -nd-docs: DOCS-340 -title: About Snippets -toc: true -weight: 300 -type: -- concept ---- - -## Overview - -The F5 NGINX Controller Application Delivery (AD) module lets you configure NGINX directives that aren't represented in the NGINX Controller API via "config snippets", or "Snippets". You can do so by using either the user interface (UI) or the [Application Delivery REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/). - -{{< call-out "caution" >}} -When you use Snippets to customize your NGINX configuration, your changes are applied to the `nginx.conf` file *as is*. NGINX Controller does not verify that your configuration is valid before applying the snippet. - -We strongly recommend verifying Snippets in a lab environment before making any changes in production. -{{< /call-out >}} - -## Types of Snippets - -There are five types of Snippets, which you can configure for gateways or components. This lets you add custom directives into the corresponding NGINX configuration blocks generated by the gateways and components for the associated URIs. - -{{< call-out "note" >}}The `uriSnippets` can't be used for TCP/UDP components.{{< /call-out >}} - -{{}} - -| Snippet | Description | Corresponding API Endpoint | -| ----------------------- | ------------------------------------------------------------------ | -------------------------- | -| `httpSnippet` | Adds directives to the `http` block. 
| Gateway | -| `mainSnippet` | Adds directives to the `main` block. | Gateway | -| `streamSnippet` | Adds directives to the `stream` block. | Gateway | -| `uriSnippets` | Adds directives to the component's `server` and `location` blocks. | Component | -| `uriSnippets` | Adds directives to the gateway's `server` blocks. | Gateway | -| `workloadGroupSnippets` | Adds directives to the `upstream` blocks. | Component | - -{{}} - -## Best Practices - -### Gateway Partitions - -It's important to avoid adding conflicting snippets to the same [context](https://docs.nginx.com/nginx/admin-guide/basic-functionality/managing-configuration-files/#contexts) in your NGINX configuration file. We recommend that you create one stand-alone Gateway to hold the `main`, `http`, and `stream` snippets. Doing so lets you share the configuration for these contexts across Gateways that define the URIs (`server` blocks) for particular instances while reducing the risk of duplicate or conflicting settings. - -### NGINX Variables - -NGINX configurations commonly use [NGINX variables](https://nginx.org/en/docs/varindex.html) or custom variables. If you prefer to configure NGINX Controller by using the REST API, you may run into problems with variable expansion when sending JSON as part of a `curl` request using th `-d` flag. The recommended best practice for this is to reference the JSON in a data file instead of sending the string as part of the request. An alternative is to redefine the variable to itself, which allows the variable to pass through to the NGINX configuration. If you're using the NGINX `$host` variable in your JSON data -- represented by the `` placeholder in the example below -- you would define the variable before the curl request as follows: - -```none -host='$host' curl -s -k -H "Content-Type: application/json" -X PUT -d "" https://192.168.100.10:80/ -``` - -## Usage Examples - -{{< call-out "caution" >}} -The examples provided here are intended for demonstration purposes only. -We strongly recommend verifying Snippets in a lab environment before making any changes in production. -{{< /call-out >}} - -### Add HTTP Strict Transport Security Headers - -If you want to implement a [HTTP Strict Transport Security](https://www.nginx.com/blog/http-strict-transport-security-hsts-and-nginx/) (HSTS) policy, you can add a snippet to your gateway. -For example: - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "applicableUris": [ - { - "uri": "http://172.16.0.238:81" - } - ], - "directives": [ - { - "directive": "add_header", - "args": ["Strict-Transport-Security", "max-age=31536000; includeSubDomains", "always"] - } - ] - } - ] - }, - "ingress": { - "uris": { - "http://example.com:8020": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} -``` - -### Allow or Deny IP Addresses - -You can add IP addresses to your allow- or deny-list by using the `allow` or `deny` directives in a gateway snippet. 
For example: - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "applicableUris": [ - { - "uri": "" - } - ], - "directives": [ - { - "directive": "deny", - "args": ["192.0.2.2"] - }, - { - "directive": "allow", - "args": ["192.0.2.1/24"] - }, - { - "directive": "allow", - "args": ["2001:0db8::/32"] - }, - { - "directive": "deny", - "args": ["all"] - } - ] - } - ] - }, - "ingress": { - "uris": { - "http://example.com:8020": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} -``` - - -### Load the NGINX Prometheus Module - -In order to use the [NGINX Prometheus-njs](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/prometheus-njs/) module with NGINX Controller, you need to use`load_module` in the `main` context, `js_import` in the `http` context, and `js_content` in the `location`. NGINX Controller automatically enables the location api `location /api`, which is also required for metrics reporting. - -After installing the module, add the following Snippets to your gateway. This will add `load_module` and `js_import`: - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "mainSnippet": { - "directives": [ - { - "directive": "load_module", - "args": ["modules/ngx_http_js_module.so"] - } - ] - }, - "httpSnippet":{ - "directives": [ - { - "directive": "js_import", - "args": ["/usr/share/nginx-plus-module-prometheus/prometheus.js"] - } - ] - } - }, - "ingress": { - "uris": { - "http://example.com:8020": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} - -``` - -Then, you'd add a config snippet similar to the example below to your component. - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "applicableUris": [ - { - "uri": "/metrics" - } - ], - "directives": [ - { - "directive":"js_content", - "args": ["prometheus.metrics"] - } - ] - } - ] - }, - "ingress": { - "uris": { - "http://example.com:8020": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} -``` - - -### NGINX as a WebSocket Proxy - -If you want to use NGINX Controller to configure [NGINX as a WebSocket Proxy](https://www.nginx.com/blog/websocket-nginx/), you can customize your `nginx.conf` by using Snippets and header programmability. - -In the gateway, provide an `http` snippet that defines the `map` directive and the `server` configuration: - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "httpSnippet": { - "directives": [ - { - "directive": "map", - "args": ["$http_upgrade", "$connection_upgrade"], - "block": [ - { - "directive": "default", - "args": ["upgrade"] - }, - { - "directive": "''", - "args": ["close"] - } - ] - } - ] - } - }, - "ingress": { - "uris": { - "http://example.com:8020": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} -``` - -Then, add the two required headers to the component using `requestHeaderModifications`. 
For example: - -```json -{ - "metadata": { - "name": "", - }, - "desiredState": { - "ingress": { - "uris": { - "/": {} - }, - "gatewayRefs": [ - {"ref": "/services/environments/${env}/gateways/"} - ] - }, - "programmability": { - "requestHeaderModifications": [ - { - "action": "ADD", - "headerName": "Upgrade", - "headerValue": "$http_upgrade" - }, - { - "action": "ADD", - "headerName": "Connection", - "headerValue": "$connection_upgrade" - } - ] - }, - "backend": { - "workloadGroups": { - "websocket": { - "uris": { - "http://:8010": {} - } - } - } - } - } -} -``` - -### Forward Errors Logs to Remote Syslog - -If you want to forward HTML error logs to [syslog](https://nginx.org/en/docs/syslog.html), you can add the `error_log` directive snippet to your gateway. -For example: - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "httpSnippet": { - "directives": [ - { - "directive": "error_log", - "args": ["syslog:server=", "debug"] - } - ] - } - }, - "ingress": { - "uris": { - "http://example.com:8000": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} -``` - -{{< call-out "note" >}} -The `error_log` and `accesslog` directives can appear at various block levels (`main`, `http`, `stream`, `server`, `location`, etc.). -NGINX Controller adds these directives to control logging to the local file. When using Snippets to add additional logging capabilities, the inner blocks override the outer block definitions. -For example, if you enable remote logging for errors at the `main` level, and you add an `error_log` directive to a `server` or `location` block that uses local logging, the local logging configuration overrides the remote logging configured at the `main` level. -{{< /call-out >}} - -### Manage IPv6 Addresses - -You can use Snippets to manage IPv6 addresses for HTTP and TCP/UDP use cases. IPv6 address management is supported in both Gateway and Component Snippets. - -- Be sure to set the `reuseport` option for all IPv6 listen directives. Failure to do so can cause bind errors. -- NGINX Controller's post-processing logic removes the `reuseport` option in certain cases. This is a [known issue]({{< ref "/controller/releases/adc/adc-release-notes-3.22.md" >}}) when the IPv6 port matches an IPv4 port and the IPv4 listen directive does not specify an IP address (in other words, a wildcard IP). To change the IPv6 listen directive's IP address, remove the Snippet, then re-add the Snippet with a new IPv6 address. - -If you need to support IPv6 addresses for the NGINX listen directive, you can use a snippet similar to the ones shown below to achieve it. - -#### HTTP Gateway with IPv6 - -For HTTP, use the Gateway URI Snippets block to add an IPv6 [`listen`](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive to the `server` blocks. 
- -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "directives": [ - { - "directive": "listen", - "args": [ - "[::]:80", - "reuseport" - ] - }, - { - "directive": "listen", - "args": [ - "[]:80", - "reuseport" - ] - } - ] - } - ] - }, - "ingress": { - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - }, - "uris": { - "http://example.com:80": {} - } - } - } -} -``` - -{{< call-out "note" >}}You cannot add IPv6 `listen` directives to a server block when the FQDN is defined in the Component URI (for example, `http://{FQDN}/{PATH}`). {{< /call-out >}} - -#### TCP/UDP Component with IPv6 - -For TCP/UDP, use the Component URI Snippets block to add an IPv6 [`listen`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) directive to the `server` blocks. - -##### TCP Component - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "directives": [ - { - "directive": "listen", - "args": [ - "[::]:9090", - "reuseport" - ] - } - ] - } - ] - }, - "backend": { - "workloadGroups": { - "wg": { - "uris": { - "tcp://:9090": {} - }, - } - } - }, - "componentType": "TCPUDP", - "ingress": { - "gatewayRefs": [ - { - "ref": "/services/environments//gateways/" - } - ], - "uris": { - "tcp://*:9090": {} - } - } - } -} -``` - -##### UDP Component - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "uriSnippets": [ - { - "directives": [ - { - "directive": "listen", - "args": [ - "[]:9053", - "udp", - "reuseport" - ] - } - ] - } - ] - }, - "backend": { - "workloadGroups": { - "wg": { - "uris": { - "udp://:9053": {} - } - } - } - }, - "componentType": "TCPUDP", - "ingress": { - "gatewayRefs": [ - { - "ref": "/services/environments//gateways/" - } - ], - "uris": { - "udp://*:9053": {} - } - } - } -} -``` - -#### IPv6-only Server Block - -To add an IPv6-only `server` block, define the entire block in the Gateway HTTP or the Stream Snippets block. - -#### UI Config - -Add `listen` directives with parameters in URI Snippets. To learn more about what the `listen` directive does and what parameters it accepts, refer to the following topics: - -- [`stream` listen options](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen) -- [`http` listen options](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) - -{{< call-out "note" >}} - -The `reuseport` parameter creates an individual listening socket for each worker process. See [`reuseport` option](https://nginx.org/en/docs/http/ngx_http_core_module.html#reuseport). - -{{< /call-out >}} - -## Extend App Security with Snippets - -When adding [NGINX Controller App Security]({{< ref "add-app-security-with-waf" >}}) to your components, you can use Snippets to add NGINX App Protect directives that aren't represented in the NGINX Controller API. You can also use Snippets to [tune your F5 WAF for NGINX performance]({{< ref "/controller/app-delivery/security/tutorials/tune-waf-for-app" >}}). - -Refer to [Extend App Security with Snippets]({{< ref "extend-app-security-snippets" >}}) for more information and examples. 
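## Apply Snippets with the REST API

As noted in the NGINX Variables best practice above, if you apply any of these Snippets payloads with `curl`, referencing the JSON from a data file avoids shell-expansion problems with NGINX variables such as `$host`. The sketch below is illustrative only: the Controller FQDN, environment, and gateway names are placeholders, authentication is omitted for brevity, and the exact resource path for your Gateway or Component is listed in the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}).

```bash
# Hypothetical example: save one of the JSON payloads above to a file
# (for example, gateway-snippets.json), then reference it with -d @file
# so the shell never expands variables such as $host.
curl -s -k \
  -H "Content-Type: application/json" \
  -X PUT \
  -d @gateway-snippets.json \
  "https://<controller-fqdn>/api/v1/services/environments/<environment-name>/gateways/<gateway-name>"
```

Because the payload is read from the file rather than passed as an inline string, any NGINX variables it contains reach the generated configuration exactly as written.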
- -{{< versions "3.22" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/deploy-simple-app.md b/content/controller/app-delivery/deploy-simple-app.md deleted file mode 100644 index 78a9dc3eb..000000000 --- a/content/controller/app-delivery/deploy-simple-app.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -description: Overview of the steps required to deploy a simple App. -nd-docs: DOCS-477 -title: Deploy a Simple Application -toc: true -weight: 400 -type: -- tutorial ---- - -## Overview - -This topic provides an overview of the steps required to create a simple application by using F5 NGINX Controller's user interface. Use the links provided to learn more about each step. - -## Create an Environment - -First, you'll need to create an Environment. - -- [About Environments]({{< ref "/controller/services/manage-environments.md#about-environments" >}}) -- [Create an Environment]({{< ref "/controller/services/manage-environments.md#create-an-environment" >}}) - -## Create a Certificate - -If you want to secure your application traffic, you'll need to add Certificates. - -If you just want to deploy a simple HTTP application, skip ahead to [Gateways](#create-a-gateway). - -{{< call-out "tip" >}} Make sure that you add the new Cert to the Environment that you created in the previous step.{{< /call-out >}} - -- [About Certificates]({{< ref "/controller/services/manage-certs.md#about-certificates" >}}) -- [Create a certificate]({{< ref "/controller/services/manage-certs.md#create-a-cert" >}}) - -## Create a Gateway - -Next, you'll need to create a Gateway. Be sure to add the Gateway to your Environment. - -- [About Gateways]({{< ref "/controller/services/manage-gateways.md#about-gateways" >}}) -- [Create a Gateway]({{< ref "/controller/services/manage-gateways.md#create-a-gateway" >}}) - -## Create an Identity Provider - -If you require authentication for any Component, you need to define an Identity Provider. The Identity Provider needs to be in the same environment as your Components. - -- [Identity Provider]({{< ref "/controller/services/manage-identity-providers.md" >}}) - -## Create an App - -Create an App. The App needs to be in your Environment and needs to connect to your Gateway. If you created a Cert by following the instructions above and added the Cert to the Gateway, the App will access the Cert via the Gateway. If you didn't add the Cert to the Gateway, you can reference the Cert in the App's definition by choosing the Cert from the Certs list. - -- [About Apps]({{< ref "/controller/app-delivery/about-app-delivery.md#apps" >}}) -- [Create an App]({{< ref "/controller/app-delivery/manage-apps.md#create-an-app" >}}) - -## Create Components for your App - -Finally, create Components for your App. Components let you partition an App into smaller, self-contained pieces that are each responsible for a particular function of the overall application. For example, a Component could correspond to a microservice that, together with several other microservices, comprises a complete application. 
- -- [About Components]({{< ref "/controller/app-delivery/about-app-delivery.md#components" >}}) -- [Create a Component]({{< ref "/controller/app-delivery/manage-apps.md#create-a-component" >}}) - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/manage-apps.md b/content/controller/app-delivery/manage-apps.md deleted file mode 100644 index 8dbb4ecb6..000000000 --- a/content/controller/app-delivery/manage-apps.md +++ /dev/null @@ -1,373 +0,0 @@ ---- -description: Create, view, and edit Apps and Components. -nd-docs: DOCS-478 -title: Manage Apps & Components -toc: true -weight: 300 -type: -- how-to ---- - -## Overview - -Follow the steps in this topic to learn how to create and manage Apps and App Components. - -{{< call-out "tip" >}}You can also use the F5 NGINX Controller API to create Apps and Components. See the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}) for details.{{< /call-out >}} -  - -## Before You Begin - -You will need to select an [Environment]({{< ref "/controller/services/manage-environments.md#create-an-environment" >}}) and [Gateway]({{< ref "/controller/services/manage-gateways.md#create-a-gateway" >}}) -- or create new Environment and Gateway resources -- when adding a new App. - -{{< call-out "note" >}}If you do not have permission to create these resources and none are available to select, contact your system administrator.{{< /call-out >}} -  - -## Create an App - -To create an App: - -1. Open the NGINX Controller user interface and log in. -1. Select the NGINX Controller menu icon, then select **Services**. -1. On the **Services** menu, select **Apps**. -1. On the **Apps** menu, select **Create App**. -1. On the **Create App** page, provide the following information: - - Name - - Environment - - Description (Optional) - - Display Name (Optional) - - Tags (Optional) -1. Select **Submit**. - -## Create a Component - -To create a Component: - -1. Open the NGINX Controller user interface and log in. -1. Select the NGINX Controller menu icon, then select **Services**. -1. On the **Services** menu, select **Apps**. -1. On the **Apps** menu, in the **Recent Apps** section, select the name of the App that you want to add the Component to. -1. On the Overview page for your App, select **Create Component**. -1. Then, complete each of the configuration sections as needed: - - - [General Configuration](#general-configuration) - - [URIs](#uris) - - [Workload Groups](#workload-groups) - - [Ingress](#ingress) - - [Backend](#backend) - - [Monitoring](#monitoring) - - [Errors and Logs](#errors-and-logs) - - [Programmability](#programmability) - - [Caching](#caching) - - [Snippets](#snippets) - - [Rate Limiting](#rate-limiting) - - [Authentication](#authentication) - - [Security](#security) - -1. When ready, review the API Spec and then select **Submit** to create the Component. - -## Configuration Options - -### General Configuration - -On the **Create App Component** *Configuration* page: - -1. Select the App Component Type: - - - Web - - TCP/UDP - -1. Provide the name for your Component. -1. (Optional) Provide a Display Name. -1. (Optional) Provide a Description. -1. (Optional) Add any desired tags. -1. (Optional) Select a **Gateway Ref** or select **Create Gateway Ref** to create a new Gateway. -1. Select **Next**. - -### URIs - -A Component definition must contain one or more URIs. 
- -**Web Component URIs** can be either of the following: - -- a complete URI that follows the format `[:port][/path]`, or -- a relative path that follows the format `[/...]`. - -Relative paths inherit the host URI configured for the Gateway associated with the Component. -The host and relative path(s) defined for a Component take precedence over the host defined in the associated Gateway. - -Example Web URI definitions: - -- `http://www.f5.com:8080/sales` -- `http://*.f5.com:5050/test` -- `/images` -- `/*.jpg` -- `/locations/us/wa*` - -**TCP/UDP URIs** must be a complete URI that follows the format ``. -TCP+TLS URIs can include TLS information. - -Example TCP/UDP URI definitions: - -- `tcp://192.168.1.1:12345` -- `tcp+tls://192.168.1.1:12346` -- `tcp://192.168.1.1:12345-12350` -- `tcp://*:12345` -- `udp://192.168.1.1:12345` -- `udp://*:12345` - -On the **Create App Component** *URIs* page: - -1. Define the **URIs**: - - - Select **Add URI**. - - In the **URI** box, type the URI for the Component. - - (Optional) Select a **Match Method** (applicable only to Web Components). - - (Optional) Select **Customize for this URI** to add custom **TLS Settings**. - - {{< call-out "note" >}} -TLS Settings can be inherited from the Gateway, or customized at the Component level. Enable this option if you want the Component to use a different cert than that used by the Gateway. - {{< /call-out >}} - -1. (Optional) Define the **Shared TLS Settings**. - - - To use a cert that is already associated with the Gateway, select it from the list. - - To add a new shared cert, select **Create New**. - -1. Select **Next**. - -### Workload Groups - -On the **Create App Component** *Workload Groups* page: - -1. Provide a Workload Group Name. -1. (Optional) Select a Location. - - The location determines which instances or instance groups the workload group is applied to. If any workload group specifies a location, they all must specify a location. Note: If the associated gateway uses instance groups, the location should refer to the instance group location, not the location(s) of the individual instances that make up that group. - - {{< call-out "note" >}}Refer to the [Manage Locations]({{< ref "/controller/infrastructure/locations/manage-locations.md" >}}) topic for more information.{{< /call-out>}} -1. Define the backend workload URIs. -1. (Optional) Define the DNS Server. -1. (Optional) Select the Load Balancing Method. The default value is "Round Robin". - - {{< call-out "note" >}}Refer to the [NGINX Plus Admin Guide](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/) for more information about the available options.{{< /call-out>}} - -1. (Optional) Select the Session Persistence Type (applicable only to Web Components). -1. (Optional) Select the Desired Proxy Settings (applicable only to Web Components). - - {{< call-out "tip" >}}Hover your pointer over the info icon for each setting to learn about the expected values and requirements.{{< /call-out >}} -1. Select **Next**. - -### Ingress - -On the **Create App Component** *Ingress* page: - -{{< call-out "note" >}} The following settings are applicable only to Web components. {{< /call-out >}} - -1. (Optional) Select the supported HTTP methods. -1. (Optional) Set the desired **Client Max Body Size**. - - {{< call-out "note" >}} -Refer to the [`ngx_http_core_module` docs](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) for more information about these options. - {{< /call-out>}} - -1. Select **Next**. 
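The settings covered so far -- the Gateway Ref, URIs, Workload Groups, and Ingress -- correspond to fields on the Component resource in the REST API. The minimal sketch below shows only the URI, gateway reference, and workload group pieces; the component, environment, gateway, and workload group names are placeholders, the field names follow the Snippets examples elsewhere in this guide, and the authoritative schema is in the [NGINX Controller API Reference]({{< ref "/controller/api/_index.md" >}}).

```json
{
  "metadata": {
    "name": "example-component"
  },
  "desiredState": {
    "ingress": {
      "uris": {
        "/images": {}
      },
      "gatewayRefs": [
        { "ref": "/services/environments/<environment-name>/gateways/<gateway-name>" }
      ]
    },
    "backend": {
      "workloadGroups": {
        "example-workload-group": {
          "uris": {
            "http://192.0.2.10:8080": {}
          }
        }
      }
    }
  }
}
```

Each remaining configuration page in this walkthrough adds further fields under `desiredState`; check the API Reference for their exact names before scripting against them.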
- -### Backend - -On the **Create App Component** *Backend* page: - -{{< call-out "note" >}} The following settings are applicable only to Web components. {{< /call-out >}} - -1. (Optional) Enable [NTLM authentication](https://en.wikipedia.org/wiki/Integrated_Windows_Authentication) to allow proxying requests with NT LAN Manager (NTLM) Authentication. -1. (Optional) Specify the persistent state. -1. (Optional) Set the HTTP protocol version for proxying. -1. (Optional) Specify the Keep Alive settings: - - - **Connections**: Set the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. When this number is exceeded, the least recently used connections are closed. - - **Requests per Connection**: Set the maximum number of requests that can be served through one keepalive connection. After the maximum number of requests is made, the connection is closed. - - **Idle Timeout box**: Set a timeout during which an idle keepalive connection to an upstream server will stay open. -1. Select **Next**. - -### Monitoring - -On the **Create App Component** *Monitoring* page: - -1. (Optional) Enable **Health Monitoring** and define the desired Monitoring Request and Response. Health Monitoring is disabled by default. -1. (Optional) Enable **Workload Health Events**. Workload Health Events are disabled by default. -1. (Optional) Specify the URI to use in health check requests (applicable only to Web Components). The default is `/`. For TCP/UDP Components, specify the Send string. -1. (Optional) Specify the port to use when connecting to a server to perform a health check. The server port is used by default. -1. (Optional) Set the interval to wait between two consecutive health checks. The default is 5 seconds. -1. (Optional) Specify the number of consecutive passed health checks that must occur for a server to be considered healthy. The default is 1. -1. (Optional) Specify the number of consecutive failed health checks that must occur for a server to be considered unhealthy. The default is 1. -1. (Optional) Specify the default state for the server. The default state is `HEALTHY`. -1. (Optional) Specify the starting HTTP status code to match against (applicable only to Web components). -1. (Optional) Specify the ending HTTP status code to match against (applicable only to Web components). -1. (Optional) Select whether a response should pass in order for the health check to pass (applicable only to Web components). By default, the response should have status code `2xx` or `3xx`. -1. Select **Next**. - - {{< call-out "note" >}} -Refer to the [`ngx_http_upstream_hc_module` docs](http://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) for more information about these options. - {{< /call-out>}} - -### Errors and Logs - -On the **Create App Component** *Logs* page: - -1. (Optional) Select the logs to enable: - - - Error Log - - Access Log - -1. (Optional) Specify the log format to use. -1. Select **Next**. - - {{< call-out "note" >}} -Refer to the [`ngx_http_log_module` docs](http://nginx.org/en/docs/http/ngx_http_log_module.html) for more information about these options. - {{< /call-out>}} - -### Programmability - -On the **Create App Component** *Programmability* page: - -{{< call-out "note" >}} The following settings are applicable only to Web components. {{< /call-out >}} - -1. (Optional) Select **Add URI Redirects** and define the desired redirect condition(s). -1. 
(Optional) Select **Add URI Rewrite** and define the desired rewrite pattern(s). -1. (Optional) Select **Add Request Header Modification** and define how to modify the request header. -1. (Optional) Select **Add Response Header Modification** and define how to modify the response header. -1. Select **Next**. - - {{< call-out "note" >}} -Refer to the [`ngx_http_rewrite_module` docs](http://nginx.org/en/docs/http/ngx_http_rewrite_module.html) for more information about these options. - {{< /call-out>}} - -### Caching - -{{< call-out "note" >}} -Introduced in NGINX Controller App Delivery module v3.22. -{{< /call-out >}} - -On the **Create App Component** *Caching* page: - -1. Select the *Enable Caching* toggle to turn on caching. -1. Define the *Split Config* settings as appropriate for your component. - - - **PERCENTAGE** -- Select if you want to split the cache across two or more disk stores and assign a percentage of the store to each location. The *key* field is not required for this option if users set only one disk. - - **STRING** -- Select if you want to split the cache across two or more disk stores using pattern matching. The *key* field is required for this option. - - {{< call-out "note" >}}The *key* string must contain at least one valid [NGINX variable](https://nginx.org/en/docs/varindex.html). Example: `${request_uri}`{{< /call-out >}} - -1. Define the desired settings for the Disk Store: - - - **Path**: This is the location where the cache will be stored; this path must already exist on the data plane. - - **Max Size** - - **Min Free** - - **In Memory Store Size** - - **Is Default** - - **Temp Path** (Optional) - - **Inactive Time** (Optional) - - **Directory Level** (Optional) - - **Trim Policy** (Optional) - - **Loader Policy** (Optional) - - **Purger Policy** (Optional) - - {{< call-out "note" >}}Refer to the [`proxy_cache_path` docs](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path) for more information about these options.{{< /call-out>}} - -1. Select *Add Disk Store* to add another disk store (Optional). - This will split the cache across multiple storage locations according to the *Split Config* criteria you selected. - - The following *Split Config* options will display depending on the criteria you selected: - - **Percent Criteria** - Required when using "PERCENTAGE" criteria type; must be an integer followed by the `%` symbol; decimals are supported; for example, `75%` or `50.5%`. - - **String Criteria** - Required when using "STRING" criteria type; Depending upon the `SplitConfig`-> `Key` it could be a string like `~/html`, `~*.html$'` or IP based string like `10.1.1.2` - -1. Select **Next** to go to the next page, or **Submit** to save and submit your changes. - -### Snippets - -{{< call-out "note" >}} -Introduced in NGINX Controller App Delivery module v3.22. -{{< /call-out >}} - -Refer to the [About Snippets]({{< ref "/controller/app-delivery/about-snippets.md" >}}) topic to learn more about Snippets and how they impact the NGINX Controller-generated `nginx.conf` file. - -On the **Create App Component** *Snippets* page: - -1. Select the appropriate snippet type: - - - *Add URI Snippet*: Adds NGINX directives to the component's `server` and `location` blocks. - - *Add Workload Group Snippet*: Adds NGINX directives to the component's `upstream` block(s). - -1. Paste or type the desired snippet into the text field. - - Snippets should follow the standard `nginx.conf` format. 
- For example, the below URI snippet adds the `proxy_set_header` directive to the component's `server` block. - - ```Nginx configuration file - proxy_set_header Host $proxy_host; - ``` - - {{< call-out "caution" >}}When you use Snippets to customize your NGINX configuration, your changes are applied to the `nginx.conf` file *as is*. NGINX Controller does not verify that your configuration is valid before applying the snippet. We strongly recommend verifying Snippets in a lab environment before making any changes in production.{{< /call-out >}} - -1. Select **Next** to preview the REST API call for your component, or **Submit** to save and submit your changes. - -### Rate Limiting - -On the **Create App Component** *Rate Limiting* page: - -{{< call-out "note" >}} The following Rate Limiting settings are applicable only to Web components. {{< /call-out >}} - -1. Enable Rate Limiting and select a **Key**. -1. Select options for Rate and Units. -1. (Optional) Select options for Excess Request Processing and Ignore Initial N Requests. -1. Select options for Reject Status Code. -1. Select **Next**. - -### Authentication - -On the **Create App Component** *Authentication* page: - -1. Select **Add Authentication**. -1. Select an [**Identity Provider**]({{< ref "/controller/services/manage-identity-providers.md" >}}). -1. Select a **Credential Location**. -1. (Optional) Enable [**Conditional Access**]({{< ref "/controller/services/available-policies.md#conditional-access" >}}). -1. Select **Next**. - -### Security - -On the **Create App Component** *Security* page: - -{{< call-out "note" >}} The following Security settings are applicable only to Web components. {{< /call-out >}} - -1. (Optional) Select **Enable Web Application Firewall (WAF)** to watch for or block suspicious requests or attacks. -1. (Optional) Select **Monitor Only** to allow traffic to pass without being rejected. Security events are still generated and metrics are still collected. Refer to [About App Security Analytics]({{< ref "/controller/analytics/view-app-security-analytics.md#overview" >}}) for more information. -1. (Optional) the signature(s) that you want the WAF to ignore. You can specify multiple signatures as a comma-separated list. -1. Select **Next**. - -{{< call-out "note" >}} -Refer to the [Secure Your Apps]({{< ref "/controller/app-delivery/security/_index.md" >}}) topics to learn more about WAF and the default protections provided by NGINX App Protect. -{{< /call-out>}} - -## Edit or Delete Apps and Components - -To view, edit, and delete Apps: - -1. Open the NGINX Controller user interface and log in. -1. Select the **NGINX Controller menu icon** > **Services** > **Apps**. -1. On the **Apps** menu, select **Overview**. The **Apps Overview** page is displayed and shows a list of your Apps. -1. To view the details for an App, including metrics data and components, select the App name in the list of Apps. -1. To edit the App, select **Edit Config** on the **Quick Actions** menu. -1. To delete the App, select **Delete Config** on the **Quick Action**s menu. - -To edit or delete a Component: - -1. Open the NGINX Controller user interface and log in. -1. Select the **NGINX Controller menu icon** > **Services** > **Apps**. -1. On the **Apps** menu, select **Overview**. The **Apps Overview** page is displayed and shows a list of your Apps. -1. Select the App that contains the Component that you want to modify. The App's **Overview** page is displayed. -1. In the details panel for your App, select **Components**. -1. 
On the **Components** page, select the Component that you want to modify. -1. To edit the Component, select **Edit Config** on the **Quick Actions** menu. -1. To delete the Component, select **Delete Config** on the **Quick Actions** menu. - -{{< versions "3.0" "latest" "ctrlvers" >}} -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/security/_index.md b/content/controller/app-delivery/security/_index.md deleted file mode 100644 index 54d5419bf..000000000 --- a/content/controller/app-delivery/security/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Secure your applications using F5 NGINX Controller App Security -title: App Security -weight: 100 -url: /nginx-controller/app-delivery/security/ ---- - diff --git a/content/controller/app-delivery/security/concepts/_index.md b/content/controller/app-delivery/security/concepts/_index.md deleted file mode 100644 index 097169f5b..000000000 --- a/content/controller/app-delivery/security/concepts/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: Overview of the App Security module and default policy -title: Learn About App Security -weight: 100 -url: /nginx-controller/app-delivery/security/concepts/ ---- - diff --git a/content/controller/app-delivery/security/concepts/app-sec-default-policy-original.md b/content/controller/app-delivery/security/concepts/app-sec-default-policy-original.md deleted file mode 100644 index 342bc0cb4..000000000 --- a/content/controller/app-delivery/security/concepts/app-sec-default-policy-original.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -description: Learn about the default protections provided by F5 NGINX Controller App - Security. -nd-docs: DOCS-479 -title: Default WAF Policy -toc: true -weight: 200 -type: -- concept -- reference ---- - -## Overview - -You can use the F5 NGINX Controller App Security module to configure and manage a web application firewall (WAF). The App Security WAF protects your applications from HTTP and web-based threats, including the [OWASP Top 10](https://owasp.org/www-project-top-ten/). - -NGINX Controller App Security provides out-of-the-box analytics events and metrics, which are reported through the NGINX Controller API and user interface. App Security works with [NGINX App Protect](https://www.nginx.com/products/nginx-app-protect), running NGINX Plus as the WAF in the data path. - -## Default Policy - -The default policy for App Security WAF in NGINX Controller focuses on [OWASP Top 10](https://owasp.org/www-project-top-ten/) protection. This policy is the same default policy that is used by NGINX App Protect. - -The default policy for NGINX Controller App Security WAF includes these security checks: - - - - - -| **Security Checks** | **Description** | -|---------------------------|-----------------| -| HTTP RFC compliance enforcement | Validation of HTTP requests to prevent the use of the HTTP protocol as an entry point for malicious requests to applications. | -| URL normalization | Decoding of requests for encoded request that contain different types of encoded escapes | -| Evasion techniques | Protection for techniques commonly used by hackers to access resources or evade what would otherwise be identified as an attack. The checks performed are:
  • Bad unescape (bad escaping)
  • Directory traversal
  • Bare byte decoding
  • Apache whitespace
  • Multiple % decoding
  • IIS Unicode codepoint
  • IIS backslashes
  • %u decoding
| -| Malformed cookie | Validates that the cookie format is RFC compliant. | -| Illegal status code | Responses in the 400–500 range -- except for `400`, `401`, `404`, `407`, `417`, `503` -- are rejected. | -| Request size exceeds the buffer | Requests that exceed the buffer size | -| Maximum length for URL, header, query string, cookie, and POST data | URL length: 2048
Header length: 4096
Query string length: 2048
Cookie length: 4096
Post data length: 4096

{{< call-out "note" >}} The whole request length is not checked. The entire request cannot exceed the maximum buffer size of 10 MB.{{< /call-out >}} | -| Disallowed file type extension | These file types are disallowed:
  • bak, bat, bck, bkp, cfg, conf, config, ini, log, old, sav, save, temp, tmp
  • bin, cgi, cmd, com, dll, exe, msi, sys, shtm, shtml, stm
  • cer, crt, der, key, p12, p7b, p7c, pem, pfx
  • dat, eml, hta, htr, htw, ida, idc, idq, nws, pol, printer, reg, wmz
| -| Allowed methods | Only these HTTP methods are allowed:
  • GET
  • HEAD
  • POST
  • PUT
  • PATCH
  • DELETE
  • OPTIONS
| -| Character/Metacharacter validation in URL and header | Metacharacters are checked in the URL and header. | -| Parameter parsing | NGINX Controller App Security auto-detects the payload type for JSON and XML. App Security then applies the signature that matches the correct format.| -| JSON format | If the content is JSON, then App Security checks that the JSON payload body is well-formed. The max structure depth and max array length may not exceed 25.

No JSON schema enforcement. | -| DTD XML format | If the content is XML, then App Security checks that an XML payload body is well-formed.

No XML schema enforcement. No SOAP and Web Services Security format enforcement. | - -## Attack Types Used In Default Policy - -The following signature attack types are included with the default NGINX Controller App Security WAF policy. These attack types protect against [OWASP Top 10](https://owasp.org/www-project-top-ten/) vulnerabilities and [CVEs](https://cve.mitre.org/). Low, medium, and high accuracy signatures generate events as part of assessing the [Violation Rating](#use-of-violation-ratings-in-default-policy). - -- Command Execution Signatures -- Cross-Site Scripting Signatures -- Directory Indexing Signatures -- Information Leakage Signatures -- OS Command Injection Signatures -- Path Traversal Signatures -- Predictable Resource Location Signatures -- Remote File Include Signatures -- SQL Injection Signatures -- Authentication/Authorization Attacks Signatures -- XML External Entity (XXE) Signatures -- XPath Injection Signatures -- Buffer Overflow Signatures -- Denial of Service Signatures -- Vulnerability Scanner Signatures - -## Use of Violation Ratings in Default Policy - -The default policy for App Security assesses violations and provides a Violation Rating. This rating is an NGINX App Protect computed assessment of the risk of the request and its likelihood of an attack based on the triggered violations. - -NGINX App Protect violations are rated to distinguish between attacks and potential false-positive alerts. A rating is assigned to requests based on the presence of one or more violations. Each violation type and severity contribute to the calculation of the Violation Rating assigned to a request. - -The possible Violation Ratings are: - -- 0: No violation (no event available) -- 1: Possible False Positive (no event available) -- 2: Most Likely False positive (no event available) -- 3: Needs examination -- 4: Possible Attack -- 5: Most Likely Attack - -The Violation Rating is a dimension in Security Violation Events. NGINX App Protect rejects requests that have a Violation Rating of `4 (Possible Attack)` or `5 (Most Likely an Attack)`. However, the following violations and signature sets have a low chance of being false positives and are, therefore, configured by default to reject the request regardless of its Violation Rating: - -- High accuracy attack signatures -- Threat campaigns -- Malformed request: unparsable header, malformed cookie, and malformed body (JSON or XML). - -{{< call-out "note" >}} - -With the default policy, all requests rejected by NGINX App Protect generate a Security Event in NGINX Controller. Requests with Violation Rating of `3 (Needs examination)` also generate a Security Event in NGINX Controller. All other requests do not generate a Security Event in NGINX Controller. - -{{< /call-out >}} - -## Additional Information - -### HTTP RFC Compliance Already Rejected By NGINX - -Note the following events are blocked by NGINX Plus and not by the NGINX Controller App Security policy. These events are not reported in NGINX Controller as security violation events. - -| **HTTP RFC Compliance Checks** | **Description** | -|--------------------------------|-----------------| -| Unparsable request content | This violation is triggered when the system's parser cannot parse the message. | -| Several Content-Length headers | More than one content-length header is a non-RFC violation. Indicates an HTTP response splitting attack. | -| NULL in header | The system issues a violation for requests with a NULL character in the header. 
| -| No Host header in HTTP/1.1 request | Check to see if HTTP/1/1 requests contain a "Host" header. | -| High ASCII characters in headers| Check for high ASCII characters (greater than 127) in headers. | -| Content length should be a positive number | The Content-Length header value should be greater than zero; only a numeric positive number value is accepted. | -| Bad HTTP version | Enforces legal HTTP version number (only 0.9 or higher allowed). | - -{{< versions "3.12" "latest" "ctrlvers" >}} diff --git a/content/controller/app-delivery/security/concepts/app-sec-metrics.md b/content/controller/app-delivery/security/concepts/app-sec-metrics.md deleted file mode 100644 index a07aa2fd8..000000000 --- a/content/controller/app-delivery/security/concepts/app-sec-metrics.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -description: Learn about the F5 NGINX Controller Application Security metrics and - events. -nd-docs: DOCS-480 -title: App Security Metrics -toc: true -weight: 400 -type: -- reference ---- - -## Overview - -This topic provides reference information for the metrics and events that F5 NGINX Controller reports for Application Security. - -## Security Metrics and Event Dimensions - -The following table shows the attributes and dimensions you can view and filter by for WAF violation events. - -{{}} - -| **Attribute** | **Possible Values** | **Description and Additional Information** | -|-------------|-----------|------| -| category | security violation | | -| timestamp | Timestamp of the request | UTC | -| message | | Provides summary info about if a request was rejected or flagged, from what source, and due to what attack types.| -| level | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` | Security violation events are only `INFO` for now| -| hostname | |Hostname used in the request| -| environment | | | -| app | | | -| component | | | -| gateway | | | -| corelationId | | | -| http.request_endpoint | | Request URI | -| http.request_method | | Method used for the request| -| request_outcome |`REJECTED`, `PASSED`| The outcome of the request after Controller App Security processed the request.| -| request_outcome_reason | `SECURITY_WAF_OK`, `SECURITY_WAF_VIOLATION`, `SECURITY_WAF_FLAGGED`, `SECURITY_WAF_BYPASS`, `SECURITY_NGINX_VIOLATION`, `SECURITY_WAF_FLAGGED` | request_outcome_reason provides the reason why App Security rejected or flagged a request to be reviewed. Outcome reasons for `SECURITY_WAF_BYPASSED` and `SECURITY_NGINX_VIOLATION` have not been implemented.
{{< call-out "note" >}} App Security Events are not created for requests that don't trigger any violations. This means you should not see Events with `outcome_reason = SECURITY_WAF_OK`.{{< /call-out >}}| -| http.response_code | | Response code returned to App Security. A `0` code is returned if App Security did not block the request.| -| http.hostname | | Hostname of request| -| http.remote_addr | | Client IP of the request| -| http.remote_port | | Port of the client initiating the request| -| http.server_addr | | Server IP address that NGINX is listening on| -| http.server_port | | Server IP port that NGINX is listening on| -| waf.http_request | | Request including header, body, etc.| -| waf.support_id | | ID seen on the App Security rejection page| -| waf.signature_ids | | ID list of signatures triggered with the request. It usually does not go above three signature IDs.| -| waf.signature_names | | Names of signatures triggered with the request. It usually does not go above three signature names.| -|waf.attack_types | | Attack types triggered by the request. It can be based on any of the above signature or other protection mechanisms used in the WAF policy. It usually does not go above three attack types.| -| violations ||Comma-separated list of logical violation names| -| sub_violation ||More specific violations within ‘HTTP protocol compliance failed’ (violation = `VIOL_HTTP_PROTOCOL`) and/or ‘Evasion technique detected’ violations (violation = `VIOL_EVASION`) | -| sig_cves||Signature CVEs value of the matched signatures.| -| is_truncated||A flag that returns true if a request is truncated in the security events, or false if it is not. | -| x_forwarded_for_header_value||X-Forwarded-For header information. This option is commonly used when proxies are involved to track the originator of the request.| - -{{< /bootstrap-table >}} - -### Attack Types and Description - -Each signature and violation has an Attack Type which is the attack vector WAF protects from. The list of Attack Types and descriptions are listed here. You may see these attack types and violations in Security Events and Metrics. - -{{}} - -| **Attack Type** | **Description** | -|-----------------|-----------------| -| Server-Side Template Injection | Some applications use server-side templates for better modularity. This attack occurs when a non-sanitized input containing template directives is embedded into a server-side template which then leads to the execution of the injected code when rendered. | -|Insecure File Upload | Many applications allow uploading files to the server, such as images or documents. An application that does not correctly restrict the type of the uploaded files or the upload folder path can be exploited by attackers to upload files, called ‘WebShells’, containing malicious code that later will be executed or override the server configuration.| -|NoSQL Injection|NoSQL databases are non-relational databases, and even though they do not use the SQL syntax, non-sanitized input might let attackers control the original query via a database-specific programming language.| -|Insecure Deserialization | This is an attack against an application that receives serialized objects. An application which does not restrict which objects might be deserialized could be exploited by attackers sending specific object called ‘gadgets’, that could trigger arbitrary code execution when deserialized.| -|XML External Entities (XXE)| This is a type of attack against an application that parses XML input. 
This attack occurs when XML input containing a reference to an external entity is processed by a weakly configured XML parser.| -|Server-Side Request Forgery (SSRF) | Some applications receive a URL as an input and use it to exchange data with another service. An attacker could provide special URLs to read or update internal resources such as localhost services, cloud metadata servers, internal network web applications or HTTP enabled databases.| -|Cache Poisoning| Cache poisoning is an attack against the integrity of an intermediate Web cache repository, in which genuine content cached for an arbitrary URL is replaced with spoofed content.| -|WebSocket Parser Attack | WebSocket parser attack targets the functionality of the WebSocket parser to crash it or force the parser to work abnormally.| -|GWT Parser Attack | This attack targets the functionality of the GWT parser to crash it or force the parser to work abnormally.| -|Cross-site Request Forgery | An attacker exploits the web application’s assumption and trust that the authenticated user is purposely sending requests to perform actions or commands, while the attacker is causing the user to send the commands without the user’s knowledge or consent.| -|JSON Parser Attack |This attack targets the functionality of the JSON parser to crash it or force the parser to work abnormally.| -|Malicious File Upload|Malicious file upload occurs when a user tries to upload a malicious file to the web application. This could allow remote attackers to cause Server Infection, Network Infection, Buffer Overflow, and Remote Comma Execution.| -|HTTP Response Splitting|Specially crafted HTTP messages can manipulate the webserver or cache’s standard behavior. This can lead to XSS, and cache poisoning.| -|Session Hijacking|An attacker can steal a valid web session from legitimate users to gain unauthorized access.| -|XML Parser Attack|This attack targets the functionality of the XML parser to crash it or force the parser to work abnormally.| -|Parameter Tampering|By changing certain parameters in a URL or web page form, attackers can successfully attack the web application business logic.| -|Injection Attempt|This is an attack where an attacker injects OS commands, active script commands (in JavaScript or any other scripting language), or SQL commands into various parts of an HTTP request, for the injected content to run on remote systems. The two most common injection attacks are SQL injection and Cross-Site Scripting.| -|Brute Force Attack|Brute-force attacks are mainly used for guessing passwords and bypassing access control of an application by executing many different attempts.| -|Forceful Browsing|This attack occurs when an attacker is directly accessing a URL, which could grant access to a restricted part of the web site.| -|HTTP Request Smuggling Attack|Specially crafted HTTP messages can manipulate the webserver or cache’s standard behavior. 
This can lead to XSS, and cache poisoning.| -|HTTP Parser Attack|HTTP parser attack targets the functionality of the HTTP parser to crash it or force the parser to work abnormally.| -|Other Application Activity|This attack does not belong to any specific attack category, however, it is a violation of the user-defined security policy.| -|Denial of Service|A denial-of-service (DoS) attack represents a family of attacks aimed to exhaust the application server resources up to a point that the application cannot respond to legitimate traffic, either because it has crashed, or because its slow response renders it effectively unavailable.| -|Cross-Site Scripting (XSS)|Cross-Site Scripting (XSS) occurs when a web application does not sanitize user-supplied input and places it directly into the page returned to the user. Usually, the attacker will submit malicious JavaScript, VBScript, ActiveX, HTML, or Flash code to the vulnerable website.| -|SQL-Injection|SQL-Injection occurs when a web application does not sanitize user-supplied input and places it directly into the SQL statement. This attack allows remote attackers to run SQL statements on the internal database.| -|Command Execution|Web applications can be tricked to execute operating system commands, injected from a remote machine if user-supplied input is not properly checked by the web application.| -|Server Side Code Injection|An attacker can submit server-side code by invalidated input. The webserver, when parsing malicious input, may execute operating system commands or access restricted files.| -|LDAP Injection|If user-supplied input is not correctly sanitized, the attacker could change the construction of LDAP statements. Successful exploitation results in information gathering, system integrity compromise, and possible modification of the LDAP tree.| -|XPath Injection|XPath-Injection occurs when a web application does not sanitize user-supplied input but places it directly into the XML document query. Successful exploitation results in information gathering and system integrity compromise.| -|Path Traversal|Path traversal can be used to bypass the webserver root and request various files, including system files or private directories and resources. This attack can lead to information disclosure and possible exposure of sensitive system information.| -|Directory Indexing|This is a directory listing attempt which can lead to information disclosure and possible exposure of sensitive system information. Directory Indexing attacks usually target webservers that are not correctly configured, or which have a vulnerable component that allows Directory Indexing.| -|Information Leakage|Sensitive information may be present within HTML comments, error messages, source code, or simply left in files that are accessible by remote clients. Besides, attackers can manipulate the application to reveal classified information like credit card numbers. This can lead to the disclosure of sensitive system information which may be used by an attacker to further compromise the system.| -|Predictable Resource Location|By making educated guesses, the attacker could discover hidden web site content and functionality, such as configuration, temporary, backup, or sample files. This can lead to the disclosure of sensitive system information which may be used by an attacker to compromise the system.| -|Buffer Overflow|Buffer Overflow could be triggered when data written to memory exceeds the allocated size of the buffer for that data. 
This could lead to the Denial of Service or arbitrary code execution.| -|Authentication/Authorization Attacks|Authentication/Authorization Attacks occur when a web site permits an attacker to access sensitive content or functionality without having to properly authenticate, or authorize, that resource.| -|Abuse of Functionality|Abuse of Functionality is an attack technique that uses a website’s features and functionality to consume, defraud, or circumvent access control mechanisms.| -|Vulnerability Scan|An attempt is made using an automatic tool to scan a webserver, or an application running on a webserver, for a possible vulnerability.| -|Detection Evasion|An attempt is made to evade detection of the attack on a webserver, by obfuscating the attack using various methods such as encodings and path manipulation.| -|Trojan/Backdoor/Spyware|This is an attack initiated by some form of malicious code.| -|Other Application Attacks|This is an attack which targets the web application and does not fall in any predefined category| -|Non-browser Client|An attempt is made by a non-browser client to explore the site.| -|Remote File Include|Remote File Inclusion attacks allow attackers to run arbitrary code on a vulnerable website.| - -{{< /bootstrap-table >}} - -### Violations and Descriptions - -Each violation consists of one or more security checks (for example, attack signatures, HTTP RFC compliance, and evasion techniques). Each security check could be a specific attack signature, a specific HTTP Compliance check, or a specific evasion technique that is triggered within WAF. - -{{}} - -| **Violation Value** | **Name** | **Description** | -|-----------------|-----------------|--------------------| -|`VIOL_ASM_COOKIE_MODIFIED` | Modified ASM cookie |The system checks that the request contains an ASM cookie that has not been modified or tampered with. Blocks modified requests.| -|`VIOL_ATTACK_SIGNATURE`|Attack signature detected|The system examines the HTTP message for known attacks by matching it against known attack patterns. See signature_ids and signature_names attributes for specific signatures matched.| -|`VIOL_COOKIE_EXPIRED`|Expired timestamp|The system checks that the timestamp in the HTTP cookie is not old. An old timestamp indicates that a client session has expired. Blocks expired requests. The timestamp is extracted and validated against the current time. If the timestamp is expired and it is not an entry point, the system issues the Expired Timestamp violation.| -|`VIOL_COOKIE_LENGTH`|Illegal cookie length|The system checks that the request does not include a cookie header that exceeds the acceptable length specified in the security policy.| -|`VIOL_COOKIE_MALFORMED`|Cookie not RFC-compliant|This violation occurs when HTTP cookies contain at least one of the following components:
  • Quotation marks in the cookie name
  • A space in the cookie name.
  • An equal sign (=) in the cookie name. Note: A space between the cookie name and the equal sign (=), and between the equal sign (=) and cookie value is allowed.
  • An equal sign (=) before the cookie name.
  • A carriage return (hexadecimal value of 0xd) in the cookie name.
  • | -|`VIOL_ENCODING`|Failed to convert character|The system detects that one of the characters does not comply with the configured language encoding of the web application’s security policy.| -|`VIOL_EVASION`|Evasion technique detected|This category contains a list of evasion techniques that attackers use to bypass detection.| -|`VIOL_FILETYPE`|Illegal file type|The system checks that the requested file type is configured as a valid file type, or not configured as an invalid file type, within the security policy. Only for disallowed file types.| -|`VIOL_HEADER_LENGTH`|Illegal header length|The system checks that the request includes a total HTTP header length that does not exceed the length specified in the security policy. The actual size in the default policy is 4 KB.| -|`VIOL_HEADER_METACHAR`|Illegal meta character in header|The system checks that the values of all headers within the request only contain meta characters defined as allowed in the security policy.| -|`VIOL_HTTP_PROTOCOL`|HTTP protocol compliance failed|This category contains a list of validation checks that the system performs on HTTP requests to ensure that the requests are formatted properly.| -|`VIOL_HTTP_RESPONSE_STATUS`|Illegal HTTP response status|The server response contains an HTTP status code that is not defined as valid in the security policy.| -|`VIOL_JSON_MALFORMED`|Malformed JSON data|The system checks that the request contains JSON content that is well-formed. Enforces parsable JSON requests.| -|`VIOL_METHOD`|Illegal method|The system checks that the request references an HTTP request method that is found in the security policy. Enforces desired HTTP methods; GET and POST are always allowed. These HTTP methods are supported: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS.| -|`VIOL_QUERY_STRING_LENGTH`|Illegal query string length|The system checks that the request contains a query string whose length does not exceed the acceptable length specified in the security policy. In * file type entity. The actual size is 2 KB.| -|`VIOL_REQUEST_MAX_LENGTH`|Request length exceeds defined buffer size|The system checks that the request length is not larger than the maximum memory buffer size of the ASM. Note that this is a BIG-IP unit parameter that protects the ASM from consuming too much memory across all security policies which are active on the device. Default is 10MB.| -|`VIOL_URL_LENGTH`|Illegal URL length|The system checks that the request is for a URL whose length does not exceed the acceptable length specified in the security policy. In * file type entity. The actual size is 2 KB.| -|`VIOL_URL_METACHAR`|Illegal meta character in URL|The system checks that the incoming request includes a URL that contains only meta characters defined as allowed in the security policy. Enforces the desired set of acceptable characters.| -|`VIOL_XML_FORMAT`|XML data does not comply with format settings|The system checks that the request contains XML data that complies with the various document limits within the defense configuration in the security policy’s XML profile. Enforces proper XML requests and the data failed format/defense settings such as the maximum document length. 
This violation is generated when a problem in an XML document is detected (for example, an XML bomb), generally checking the message according to boundaries such as the message’s size, maximum depth, and the maximum number of children.| -|`VIOL_XML_MALFORMED`|Malformed XML data|The system checks that the request contains XML data that is well-formed, according to W3C standards. Enforces proper XML requests.| -|`VIOL_RATING_THREAT`|Request is likely a threat|The combination of violations in this request determined that the request is likely to be a threat.| -|`VIOL_PARAMETER_NAME_METACHAR`|Illegal meta character in parameter name|The system checks that all parameter names within the incoming request only contain meta characters defined as allowed in the security policy.| -|`VIOL_PARAMETER_VALUE_METACHAR`|Illegal meta character in value|The system checks that all parameter values, XML element/attribute values, or JSON values within the request only contain meta characters defined as allowed in the security policy. Enforces proper input values.| - -{{< /bootstrap-table >}} - -### HTTP RFC Sub-violations and Descriptions - -The following table specifies the HTTP Compliance sub-violation settings. All are supported in NGINX, but not all are enabled in the default App Protect security template. The table specifies which. Some of the checks are enforced by NGINX Plus and App Protect only gets a notification. Note: In this case, the request is always blocked regardless of the App Protect policy. - -{{}} - -| **Sub-violation** | **Description** | -|-----------------|-----------------| -|Null in request (null in body, null in the header is done by NGINX Plus)|The system issues a violation for requests with a NULL character anywhere in the request (except for a NULL in the binary part of a multipart request).| -|Multiple host headers|Examines requests to ensure that they contain only a single “Host” header.| -|The host header contains IP address|The system verifies that the request’s host header value is not an IP address to prevent non-standard requests.| -|CRLF characters before request start|Examines whether there is a CRLF character before the request method. If there is, the system issues a violation.| -|Chunked request with Content-Length header|The system checks for a Content-Length header within chunked requests.| -|Check the maximum number of parameters|The system compares the number of parameters in the request to the maximum configured number of parameters. Maximum is set to 500.| -|Check the maximum number of headers|The system compares the request headers to the maximal configured number of headers. Maximum is set to 50.| -|Unescaped space in URL|The system checks that there is no unescaped space within the URL in the request line. Such spaces split URLs introducing ambiguity on picking the actual one.| -|Bad multipart/form-data request parsing|When the content type of a request header contains the substring “Multipart/form-data”, the system checks whether each multipart request chunk contains the strings “Content-Disposition” and “Name”. If they do not, the system issues a violation.| -|Bad multipart parameters parsing|The system checks the following:
    • A boundary follows immediately after request headers.
    • The parameter value matches the format: ‘name="param_key";\r\n’.

    • A chunked body contains at least one CRLF.
    • A chunked body ends with CRLF.
    • A final boundary was found in the multipart request.

    • There is no payload after the final boundary.

    If one of these is false, the system issues a violation.

    • | - -{{< /bootstrap-table >}} - -### Evasion Techniques and Description - -The following table specifies the Evasion Techniques sub-violation values and descriptions. - -{{}} - -| **Sub-violation** | **Description** | -|-----------------|-----------------| -|%u decoding|The system performs Microsoft %u Unicode decoding (%UXXXX where X is a hexadecimal digit). For example, the system turns a%u002fb to a/b. The system performs this action on URI and parameter input to evaluate if the request contains an attack.| -|Apache whitespace|The system detects the following characters in the URI: 9 (0x09), 11 (0x0B), 12 (0x0C), and 13 (0x0D).| -|Bad unescape|The system detects illegal HEX encoding. Reports unescaping errors (such as %RR).| -|Bare byte decoding|The system detects higher ASCII bytes (greater than 127).| -|Directory traversals|The system ensures that directory traversal commands like ../ are not part of the URL. While requests generated by a browser should not contain directory traversal instructions, sometimes requests generated by JavaScript have them.| -|IIS backslashes|The system normalizes backslashes (`\`) to slashes (`/`) for further processing.| -|IIS Unicode codepoints|The system handles the mapping of IIS specific non-ASCII codepoints. Indicates that, when a character is greater than ‘0x00FF’, the system decodes %u according to an ANSI Latin 1 (Windows 1252) code page mapping. For example, the system turns a%u2044b to a/b. The system performs this action on URI and parameter input.| -|Multiple decoding|The system decodes URI and parameter values multiple times according to the number specified before the request is considered an evasion. The maximum decoding is 3.| - -{{< /bootstrap-table >}} - -{{< versions "3.12" "latest" "ctrlvers" >}} diff --git a/content/controller/app-delivery/security/concepts/bring-your-own-policy.md b/content/controller/app-delivery/security/concepts/bring-your-own-policy.md deleted file mode 100644 index b9a72634e..000000000 --- a/content/controller/app-delivery/security/concepts/bring-your-own-policy.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -description: Learn how to use your own F5 WAF for NGINX policies with NGINX - Controller. -nd-docs: DOCS-481 -title: Bring Your Own WAF Policy -toc: true -weight: 300 -type: -- concept ---- - -## Overview - -You can use the App Security Add-on for F5 NGINX Controller ADC to bring your own ("BYO") NGINX App Protect policies into NGINX Controller. This lets you use your existing declarative JSON policies from NGINX App Protect to protect your ADC app components. - -A BYO NGINX App Protect policy lets you maintain consistent Security Policies across your F5 WAF and NGINX WAF deployments. For example, say you already use F5 BIG-IP Application Security Manager (ASM) or F5 Advanced WAF and are now adopting NGINX Controller App Security. You can convert your XML Security Policies to an NGINX App Protect policy by using the [NGINX App Protect Policy Converter tool](https://docs.nginx.com/nginx-app-protect/configuration/#policy-converter). - -To export a policy from F5 Advanced WAF or ASM, take the following steps: - -1. Convert your F5 XML security policy to an F5 WAF for NGINX declarative JSON policy using the [NGINX App Protect Policy Converter tool](https://docs.nginx.com/nginx-app-protect/configuration/#policy-converter). - {{< call-out "note" >}}We recommend using the Converter tool that corresponds with the most recent NGINX App Protect version.{{< /call-out >}} - -2. 
Use the NGINX App Protect declarative JSON policy as the WAF policy in NGINX Controller for your app component(s). - -  - -With a BYO NGINX App Protect policy, you can also provide customized security by crafting an F5 WAF for NGINX policy that specifies the security controls appropriate for your apps. For more information on how to configure an F5 WAF for NGINX policy, refer to the [NGINX App Protect Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration/). - -## Security Strategy for BYO NGINX App Protect Policy - -The BYO NGINX App Protect policy uses the concept of a [Security Strategy]({{< ref "/controller/app-delivery/security/concepts/what-is-waf.md#security-policy-and-security-strategy" >}}) - -With the BYO NGINX App Protect policy feature, you can specify the exact NGINX App Protect policy for the Security Strategy. Then, the Security Strategy can be shared across -- and referenced by -- multiple app components. -A Security Strategy can be comprised of various app-security-related Security Policies. NGINX Controller includes a custom F5 WAF for NGINX policy, which can be assigned to a Security Strategy. - -You can also add a BYO F5 WAF for NGINX policy in JSON format to NGINX Controller "as-is" for use in a Security Strategy. - - -An **App Component** contains a reference to a **Security Strategy**, which, in turn, references a Security Policy. This Security Policy contains the **F5 WAF for NGINX policy**. - -Refer to the topic [Enable WAF for a Component Using Your Own NGINX App Protect Policy]({{< ref "/controller/app-delivery/security/tutorials/add-app-security-with-waf.md#enable-waf-for-a-component-using-your-own-nap-policy-beta" >}}) to get started. - -## Limitations - -BYO NAP WAF policy currently has the following limitations: - -- The size of the BYO F5 WAF for NGINX policy that's referenced by app components may affect application performance. -- References to external files, such as the following, in the F5 WAF for NGINX JSON declarative policy are not supported: - - User Defined Signatures - - Security controls in external references - - Referenced OpenAPI spec files -- Cookie modification (`VIOL_COOKIE_MODIFIED`) is not supported. -- gRPC protection is not supported. -- Protection with partial security visibility: - - Not all security metrics dimensions are available for the following: - - Bot violations - - CSRF origin validation violations - - User-defined browser violations - -{{< versions "3.20" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/security/concepts/extend-app-security-snippets.md b/content/controller/app-delivery/security/concepts/extend-app-security-snippets.md deleted file mode 100644 index c2bc242b2..000000000 --- a/content/controller/app-delivery/security/concepts/extend-app-security-snippets.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -description: Learn how to extend your App Security configurations using F5 NGINX Controller - Snippets. -nd-docs: DOCS-338 -title: Extend App Security with Snippets -toc: true -weight: 400 -type: -- concept -- reference ---- - -## Overview - -F5 NGINX Controller [Snippets]({{< ref "/controller/app-delivery/about-snippets.md" >}}) let you customize your NGINX configuration by adding NGINX directives that aren't represented by the NGINX Controller API. - -Snippets also let you customize App Security for your Components by adding NGINX App Protect directives that aren't present in the NGINX Controller API. 
You can use Snippets when [tuning your F5 WAF for NGINX performance]({{< ref "/controller/app-delivery/security/tutorials/tune-waf-for-app" >}}) as well. - -{{< call-out "caution" >}} -When you use Snippets to customize your NGINX configuration, your changes are applied to the `nginx.conf` file *as is*. NGINX Controller does not verify that your configuration is valid before applying the Snippet. - -We strongly recommend verifying Snippets in a lab environment before making any changes in production. -{{< /call-out >}} - -## App Security Usage Examples - -{{< call-out "caution" >}} -The examples provided here are intended for demonstration purposes only. -We strongly recommend verifying Snippets in a lab environment before making any changes in production. -{{< /call-out >}} - -### Define a Backup Location for Security Event Logs - -When you [enable WAF on a Component]({{< ref "/controller/app-delivery/security/tutorials/add-app-security-with-waf" >}}), all Security Events are sent to NGINX Controller logs via syslog. The following example uses the `app_protect_security_log` directive in a URI Snippet to define a local backup location for Security Event logs. You can also send Security Events to another syslog server or to `stderr` by inserting an additional URI Snippet with the `app_protect_security_log` directive. - -{{< call-out "caution" >}} -Using local files as a backup for Security Events may use up disk space and affect your system performance. In production environments, setting up a remote file or a remote syslog server for backup purposes are good alternatives to using a local backup. -{{< /call-out >}} - -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "ingress": { - "uris": { - "/": { - } - }, - "gatewayRefs": [ - { - "ref": "/services/environments/environment-name/gateways/" - } - ] - }, - "security": { - "strategyRef": { - "ref": "/security/strategies/balanced_default" - }, - "waf": { - "isEnabled": true - } - }, - "backend": { - "workloadGroups": { - "servers": { - "uris": { - "https://test-01.example.com": { - }, - "https://test-02.example.com": { - } - } - } - } - }, - "configSnippets": { - "uriSnippets": [ - { - "directives": [ - { - "directive":"app_protect_security_log", - "args": ["/etc/controller-agent/configurator/auxfiles/log-default.json", "/var/log/app_protect/security.log"] - } - ] - } - ] - } - } -} -``` - -### Add Location of User-Defined Signature Definition File - -When using [Bring Your Own WAF Policy]({{< ref "/controller/app-delivery/security/concepts/bring-your-own-policy" >}}) in NGINX Controller, you can define a URI Snippet for a Gateway API to define the location for your User-Defined Signature Definition file. The User-Defined Signature can then be referenced in the custom F5 WAF for NGINX policy that you use for your Components. - -{{< call-out "note" >}} -The file that contains the signature definition must already exist on your F5 WAF for NGINX instances. For more information regarding User-Defined Signatures, refer to the [F5 WAF for NGINX Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#user-defined-signatures). -{{< /call-out >}} - -The following example adds a URI snippet to the Gateway API definition that provides the location of the User-Defined Signature Definition file. 
- -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "httpSnippet": { - "directives": [ - { - "directive": "app_protect_user_defined_signatures", - "args": ["app_protect_user_defined_signature_def_01"] - } - ] - } - }, - "ingress": { - "uris": { - "": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} - -``` - -### Harden Security using Fail-Closed - -Setting NGINX App Protect to "fail-closed" drops application traffic when certain conditions exist. This setting lets you err on the side of greater security as opposed to convenience, providing better protection for your applications when NGINX App Protect is not available. - -The example below adds HTTP Snippets to the Gateway that set the following NGINX App Protect directives to `drop`, or "fail-closed": - -- `app_protect_failure_mode_action` -- `app_protect_compressed_requests_action` -- `app_protect_request_buffer_overflow_action` - -Refer to the [NGINX App Protect Configuration Guide](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#global-directives) for more information about these directives and the conditions to which each applies. - -```json -{ - "metadata": { - "name": "gateway-name" - }, - "desiredState": { - "configSnippets": { - "httpSnippet": { - "directives": [ - { - "directive": "app_protect_failure_mode_action", - "args": ["drop"] - }, - { - "directive": "app_protect_compressed_requests_action", - "args": ["drop"] - }, - { - "directive": "app_protect_request_buffer_overflow_action", - "args": ["drop"] - } - ] - } - }, - "ingress": { - "uris": { - "http://example.com:8000": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} - -``` - -## Tuning WAF Performance Usage Examples - -{{< call-out "caution" >}} -The examples provided here are intended for demonstration purposes only. -We strongly recommend verifying Snippets in a lab environment before making any changes in production. -{{< /call-out >}} - -## Set the Memory and CPU Threshold Values - -This example adds an HTTP Snippet to a Gateway to control the memory and CPU threshold values which determine when NGINX App Protect enters and exits failure mode. - -In *failure mode*, F5 WAF for NGINX stops processing app traffic. Traffic is either dropped or passed through, as determined by the `app_protect_failure_mode_action` directive. - -The example below directs F5 WAF for NGINX to enter failure mode when memory utilization or CPU utilization reaches 85% and to exit failure mode when memory or CPU utilization drops to 60%. 
- -```json -{ - "metadata": { - "name": "" - }, - "desiredState": { - "configSnippets": { - "httpSnippet": { - "directives": [ - { - "directive": "app_protect_physical_memory_util_thresholds", - "args": ["high=85 low=60"] - }, - { - "directive": "app_protect_cpu_thresholds", - "args": ["high=85 low=60"] - } - ] - } - }, - "ingress": { - "uris": { - "http://example.com:8000": {} - }, - "placement": { - "instanceRefs": [ - { - "ref": "/infrastructure/locations/unspecified/instances/" - } - ] - } - } - } -} -``` - -{{< versions "3.22" "latest" "adcvers" >}} diff --git a/content/controller/app-delivery/security/concepts/what-is-waf.md b/content/controller/app-delivery/security/concepts/what-is-waf.md deleted file mode 100644 index c03264384..000000000 --- a/content/controller/app-delivery/security/concepts/what-is-waf.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: Overview of the App Security module's WAF feature. -nd-docs: DOCS-483 -title: About App Security -toc: true -weight: 100 -type: -- concept -- reference ---- - -## Overview - -The App Security Add-on for F5 NGINX Controller ADC lets you protect your applications with a web applications firewall (WAF). The WAF protects your apps from a variety of application layer attacks such as [cross-site scripting (XSS)](https://www.f5.com/services/resources/glossary/cross-site-scripting-xss-or-css), [SQL injection](https://www.f5.com/services/resources/glossary/sql-injection), and [cookie poisoning](https://www.f5.com/services/resources/glossary/cookie-poisoning), among others. - -A WAF protects your web apps by filtering, monitoring, and blocking any malicious HTTP/S traffic traveling to the web application, and prevents any unauthorized data from leaving the app. It does this by adhering to a set of policies that help determine what traffic is malicious and what traffic is safe. Just as a proxy server acts as an intermediary to protect the identity of a client, a WAF operates in similar fashion but in the reverse—called a reverse proxy—acting as an intermediary that protects the web app server from a potentially malicious client. - -{{< call-out "note" >}} To learn more about what a WAF is and how it works, check out the F5 DevCentral video: [What is a Web Application Firewall?](https://www.youtube.com/watch?v=p8CQcF_9280){{< /call-out>}} - -  - -## How it works - -App Security on NGINX Controller provides an app‑centric self‑service model to address the security needs of modern apps. - -The App Security add-on uses the NGINX App Protect Web Application Firewall (F5 WAF for NGINX) enforcement engine on the data path (data plane). -When you enable WAF on an app component using NGINX Controller, a security policy (sets of security controls and enforcement logic) is deployed and applied to configured NGINX App Protect instances that process traffic for the app component. - -F5 WAF for NGINX inspects incoming traffic as specified in the Security Policy to identify potential threats. When malicious traffic is suspected or blocked, the NGINX Controller Analytics module logs security events and metrics. These are then included in the NGINX Controller Threat Visibility and Analytics reporting. 
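To make this more concrete, below is a minimal, illustrative sketch of the kind of NGINX App Protect configuration that runs on an F5 WAF for NGINX data-plane instance. NGINX Controller generates and manages the equivalent configuration for you when you enable WAF on an app component, so you never author it by hand; the module path, policy file name, logging profile, syslog destination, and upstream address shown here are placeholders assumed for illustration only.

```nginx
# Illustrative sketch only -- NGINX Controller produces and manages the
# equivalent configuration when WAF is enabled on an app component.
load_module modules/ngx_http_app_protect_module.so;  # NGINX App Protect enforcement module

events {}

http {
    upstream app_backend {
        server 10.0.0.10:8080;  # placeholder workload address
    }

    server {
        listen 80;

        # Enable enforcement and reference a declarative JSON security policy
        # (policy file path and name are placeholders).
        app_protect_enable on;
        app_protect_policy_file /etc/app_protect/conf/example_policy.json;

        # Emit security events so they can be collected and analyzed
        # (logging profile and syslog destination are placeholders).
        app_protect_security_log_enable on;
        app_protect_security_log "/etc/app_protect/conf/log_default.json" syslog:server=127.0.0.1:514;

        location / {
            proxy_pass http://app_backend;
        }
    }
}
```

In NGINX Controller App Security, the policy content comes from the Security Policy referenced by your Security Strategy, and the security log output is what feeds the security events and metrics described above.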
- -{{< call-out "note" >}}To learn more, read the [Threat Visibility and Analytics](https://www.nginx.com/blog/threat-visibility-analytics-nginx-controller-app-security/) blog post on [nginx.com](https://nginx.com).{{< /call-out>}} - -{{< img src="/ctlr/img/cas-overview.png" title="" alt="Controller App Security Overview Image" width="75%">}} - -## Security Policy - -In NGINX Controller, the Security Policy contains an F5 WAF for NGINX policy. The F5 WAF for NGINX policy has security controls and settings in a declarative JSON format. The Security Policy defines the rules and settings for application traffic inspection, detection of malicious traffic, and handling violations when they occur. For more about creating, updating, or deleting Security Policies, see the [Policies API Reference](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#operation/listPolicies). - -When enabling WAF to protect your Apps, you can either add your own custom Security Policy or use the default Security Policy. - -## Security Strategy - -A Security Strategy is a logical container for multiple Security Policies. In a Security Strategy, you can reference a Security Policy that represents a security risk profile. For example, you can map low- or high-risk security profiles to different Security Strategies as you deem fit for your Apps' specific use case or organizational needs. - -When you enable security on the App Component, you can specify the Security Strategy to protect it. You can use the same Security Strategy across multiple app components. The Security Policy referenced in the Security Strategy detects and protects against malicious traffic to the App Component. - -- **App Component** references **Security Strategy**; -- **Security Strategy** references **Security Policy**. - -For more about creating, updating, or deleting Security Policies, see the [Strategies API Reference](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#tag/Strategies). - -You can use a custom Security Strategy to protect your Apps, or you can use NGINX Controller's default Security Strategy, which contains a pre-defined WAF policy. - -{{< call-out "note" >}} - -The `/services/strategies/balanced_default` endpoint was replaced by `/security/strategies/balanced_default` in NGINX Controller ADC v3.18. - -- Specify the `StrategyRef` setting with `/security/strategies/balanced_default` instead of `/services/strategies/balanced_default`. - -Refer to the AskF5 knowledge base article [K02089505](https://support.f5.com/csp/article/K02089505) for more information. - -{{< /call-out >}} - diff --git a/content/controller/app-delivery/security/tutorials/_index.md b/content/controller/app-delivery/security/tutorials/_index.md deleted file mode 100644 index 838458c47..000000000 --- a/content/controller/app-delivery/security/tutorials/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: How to deploy and configure the App Security module -title: Manage App Security -weight: 200 -url: /nginx-controller/app-delivery/security/tutorials/ ---- - diff --git a/content/controller/app-delivery/security/tutorials/add-app-security-with-waf.md b/content/controller/app-delivery/security/tutorials/add-app-security-with-waf.md deleted file mode 100644 index 74c0e66d3..000000000 --- a/content/controller/app-delivery/security/tutorials/add-app-security-with-waf.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -description: How to add F5 NGINX Controller App Security to your applications. 
-nd-docs: DOCS-484 -title: Manage App Security -toc: true -weight: 100 -type: -- concept -- reference ---- - -## Overview - -You can use the App Security add-on for F5 NGINX Controller ADC to enable Web Application Firewall (WAF) capabilities to protect your applications. WAF lets you flag or block suspicious requests or attacks. WAF can be added to individual app components. - - -## Before You Begin - -Before proceeding with this guide, complete the following tasks. -{{< call-out "note" >}}These steps may need to be completed by a user with admin permissions.{{< /call-out >}} - -1. [Add an NGINX App Protect instance]({{< ref "/controller/infrastructure/instances/add-nap-instance.md" >}}) to NGINX Controller. - -In addition, the following resources must exist in order to complete the steps in this topic: - -- [Environment]({{< ref "/controller/services/manage-environments.md" >}}) -- [Gateway]({{< ref "/controller/services/manage-gateways.md" >}}) -- [Certs]({{< ref "/controller/services/manage-gateways.md" >}}) (required if your Components use HTTPS) -- [App and Component(s)]({{< ref "/controller/app-delivery/manage-apps.md" >}}) - -## Enable WAF for a Component using the Default Security Strategy - -To enable WAF functionality for Application Security using the default security strategy, send a POST or PUT request to the Components endpoint, with a JSON object similar to the following: - -```json - "security": { - "waf": { - "isEnabled": true - } - } -``` - -{{< call-out "note" >}}You need READ access to the `/security/strategies/` API path to enable WAF on a component. By default, only users with an admin role have full access to all API endpoint resources.{{< /call-out >}} - -This JSON object should be added to the Component endpoint similar to the following example: - -```json -{ - "metadata": { - "name": "secure", - "displayName": "protected web server", - "description": "ProtectedWeb Server", - "tags": [ - "dev", - "protected" - ] - }, - "desiredState": { - "ingress": { - "gatewayRefs": [ - { - "ref": "/services/environments/dev/gateways/dev-gw" - } - ], - "uris": { - "/secure": { - "matchMethod": "PREFIX" - } - } - }, - "security": { - "strategyRef": { - "ref": "/security/strategies/balanced_default" - }, - "waf": { - "isEnabled": true - } - }, - "backend": { - "ntlmAuthentication": "DISABLED", - "preserveHostHeader": "DISABLED", - "workloadGroups": { - "farm": { - "locationRefs": [ - { - "ref": "/infrastructure/locations/unspecified" - } - ], - "loadBalancingMethod": { - "type": "ROUND_ROBIN" - }, - "uris": { - "http://{{workload-1}}:8080": { - "isBackup": false, - "isDown": false, - "isDrain": false, - "resolve": "DISABLED" - }, - "http://{{workload-2}}:8080": { - "isBackup": false, - "isDown": false, - "isDrain": false, - "resolve": "DISABLED" - }, - "http://{{workload-3}}:8080": { - "isBackup": false, - "isDown": false, - "isDrain": false, - "resolve": "DISABLED" - }, - "http://{{workload-4}}:8080": { - "isBackup": false, - "isDown": false, - "isDrain": false, - "resolve": "DISABLED" - } - } - } - } - }, - "logging": { - "errorLog": "ENABLED", - "accessLog": { - "state": "DISABLED", - "format": "" - } - } - } -} -``` - -## Enable WAF for a Component Using Your Own F5 WAF for NGINX Policy - -Instead of using NGINX Controller's default policy for WAF, you can [bring your own NGINX App Protect Policy]({{< ref "/controller/app-delivery/security/concepts/bring-your-own-policy.md" >}}) for use in a Security Strategy to protect your app components. 
- -To do so, you first need to upload your F5 WAF for NGINX declarative JSON policy to the Security Policy endpoint and reference it in a Security Strategy. Then, you can reference the Security Strategy in the Component where you are enabling WAF. - -### Upload your F5 WAF for NGINX Policy - -To upload your NGINX App Protect declarative JSON Policy to NGINX Controller, use an HTTP client like cURL and send a `PUT` request to the [Security Policy REST API}(https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/) -The JSON object should be similar to the example below: - -```json -{ - "metadata": { - "name": "yourPolicyName", - "displayName": "App Protect Policy", - "description": "my special NAP policy", - "tags": ["test1", "test2"] - }, - "desiredState": { - "content": {"policy": {"name": "/Common/yourPolicyName", "template": {"name": "POLICY_TEMPLATE_NGINX_BASE"}, "applicationLanguage": "utf-8", "enforcementMode": "blocking", "signatures": [{"signatureId": 123458888, "enabled": false}, {"signatureId": 200000098, "enabled": false}, {"signatureId": 200001475, "enabled": false}, {"signatureId": 200002595, "enabled": false}], "bot-defense": {"settings": {"isEnabled": false}}, "headers": [{"name": "*", "type": "wildcard", "decodeValueAsBase64": "disabled"}, {"name": "*-bin", "type": "wildcard", "decodeValueAsBase64": "required"}, {"name": "Referer", "type": "explicit", "decodeValueAsBase64": "disabled"}, {"name": "Authorization", "type": "explicit", "decodeValueAsBase64": "disabled"}, {"name": "Transfer-Encoding", "type": "explicit", "decodeValueAsBase64": "disabled"}], "cookies": [{"name": "*", "type": "wildcard", "decodeValueAsBase64": "disabled"}], "parameters": [{"name": "*", "type": "wildcard", "decodeValueAsBase64": "disabled"}]}} - } -} -``` - -### Create or Update a Security Strategy with a BYO F5 WAF for NGINX Policy - -You can create or update a Security Strategy that references a BYO F5 WAF for NGINX policy by sending a `PUT` request to the [Strategies REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/) endpoint. - -The JSON object should be similar to the example below: - -```json - -{ - "metadata": { - "name": "yourSecStrategyName", - "displayName": "Security Strategy", - "description": "my special security strategy", - "tags": [ - "tag1", - "tag2" - ] - }, - "desiredState": { - "content": { - "securityPolicyRef": "/security/policies/yourPolicyName" - } - } -} - -``` - -### Add a BYO F5 WAF for NGINX policy to an App Component - -To add your BYO NGINX App Protect Policy to your App(s), you need to add a reference to the Security Strategy that contains the policy to your App Component. - -To do so, send a `PUT` request to the [Components REST API](https://docs.nginx.com/nginx-controller/api/ctlr-adc-api/#tag/Components) endpoint. - -The JSON object should be similar to the example below: - -```json - - "security": { - "strategyRef": { - "ref": "/security/strategies/" - }, - "waf": { - "isEnabled": true, - } - } - -``` - -{{< call-out "note" >}} - -The following WAF security parameters are not supported in App Components that reference a custom Security Strategy: - -- `isMonitorOnly` -- `signatureOverrides` - -These preceding parameters are supported by NGINX Controller's default policy for WAF. - -{{< /call-out >}} - -  - -## Verify that WAF is Enabled - -Complete the tasks in this section to verify that the Web Application Firewall is active and processing traffic. 
- -To verify that WAF has been enabled by NGINX Controller App Security to protect your app component, send an HTTP GET request to the app component. - -**Example using NGINX Controller's default policy**: GET: `https://[gateway FQDN]/?a= + +{{< /raw-html >}} diff --git a/content/nginxaas-google/changelog.md b/content/nginxaas-google/changelog.md new file mode 100644 index 000000000..5fc9588b5 --- /dev/null +++ b/content/nginxaas-google/changelog.md @@ -0,0 +1,47 @@ +--- +title: "Changelog" +weight: 900 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/changelog/ + +--- + +Learn about the latest updates, new features, and resolved bugs in F5 NGINXaaS for Google Cloud. + +To see a list of currently active issues, visit the [Known issues]({{< ref "/nginxaas-google/known-issues.md" >}}) page. + +## October 13, 2025 + +- {{% icon-feature %}} **NGINXaaS for Google Cloud is generally available** + +We are pleased to announce the general availability of F5 NGINXaaS for Google Cloud. + +F5 NGINXaaS for Google Cloud is a fully managed load balancer and application delivery service that streamlines cloud-native application delivery without the operational complexity of managing infrastructure. This service simplifies the deployment of APIs, microservices, and web applications while enhancing performance, visibility, security, and scalability in Google Cloud. + +Key features include adaptive load balancing, advanced connectivity patterns for deployment strategies like blue-green and canary, detailed visibility with over 200 real-time metrics, and strong security controls such as role-based access control and end-to-end encryption. The service also consolidates technology with unified L4/L7 load balancing combined with advanced security and programmability into a single platform for enhanced operational efficiency. + +This announcement marks a significant step in application delivery modernization, empowering organizations to improve user experiences and achieve seamless integration with Google Cloud Monitoring. + +To learn more, refer to the following resources: + +- **Product Information:** + + - [F5 NGINXaaS for Google Cloud](https://www.f5.com/products/nginx/f5-nginxaas-for-google-cloud) + - [Overview and architecture]({{< ref "/nginxaas-google/overview.md" >}}) + - [Getting Started]({{< ref "/nginxaas-google/getting-started/prerequisites/" >}}) + +- **Blogs:** [F5 NGINXaaS for Google Cloud: Delivering resilient, scalable applications ](https://f5.com/company/blog/delivering-resilient-scalable-applications.html) +- **Webinars:** [Why F5 NGINXaaS for Google Cloud is a game changer](https://events.actualtechmedia.com/on-demand/1603/why-f5-nginxaas-for-google-cloud-is-a-game-changer/) + +[Visit the Google Cloud Marketplace](https://console.cloud.google.com/marketplace/product/f5-7626-networks-public/nginxaas-google-cloud) and start leveraging NGINXaaS for Google Cloud today! + + +## September 18, 2025 + +- {{% icon-feature %}} **NGINXaaS for Google Cloud Early Access** + + NGINXaaS for Google Cloud is now available in Early Access. This offering provides a fully managed, scalable, and secure solution for deploying and managing NGINX instances on Google Cloud. + + - To learn more about NGINXaaS for Google Cloud, see the [Overview and architecture]({{< ref "/nginxaas-google/overview.md" >}}) topic. + - To deploy NGINXaaS, see the [Getting Started]({{< ref "/nginxaas-google/getting-started/prerequisites/" >}}) guide. 
diff --git a/content/nginxaas-google/disaster-recovery.md b/content/nginxaas-google/disaster-recovery.md new file mode 100644 index 000000000..698879d2b --- /dev/null +++ b/content/nginxaas-google/disaster-recovery.md @@ -0,0 +1,45 @@ +--- +title: Disaster recovery +weight: 750 +toc: true +url: /nginxaas/google/disaster-recovery/ +type: +- how-to +draft: true +--- + + +This guide describes how to configure disaster recovery (DR) for F5 NGINXaaS for Google Cloud deployments. The deployment architecture ensures users can access backend application servers (upstreams) continuously from an alternative region if the primary NGINXaaS deployment becomes unavailable. + + +### Architecture Overview + + + +## Prerequisites + + +## Configure disaster recovery + + +### Step 1: Terrraform setup + + +### Step 2: Deploy prerequisite infrastructure + + +### Step 3: Configure app servers (upstreams) + + +### Step 4: Peer the VNets + + + +### Step 5: Deploy NGINXaaS for Google Cloud in each region + + +### Step 6: DNS and failover + + +## Failover process + diff --git a/content/nginxaas-google/get-help/_index.md b/content/nginxaas-google/get-help/_index.md new file mode 100644 index 000000000..b88413f1a --- /dev/null +++ b/content/nginxaas-google/get-help/_index.md @@ -0,0 +1,5 @@ +--- +title: Get help +weight: 700 +url: /nginxaas/google/get-help/ +--- diff --git a/content/nginxaas-google/get-help/support.md b/content/nginxaas-google/get-help/support.md new file mode 100644 index 000000000..542dcba28 --- /dev/null +++ b/content/nginxaas-google/get-help/support.md @@ -0,0 +1,41 @@ +--- +title: Support +weight: 100 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/get-help/support +type: +- how-to +--- + +## Contact F5 support + +To contact support about F5 NGINXaaS for Google Cloud (NGINXaaS): + +1. Go to the [MyF5 portal](https://my.f5.com) and log in with your F5 account. + +1. Go to the **Support** section and select **Create a Case**. + +1. Select **NGINX Products** as the product family. + +1. Select **NGINXaaS for Google Cloud** in the Product dropdown. + +1. Fill out the **Subject** and **Description** sections, and include the following details about the specific issue: + + 1. Identifying Information: + - Deployment identifier (Object ID and name) + - Account identifier/name + 1. Issue Details: + - A detailed description of the issue + - The specific resources involved (for example: deployment, configuration, certificate, and so on) + - The operation you were performing when the issue occurred (for example: creating a deployment, uploading a configuration, adding a certificate, and so on) + - Any error messages displayed + - The time when the issue occurred + +1. Fill out the fields in the **Additional information** section including: + + - **Subscription ID**: select your F5 or NGINX subscription ID (use `No subscription available` if you don't have one) + - **Assign a priority** + - **Reason for contact** + +1. Complete the **Contact details** section of your case and select **Submit**. 
diff --git a/content/nginxaas-google/get-help/troubleshooting.md b/content/nginxaas-google/get-help/troubleshooting.md new file mode 100644 index 000000000..f71139af2 --- /dev/null +++ b/content/nginxaas-google/get-help/troubleshooting.md @@ -0,0 +1,9 @@ +--- +title: Troubleshooting guide +toc: false +url: /nginxaas/google/get-help/troubleshooting +weight: 300 +draft: true +--- + + diff --git a/content/nginxaas-google/getting-started/_index.md b/content/nginxaas-google/getting-started/_index.md new file mode 100644 index 000000000..b94e09050 --- /dev/null +++ b/content/nginxaas-google/getting-started/_index.md @@ -0,0 +1,6 @@ +--- +title: Getting started +weight: 200 +draft: false +url: /nginxaas/google/getting-started/ +--- diff --git a/content/nginxaas-google/getting-started/create-deployment/_index.md b/content/nginxaas-google/getting-started/create-deployment/_index.md new file mode 100644 index 000000000..242d27e0a --- /dev/null +++ b/content/nginxaas-google/getting-started/create-deployment/_index.md @@ -0,0 +1,5 @@ +--- +title: Create a deployment +weight: 200 +url: /nginxaas/google/getting-started/create-deployment/ +--- diff --git a/content/nginxaas-google/getting-started/create-deployment/deploy-console.md b/content/nginxaas-google/getting-started/create-deployment/deploy-console.md new file mode 100644 index 000000000..99cdb2caa --- /dev/null +++ b/content/nginxaas-google/getting-started/create-deployment/deploy-console.md @@ -0,0 +1,110 @@ +--- +title: Deploy using the NGINXaaS Console +weight: 100 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/getting-started/create-deployment/deploy-console/ +type: +- how-to +--- + +## Overview + +This guide explains how to deploy F5 NGINXaaS for Google Cloud (NGINXaaS) using [Google Cloud Console](https://console.cloud.google.com) and the NGINXaaS Console. The deployment process involves creating a new deployment, configuring the deployment, and testing the deployment. + +## Before you begin + +Before you can deploy NGINXaaS, follow the steps in the [Prerequisites]({{< ref "/nginxaas-google/getting-started/prerequisites/" >}}) topic to subscribe to the NGINXaaS for Google Cloud offering in the Google Cloud Marketplace. + +### Create a network attachment + +NGINXaaS requires a [network attachment](https://cloud.google.com/vpc/docs/about-network-attachments) to connect your consumer Virtual Private Cloud (VPC) network and your NGINXaaS deployment's VPC network. + +1. Access the [Google Cloud Console](https://console.cloud.google.com/). +1. Create a consumer VPC network and subnetwork. See [Google's documentation on creating a VPC and subnet](https://cloud.google.com/vpc/docs/create-modify-vpc-networks#console_1) for a step-by-step guide. + - The region you choose in this step must match the region where your NGINXaaS deployment will be created. +1. Create a network attachment in your new subnet that automatically accepts connections. See [Google's documentation on creating a network attachment](https://cloud.google.com/vpc/docs/create-manage-network-attachments#console_1) for a step-by-step guide. +1. Make a note of the network attachment ID. You will need it in the next steps to create your NGINXaaS deployment. 
+ + {{< call-out "caution" >}}NGINXaaS for Google Cloud currently supports the following regions: + + {{< table "table" >}} + |NGINXaaS Geography | Google Cloud Regions | + |-----------|---------| + | US | us-west1, us-east1, us-central1 | + | EU | europe-west2, europe-west1 | + {{< /table >}} + + {{< /call-out >}} + +## Access the NGINXaaS Console + +Once you have completed the subscription process and created a network attachment, you can access the NGINXaaS Console. + +- Visit [https://console.nginxaas.net/](https://console.nginxaas.net/) to access the NGINXaaS Console. +- Log in to the console with your Google credentials. +- Select the appropriate Geography to work in, based on the region your network attachment was created in. + +## Create or import an NGINX configuration + +{{< include "/nginxaas-google/create-or-import-nginx-config.md" >}} + +## Create a new deployment + +Next, create a new NGINXaaS deployment using the NGINXaaS Console: + +1. On the left menu, select **Deployments**. +1. Select {{< icon "plus" >}} **Add Deployment** to create a new deployment. + + - Enter a **Name**. + - Add an optional description for your deployment. + - Change the **NCU Capacity** if needed. + - The default value of `20 NCU` should be adequate for most scenarios. + - In the Cloud Details section, enter the network attachment ID that [you created earlier](#create-a-network-attachment) or select it in the **Network attachment** list. + - The network attachment ID is formatted like the following example: `projects/my-google-project/regions/us-east1/networkAttachments/my-network-attachment`. + - In the Apply Configuration section, select an NGINX configuration [you created earlier](#create-or-import-an-nginx-configuration) from the **Choose Configuration** list. + - Select a **Configuration Version** from the list. + - Select **Submit** to begin the deployment process. + +Your new deployment will appear in the list of deployments. The status of the deployment will be "Pending" while the deployment is being created. Once the deployment is complete, the status will change to "Ready". + +## Configure your deployment + +In the NGINXaaS Console, + +1. To open the details of your deployment, select its name from the list of deployments. + - You can view the details of your deployment, including the status, region, network attachment, NGINX configuration, and more. +1. Select **Edit** to modify the deployment description, and NCU Capacity. + - You can also configure monitoring from here. Detailed instructions can be found in [Enable Monitoring]({{< ref "/nginxaas-google/monitoring/enable-monitoring.md" >}}) +1. Select **Update** to save your changes. +1. Select the Configuration tab to view the current NGINX configuration associated with the deployment. +1. Select **Update Configuration** to change the NGINX configuration associated with the deployment. +1. To modify the contents of the NGINX configuration, see [Update an NGINX Configuration]({{< ref "/nginxaas-google/getting-started/nginx-configuration/nginx-configuration-console.md#update-an-nginx-configuration" >}}). + +## Set up connectivity to your deployment + +To set up connectivity to your NGINXaaS deployment, you will need to configure a [Private Service Connect backend](https://cloud.google.com/vpc/docs/private-service-connect-backends). + +1. Access the [Google Cloud Console](https://console.cloud.google.com/). +1. Create a public IP address. 
See [Google's documentation on reserving a static address](https://cloud.google.com/load-balancing/docs/tcp/set-up-ext-reg-tcp-proxy-zonal#console_3) for a step-by-step guide. +1. Create a Private Service Connect Network Endpoint Group (PSC NEG). See [Google's documentation on creating a NEG](https://cloud.google.com/vpc/docs/access-apis-managed-services-private-service-connect-backends#console) for a step-by-step guide. + - Set **Network endpoint group type** to **Private Service Connect NEG (Regional)**. + - Set **Taget** to **Published service**. + - For **Target service**, enter your NGINXaaS deployment's Service Attachment, which is visible on the `Deployment Details` section for your deployment. + - For **Producer port**, enter the port your NGINX server is listening on. If you're using the default NGINX config, enter port `80`. + - For **Network** and **Subnetwork** select your consumer VPC network and subnet. +1. Create a proxy-only subnet in your consumer VPC. See [Google's documentation on creating a proxy-only subnet](https://cloud.google.com/load-balancing/docs/tcp/set-up-ext-reg-tcp-proxy-zonal#console_1) for a step-by-step guide. +1. Create a regional external proxy Network Load Balancer. See [Google's documentation on configuring the load balancer](https://cloud.google.com/load-balancing/docs/tcp/set-up-ext-reg-tcp-proxy-zonal#console_6) for a step-by-step guide. + - For **Network**, select your consumer VPC network. + - For **Backend configuration**, follow [Google's step-by-step guide to add a backend](https://cloud.google.com/vpc/docs/access-apis-managed-services-private-service-connect-backends#console_5). + - In the **Frontend configuration** section, + - For **IP address**, select the public IP address created earlier. + - For **Port number**, enter the same port as your NEG's Producer port, for example, port `80`. + +## Test your deployment + +1. To test your deployment, go to the IP address created in [Set up connectivity to your deployment]({{< ref "/nginxaas-google/getting-started/create-deployment/deploy-console.md#set-up-connectivity-to-your-deployment" >}}) using your favorite web browser. + +## What's next + +[Manage your NGINXaaS users]({{< ref "/nginxaas-google/getting-started/manage-users-accounts.md" >}}) diff --git a/content/nginxaas-google/getting-started/create-deployment/deploy-terraform.md b/content/nginxaas-google/getting-started/create-deployment/deploy-terraform.md new file mode 100644 index 000000000..81d2e27b0 --- /dev/null +++ b/content/nginxaas-google/getting-started/create-deployment/deploy-terraform.md @@ -0,0 +1,29 @@ +--- +title: Deploy using Terraform +weight: 300 +toc: true +url: /nginxaas/google/getting-started/create-deployment/deploy-terraform/ +type: +- how-to +draft: true +--- + +## Overview + +F5 NGINXaaS for Google Cloud (NGINXaaS) deployments can be managed using Terraform. This document outlines common Terraform workflows for NGINXaaS. + +## Prerequisites + +{{< include "/nginxaas-google/terraform-prerequisites.md" >}} + +## Create a deployment + + +## Delete a deployment + + +## Additional resources + +- If you're just starting with Terraform, you can learn more on their [official website](https://www.terraform.io/). 
+ +{{< include "/nginxaas-google/terraform-resources.md" >}} \ No newline at end of file diff --git a/content/nginxaas-google/getting-started/manage-users-accounts.md b/content/nginxaas-google/getting-started/manage-users-accounts.md new file mode 100644 index 000000000..5655a632d --- /dev/null +++ b/content/nginxaas-google/getting-started/manage-users-accounts.md @@ -0,0 +1,71 @@ +--- +title: Manage users and accounts +weight: 300 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/getting-started/manage-users-accounts/ +type: +- how-to +--- + +## Overview + +This document explains how to manage users and accounts in F5 NGINXaaS for Google Cloud using the NGINXaaS console. + +Before you start, ensure you understand the following concepts: + +- **NGINXaaS Account**: An NGINXaaS Account is created when you subscribe to *F5 NGINXaaS for Google Cloud* via the Google Cloud Marketplace, as described in [prerequisites]({{< ref "/nginxaas-google/getting-started/prerequisites.md" >}}). You may create multiple NGINXaaS Accounts by signing up with different GCP billing accounts. +- **User**: NGINXaaS Users are granted access to all resources in the NGINXaaS Account. User authentication is performed securely via Google Cloud, requiring a matching identity. Individuals can be added as users to multiple NGINXaaS Accounts, and can switch between them using the steps documented below. +- **Authorized Domains**: The list of domains allowed to authenticate into the NGINXaaS Account using Google authentication. + - This can be used to restrict access to Google identities within your Google Cloud Organization or Google Workspace, or other known, trusted Workspaces. For example, your Google Cloud Organization may have users created under the `example.com` domain. By setting the Authorized Domains in your NGINXaaS Account to only allow `example.com`, users attempting to log in with the same email associated with `alternative.net` Google Workspace would not be authenticated. + - By default, an NGINXaaS Account has an empty authorized domains list, which accepts matching users from any Google Workspace. + +## Add or edit a user + +An existing NGINXaaS Account user can add additional users following these steps: + +1. Access the [NGINXaaS Console](https://console.nginxaas.net/). +1. Log in to the console with your Google credentials. +1. Navigate to **Users** page on the left menu, then select **Add User**. +1. Enter the **Email** address for the user to be added. The email must match the individual's Google User to be able to authenticate successfully. +1. Select **Create User** to save the changes. + +The new user will appear in the list of users on the **Users** page. Their **Google Identity Domain** will remain empty until they log in for the first time. + +## Modify account settings + +As an authenticated user, you may modify the authorized domains and name of an NGINXaaS Account. + + +### Modify Authorized Domains + +1. Select **Account Details** under the **Settings** section on the left menu. +1. Select **Edit** in the **Authorized Domains** section. +1. To add a new authorized domain, select **Add Domain** and enter the new domain. +1. To remove an existing authorized domain, select the Recycle Bin button next to it. +1. Select **Update** to save changes. + +{{< call-out "note" >}}You cannot remove an authorized domain from the list if it matches an existing user's Google Identity Domain. 
To remove access from that domain you must first delete every NGINXaaS user that is associated with the domain.{{< /call-out >}} + +### Modify the name of an account + +1. Select **Account Details** under the **Settings** section on the left menu. +2. Select **Edit** in the **Account Info** section. +3. Enter new name in **Account Name** field, then select **Update** to save changes. + +## Switch accounts + +To switch to a different NGINXaaS Account, select the profile symbol in the top right corner and choose **Switch Account**. This opens a page showing the list of all the NGINXaaS Accounts that your Google Identity is linked to; select the account you want to switch to. + +## Delete a user + +An authenticated user can delete other users (other than their own user account). Deletion is irreversible; the deleted user will no longer be able to access the NGINXaaS Account. + +To delete a user in an NGINXaaS Account: + +1. Select **Account Details** under the **Settings** section on the left menu. +1. Select the ellipsis (three dots) menu next to the user you want to delete. +1. Select **Delete** in the menu. The deleted user will no longer appear in the **Users** page. + +## What's next +[Add certificates using the NGINXaaS Console]({{< ref "/nginxaas-google/getting-started/ssl-tls-certificates/ssl-tls-certificates-console.md" >}}) \ No newline at end of file diff --git a/content/nginxaas-google/getting-started/manual-scaling.md b/content/nginxaas-google/getting-started/manual-scaling.md new file mode 100644 index 000000000..8038baa4a --- /dev/null +++ b/content/nginxaas-google/getting-started/manual-scaling.md @@ -0,0 +1,55 @@ +--- +title: Scale your deployment +weight: 400 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/getting-started/manual-scaling/ +type: +- how-to +--- + +F5 NGINXaaS for Google Cloud (NGINXaaS) supports manual scaling of your deployment, allowing you to adapt to application traffic demands while controlling cost. + +An NGINXaaS deployment can be scaled out to increase the capacity or scaled in to decrease the capacity. Capacity is measured in [NGINX Capacity Units (NCU)](#nginx-capacity-unit-ncu). + +In this document you will learn: + +- What an NGINX Capacity Unit (NCU) is +- How to manually scale your deployment +- What capacity restrictions apply for your plan +- How to monitor capacity usage +- How to estimate the amount of capacity to provision + +## NGINX Capacity Unit (NCU) + +An NGINX Capacity Unit (NCU) quantifies the capacity of an NGINX instance based on the underlying compute resources. This abstraction allows you to specify the desired capacity in NCUs without having to consider the regional hardware differences. + +## Manual scaling + +To update the capacity of your deployment using the console, + +In the NGINXaaS Console, + +1. On the left menu, select **Deployments**. +2. Select the deployment you wish to edit the NCU capacity for. +3. On the **Details** tab, select the **Edit** button on the right to open the Edit Deployment Metadata pane + - Enter the desired value for the **NCU Capacity** under **Scale**. + - Select **Update** to begin the scaling process. + +The status of the deployment will be "Pending" while the deployment's capacity is being changed. Once the requested capacity provisioning is complete, the status will change to "Ready". 
+ + {{< call-out "note" >}}There's no downtime while an NGINXaaS deployment changes capacity.{{< /call-out >}} + +## Capacity restrictions + +The following table outlines constraints on the specified capacity based on the chosen Marketplace plan, including the minimum capacity required for a deployment to be highly available, and the maximum capacity. By default, an NGINXaaS for Google Cloud deployment will be created with a capacity of 20 NCUs. + +{{}} + +| **Marketplace Plan** | **Minimum Capacity (NCUs)** | **Maximum Capacity (NCUs)** | +|------------------------------|-----------------------------|-----------------------------| +| Enterprise plan(s) | 10 | 100 | + +{{}} + +{{< call-out "note" >}}If you have higher capacity needs than the maximum capacity, please [open a request](https://my.f5.com/manage/s/) and specify the Resource ID of your NGINXaaS deployment, the region, and the desired maximum capacity you wish to scale to.{{< /call-out >}} diff --git a/content/nginxaas-google/getting-started/nginx-configuration/_index.md b/content/nginxaas-google/getting-started/nginx-configuration/_index.md new file mode 100644 index 000000000..7d6942d00 --- /dev/null +++ b/content/nginxaas-google/getting-started/nginx-configuration/_index.md @@ -0,0 +1,5 @@ +--- +title: Upload an NGINX configuration +weight: 500 +url: /nginxaas/google/getting-started/nginx-configuration/ +--- diff --git a/content/nginxaas-google/getting-started/nginx-configuration/nginx-configuration-console.md b/content/nginxaas-google/getting-started/nginx-configuration/nginx-configuration-console.md new file mode 100644 index 000000000..e187b0daf --- /dev/null +++ b/content/nginxaas-google/getting-started/nginx-configuration/nginx-configuration-console.md @@ -0,0 +1,37 @@ +--- +title: Create or upload using the NGINXaaS Console +weight: 100 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/getting-started/nginx-configuration/nginx-configuration-console/ +type: +- how-to +--- + +You can apply an NGINX configuration to your F5 NGINXaaS for Google Cloud (NGINXaaS) deployment using the NGINXaaS Console. + +## Prerequisites + +- If you haven't done it yet, follow the steps in the [Prerequisites]({{< ref "/nginxaas-google/getting-started/prerequisites/" >}}) topic to subscribe to the NGINXaaS for Google Cloud offer in the Google Cloud Marketplace. + +## Access the NGINXaaS Console + +{{< include "/nginxaas-google/access-console.md" >}} + +## Create or import an NGINX configuration + +{{< include "/nginxaas-google/create-or-import-nginx-config.md" >}} + +## Update an NGINX configuration + +{{< include "/nginxaas-google/update-nginx-config.md" >}} + +## Delete NGINX configuration Files + +1. On the left menu, select **Configurations**. +1. On the list of configurations, select the ellipses (three dots) icon next to the configuration you want to delete. +1. Select **Delete**. +1. Confirm that you want to delete the configuration. 
+ +## What's next +[Monitor your deployment]({{< ref "/nginxaas-google/monitoring/enable-monitoring.md" >}}) \ No newline at end of file diff --git a/content/nginxaas-google/getting-started/nginx-configuration/nginx-configurations-terraform.md b/content/nginxaas-google/getting-started/nginx-configuration/nginx-configurations-terraform.md new file mode 100644 index 000000000..1e380af86 --- /dev/null +++ b/content/nginxaas-google/getting-started/nginx-configuration/nginx-configurations-terraform.md @@ -0,0 +1,31 @@ +--- +title: Upload using Terraform +weight: 300 +toc: true +url: /nginxaas/google/getting-started/nginx-configuration/nginx-configurations-terraform/ +type: +- how-to +draft: true +--- + + +## Overview + +F5 NGINXaaS for Google Cloud (NGINXaaS) configurations can be managed using Terraform. This document outlines common Terraform workflows for NGINXaaS. + +## Prerequisites + +{{< include "/nginxaas-google/terraform-prerequisites.md" >}} + +## Upload an NGINX configuration + + +## Manage an NGINX configuration + + +## Delete a deployment + + +## Additional resources + +{{< include "/nginxaas-google/terraform-resources.md" >}} diff --git a/content/nginxaas-google/getting-started/nginx-configuration/overview.md b/content/nginxaas-google/getting-started/nginx-configuration/overview.md new file mode 100644 index 000000000..ed35664be --- /dev/null +++ b/content/nginxaas-google/getting-started/nginx-configuration/overview.md @@ -0,0 +1,915 @@ +--- +title: Overview +weight: 50 +toc: true +url: /nginxaas/google/getting-started/nginx-configuration/overview/ +nd-content-type: reference +nd-product: N4GC +--- + +This document provides details about using NGINX configuration files with your +F5 NGINXaaS for Google Cloud deployment, restrictions, and available directives. + +## NGINX configuration common user workflows + +NGINX configurations can be uploaded to your NGINXaaS for Google Cloud deployment using the console. The following documents provide detailed steps on how to upload NGINX configurations: + +- [Upload using the console]({{< ref "/nginxaas-google/getting-started/nginx-configuration/nginx-configuration-console.md" >}}) + +The topics below provide information on NGINX configuration restrictions and directives that are supported by NGINXaaS for Google Cloud when using any of the above workflows. + +## NGINX configuration required content + +F5 NGINXaaS for Google Cloud requires a few specific configuration statements to be included in order for the NGINXaaS deployment to function properly when applied. All of these are included in the "F5 NGINXaaS Default" config, which is the recommended starting config to create. + +1. There must be an http server block (which can be referenced via `include` statement) with the following contents: + + ```nginx + server { + listen 49151; + access_log off; + location /api { + api write=on; + allow 127.0.0.1; + deny all; + } + location /ready { + return 200; + } + } + ``` + + This server block enables NGINXaaS to access the [NGINX Plus monitoring API]({{< ref "nginx/admin-guide/monitoring/live-activity-monitoring.md" >}}) for constant monitoring of the NGINXaaS deployment's health and collection of metrics. It also exposes a simple readiness endpoint for NGINXaaS to regularly validate the availability of the NGINXaaS deployment. Without this content, a deployment using this config will likely report an unhealthy or failed status. + +1. 
The following top-level NGINX directives must exactly match these configuration settings: + + ```nginx + user nginx; + worker_processes auto; + pid /run/nginx/nginx.pid; + ``` + +NGINXaaS deployments must run as the `nginx` user for proper functionality and security. +`worker_processes` should be explicitly set to `auto` to guarantee optimal performance at any deployment scale. + +Using any other file path for the `pid` directive may result in a failure to apply the config. + +## NGINX filesystem restrictions + +There are limits to where files, including NGINX configuration files, certificate files, and any other files uploaded to the deployment, can be placed on the filesystem. There are also limits on what directories NGINX can access during runtime. These limits help support the separation of roles, enforce the principal of least privilege, and ensure the smooth operation of the system. + +{{}} + | Allowed Directory | User can upload files to | NGINX master process can read | NGINX master process can write | NGINX worker process can read | NGINX worker process can write | + | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | + | /etc/nginx | {{< icon "check" >}} | {{< icon "check" >}} | | | | + | /opt | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | + | /srv | {{< icon "check" >}} | {{< icon "check" >}} | | {{< icon "check" >}} | | + | /tmp | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | + | /spool/nginx | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | + | /var/cache/nginx | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | + | /var/spool/nginx | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | + | /var/www | {{< icon "check" >}} | {{< icon "check" >}} | | {{< icon "check" >}} | | +{{< /table >}} + +For example, `/etc/nginx` is only readable by the NGINX master process, making it a secure location for certificate files that won't be accidentally served due to configuration errors. `/var/www` is a secure location for static content because the NGINX worker process can serve files from it but cannot modify them, ensuring content integrity. `/tmp` is a good choice for storing temporary files with `proxy_temp_path` or `client_body_temp_path` since it is writable by the NGINX worker process. + +If you need access to additional directories, please [contact us]({{< ref "/nginxaas-google/get-help.md" >}}). + +## Disallowed configuration directives + +The following directives are not supported because of specific limitations. If you include any of these directives in your NGINX configuration, you'll get an error. + +{{< table >}} +| Disallowed Directive | Reason | +|------------------ | ----------------- | +| ssl_engine | No hardware SSL accelerator is available. | +| debug_points | NGINXaaS does not provide access to NGINX processes for debugging. | +| fastcgi_bind
+## NGINX filesystem restrictions
+
+There are limits to where files, including NGINX configuration files, certificate files, and any other files uploaded to the deployment, can be placed on the filesystem. There are also limits on which directories NGINX can access at runtime. These limits help support the separation of roles, enforce the principle of least privilege, and ensure the smooth operation of the system.
+
+{{< table >}}
+ | Allowed Directory | User can upload files to | NGINX master process can read | NGINX master process can write | NGINX worker process can read | NGINX worker process can write |
+ | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- |
+ | /etc/nginx | {{< icon "check" >}} | {{< icon "check" >}} | | | |
+ | /opt | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} |
+ | /srv | {{< icon "check" >}} | {{< icon "check" >}} | | {{< icon "check" >}} | |
+ | /tmp | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} |
+ | /spool/nginx | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} |
+ | /var/cache/nginx | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} |
+ | /var/spool/nginx | | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} | {{< icon "check" >}} |
+ | /var/www | {{< icon "check" >}} | {{< icon "check" >}} | | {{< icon "check" >}} | |
+{{< /table >}}
+
+For example, `/etc/nginx` is only readable by the NGINX master process, making it a secure location for certificate files that won't be accidentally served due to configuration errors. `/var/www` is a secure location for static content because the NGINX worker process can serve files from it but cannot modify them, ensuring content integrity. `/tmp` is a good choice for storing temporary files with `proxy_temp_path` or `client_body_temp_path` since it is writable by the NGINX worker process.
+
+If you need access to additional directories, please [contact us]({{< ref "/nginxaas-google/get-help.md" >}}).
+
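+As an illustration of these rules, the following fragment (meant to be merged into a complete configuration such as the sketch above) keeps certificate files under `/etc/nginx`, serves static content from `/var/www`, and writes temporary files to `/tmp`. The file names `cert.pem` and `cert.key`, the `/var/www/html` root, the `/tmp` subdirectories, and `example.com` are placeholders for your own values:
+
+```nginx
+http {
+    # /tmp is writable by the NGINX worker process, so it suits temporary files.
+    proxy_temp_path       /tmp/nginx-proxy-temp;
+    client_body_temp_path /tmp/nginx-client-body;
+
+    server {
+        listen 443 ssl;
+        server_name example.com; # placeholder server name
+
+        # /etc/nginx is readable only by the master process, so key material is never exposed to workers.
+        ssl_certificate     /etc/nginx/cert.pem;
+        ssl_certificate_key /etc/nginx/cert.key;
+
+        # /var/www is readable (but not writable) by the worker process, so static content stays intact.
+        root /var/www/html;
+    }
+}
+```
+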
+## Disallowed configuration directives
+
+The following directives are not supported because of specific limitations. If you include any of these directives in your NGINX configuration, you will get an error.
+
+{{< table >}}
+| Disallowed Directive | Reason |
+|------------------ | ----------------- |
+| ssl_engine | No hardware SSL accelerator is available. |
+| debug_points | NGINXaaS does not provide access to NGINX processes for debugging. |
+| fastcgi_bind <br> grpc_bind <br> memcached_bind <br> proxy_bind <br> scgi_bind <br> uwsgi_bind | Source IP specification for active-active deployments is not allowed. |
+| quic_bpf | QUIC connection migration is not currently supported for active-active deployments. |
+{{< /table >}}
+
+You may find that a few directives are not listed here as either allowed or disallowed. Our team is working on adding support for these directives.
+
+## Directives that cannot be overridden
+
+The following directives cannot be overridden by the user-provided configuration.
+
+{{< table >}}
+| Persistent Directive | Value | Reason |
+|------------------ | ----------------------- | -----------------|
+| `user` | `nginx` | The `nginx` user has the correct permissions for accessing certificates, policy files, and other auxiliary files. |
+| `worker_processes` | `auto` | Set to `auto` to automatically set `worker_processes` to the number of CPU cores. |
+| `pid` | `/run/nginx/nginx.pid` | Set to this value to allow NGINXaaS to automatically manage the NGINX master process. |
+| `daemon` | `on` | Automatically set to `on` to allow NGINXaaS to manage the NGINX master process. |
+| `master_process` | `on` | This directive is intended for NGINX developers. |
+| `worker_cpu_affinity` | `auto` | The value `auto` allows binding worker processes automatically to available CPUs based on the current capacity of the deployment. |
+{{< /table >}}
+
+## Configuration directives list
+
+NGINXaaS supports a limited set of NGINX directives.
+
+{{< details summary="Alphabetical index of directives">}}
+
+[absolute_redirect](https://nginx.org/en/docs/http/ngx_http_core_module.html#absolute_redirect)\
+[accept_mutex](https://nginx.org/en/docs/ngx_core_module.html#accept_mutex)\
+[accept_mutex_delay](https://nginx.org/en/docs/ngx_core_module.html#accept_mutex_delay)\
+[access_log (ngx_http_log_module)](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log)\
+[access_log (ngx_stream_log_module)](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#access_log)\
+[add_after_body](https://nginx.org/en/docs/http/ngx_http_addition_module.html#add_after_body)\
+[add_before_body](https://nginx.org/en/docs/http/ngx_http_addition_module.html#add_before_body)\
+[add_header](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_header)\
+[add_trailer](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_trailer)\
+[addition_types](https://nginx.org/en/docs/http/ngx_http_addition_module.html#addition_types)\
+[aio](https://nginx.org/en/docs/http/ngx_http_core_module.html#aio)\
+[aio_write](https://nginx.org/en/docs/http/ngx_http_core_module.html#aio_write)\
+[alias](https://nginx.org/en/docs/http/ngx_http_core_module.html#alias)\
+[allow (ngx_http_access_module)](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow)\
+[allow (ngx_stream_access_module)](https://nginx.org/en/docs/stream/ngx_stream_access_module.html#allow)\
+[ancient_browser](https://nginx.org/en/docs/http/ngx_http_browser_module.html#ancient_browser)\
+[ancient_browser_value](https://nginx.org/en/docs/http/ngx_http_browser_module.html#ancient_browser_value)\
+[auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic)\
+[auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file)\
+[auth_delay](https://nginx.org/en/docs/http/ngx_http_core_module.html#auth_delay)\
+[auth_http](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http)\
+[auth_http_header](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http_header)\ +[auth_http_pass_client_cert](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http_pass_client_cert)\ +[auth_http_timeout](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http_timeout)\ +[auth_jwt](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt)\ +[auth_jwt_claim_set](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_claim_set)\ +[auth_jwt_header_set](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_header_set)\ +[auth_jwt_key_cache](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache)\ +[auth_jwt_key_file](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file)\ +[auth_jwt_key_request](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request)\ +[auth_jwt_leeway](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_leeway)\ +[auth_jwt_require](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require)\ +[auth_jwt_type](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type)\ +[auth_oidc](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#auth_oidc)\ +[auth_request](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html#auth_request)\ +[auth_request_set](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html#auth_request_set)\ +[autoindex](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex)\ +[autoindex_exact_size](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex_exact_size)\ +[autoindex_format](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex_format)\ +[autoindex_localtime](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex_localtime)\ +[break](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#break)\ +[connect_timeout](https://nginx.org/en/docs/ngx_mgmt_module.html#connect_timeout)\ +[charset](https://nginx.org/en/docs/http/ngx_http_charset_module.html#charset)\ +[charset_map](https://nginx.org/en/docs/http/ngx_http_charset_module.html#charset_map)\ +[charset_types](https://nginx.org/en/docs/http/ngx_http_charset_module.html#charset_types)\ +[chunked_transfer_encoding](https://nginx.org/en/docs/http/ngx_http_core_module.html#chunked_transfer_encoding)\ +[client_body_buffer_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size)\ +[client_body_in_file_only](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_file_only)\ +[client_body_in_single_buffer](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_single_buffer)\ +[client_body_temp_path](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_temp_path)\ +[client_body_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout)\ +[client_header_buffer_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size)\ +[client_header_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout)\ +[client_id](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#client_id)\ +[client_max_body_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size)\ +[client_secret](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#client_secret)\ 
+[config_url](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#config_url)\ +[connection_pool_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#connection_pool_size)\ +[cookie_name](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#cookie_name)\ +[create_full_put_path](https://nginx.org/en/docs/http/ngx_http_dav_module.html#create_full_put_path)\ +[daemon](https://nginx.org/en/docs/ngx_core_module.html#daemon)\ +[dav_access](https://nginx.org/en/docs/http/ngx_http_dav_module.html#dav_access)\ +[dav_methods](https://nginx.org/en/docs/http/ngx_http_dav_module.html#dav_methods)\ +[debug_connection](https://nginx.org/en/docs/ngx_core_module.html#debug_connection)\ +[default_type](https://nginx.org/en/docs/http/ngx_http_core_module.html#default_type)\ +[deny (ngx_http_access_module)](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny)\ +[deny (ngx_stream_access_module)](https://nginx.org/en/docs/stream/ngx_stream_access_module.html#deny)\ +[directio](https://nginx.org/en/docs/http/ngx_http_core_module.html#directio)\ +[directio_alignment](https://nginx.org/en/docs/http/ngx_http_core_module.html#directio_alignment)\ +[disable_symlinks](https://nginx.org/en/docs/http/ngx_http_core_module.html#disable_symlinks)\ +[empty_gif](https://nginx.org/en/docs/http/ngx_http_empty_gif_module.html#empty_gif)\ +[enforce_initial_report](https://nginx.org/en/docs/ngx_mgmt_module.html#enforce_initial_report)\ +[env](https://nginx.org/en/docs/ngx_core_module.html#env)\ +[error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log)\ +[error_page](https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page)\ +[etag](https://nginx.org/en/docs/http/ngx_http_core_module.html#etag)\ +[events](https://nginx.org/en/docs/ngx_core_module.html#events)\ +[expires](https://nginx.org/en/docs/http/ngx_http_headers_module.html#expires)\ +[extra_auth_args](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#extra_auth_args)\ +[f4f](https://nginx.org/en/docs/http/ngx_http_f4f_module.html#f4f)\ +[f4f_buffer_size](https://nginx.org/en/docs/http/ngx_http_f4f_module.html#f4f_buffer_size)\ +[fastcgi_buffer_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffer_size)\ +[fastcgi_buffering](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffering)\ +[fastcgi_buffers](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffers)\ +[fastcgi_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_busy_buffers_size)\ +[fastcgi_cache](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache)\ +[fastcgi_cache_background_update](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_background_update)\ +[fastcgi_cache_bypass](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_bypass)\ +[fastcgi_cache_key](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_key)\ +[fastcgi_cache_lock](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_lock)\ +[fastcgi_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_lock_age)\ +[fastcgi_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_lock_timeout)\ +[fastcgi_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_max_range_offset)\ 
+[fastcgi_cache_methods](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_methods)\ +[fastcgi_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_min_uses)\ +[fastcgi_cache_path](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_path)\ +[fastcgi_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_revalidate)\ +[fastcgi_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_use_stale)\ +[fastcgi_cache_valid](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_valid)\ +[fastcgi_catch_stderr](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_catch_stderr)\ +[fastcgi_connect_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_connect_timeout)\ +[fastcgi_force_ranges](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_force_ranges)\ +[fastcgi_hide_header](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_hide_header)\ +[fastcgi_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_ignore_client_abort)\ +[fastcgi_ignore_headers](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_ignore_headers)\ +[fastcgi_index](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_index)\ +[fastcgi_intercept_errors](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_intercept_errors)\ +[fastcgi_keep_conn](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_keep_conn)\ +[fastcgi_limit_rate](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_limit_rate)\ +[fastcgi_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_max_temp_file_size)\ +[fastcgi_next_upstream](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream)\ +[fastcgi_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream_timeout)\ +[fastcgi_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream_tries)\ +[fastcgi_no_cache](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_no_cache)\ +[fastcgi_param](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_param)\ +[fastcgi_pass](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass)\ +[fastcgi_pass_header](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass_header)\ +[fastcgi_pass_request_body](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass_request_body)\ +[fastcgi_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass_request_headers)\ +[fastcgi_read_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_read_timeout)\ +[fastcgi_request_buffering](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_request_buffering)\ +[fastcgi_send_lowat](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_send_lowat)\ +[fastcgi_send_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_send_timeout)\ +[fastcgi_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_socket_keepalive)\ +[fastcgi_split_path_info](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_split_path_info)\ 
+[fastcgi_store](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_store)\ +[fastcgi_store_access](http://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_store_access)\ +[fastcgi_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_temp_file_write_size)\ +[fastcgi_temp_path](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_temp_path)\ +[flv](https://nginx.org/en/docs/http/ngx_http_flv_module.html#flv)\ +[geo (ngx_http_geo_module)](https://nginx.org/en/docs/http/ngx_http_geo_module.html#geo)\ +[geo (ngx_stream_geo_module)](https://nginx.org/en/docs/stream/ngx_stream_geo_module.html#geo)\ +[grpc_buffer_size](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_buffer_size)\ +[grpc_connect_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_connect_timeout)\ +[grpc_hide_header](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_hide_header)\ +[grpc_ignore_headers](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ignore_headers)\ +[grpc_intercept_errors](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_intercept_errors)\ +[grpc_next_upstream](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_next_upstream)\ +[grpc_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_next_upstream_timeout)\ +[grpc_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_next_upstream_tries)\ +[grpc_pass](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_pass)\ +[grpc_pass_header](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_pass_header)\ +[grpc_read_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_read_timeout)\ +[grpc_send_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_send_timeout)\ +[grpc_set_header](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_set_header)\ +[grpc_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_socket_keepalive)\ +[grpc_ssl_certificate](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_certificate)\ +[grpc_ssl_certificate_cache](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_certificate_cache)\ +[grpc_ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_certificate_key)\ +[grpc_ssl_ciphers](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_ciphers)\ +[grpc_ssl_conf_command](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_conf_command)\ +[grpc_ssl_crl](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_crl)\ +[grpc_ssl_name](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_name)\ +[grpc_ssl_password_file](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_password_file)\ +[grpc_ssl_protocols](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_protocols)\ +[grpc_ssl_server_name](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_server_name)\ +[grpc_ssl_session_reuse](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_session_reuse)\ +[grpc_ssl_trusted_certificate](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_trusted_certificate)\ +[grpc_ssl_verify](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_verify)\ +[grpc_ssl_verify_depth](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_verify_depth)\ 
+[gunzip](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html#gunzip)\ +[gunzip_buffers](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html#gunzip_buffers)\ +[gzip](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip)\ +[gzip_buffers](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_buffers)\ +[gzip_comp_level](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level)\ +[gzip_disable](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_disable)\ +[gzip_http_version](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_http_version)\ +[gzip_min_length](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_min_length)\ +[gzip_proxied](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied)\ +[gzip_static](https://nginx.org/en/docs/http/ngx_http_gzip_static_module.html#gzip_static)\ +[gzip_types](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_types)\ +[gzip_vary](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_vary)\ +[hash (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash)\ +[hash (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#hash)\ +[health_check (ngx_http_upstream_hc_module)](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check)\ +[health_check (ngx_stream_upstream_hc_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check)\ +[health_check_timeout](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_timeout)\ +[hls](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls)\ +[hls_buffers](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_buffers)\ +[hls_forward_args](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_forward_args)\ +[hls_fragment](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_fragment)\ +[hls_mp4_buffer_size](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_mp4_buffer_size)\ +[hls_mp4_max_buffer_size](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_mp4_max_buffer_size)\ +[http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http)\ +[http2_body_preread_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_body_preread_size)\ +[http2_chunk_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_chunk_size)\ +[http2_idle_timeout](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_idle_timeout)\ +[http2_max_concurrent_pushes](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_pushes)\ +[http2_max_concurrent_streams](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_streams)\ +[http2_max_field_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size)\ +[http2_max_header_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size)\ +[http2_max_requests](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_requests)\ +[http2_push](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_push)\ +[http2_push_preload](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_push_preload)\ +[http2_recv_buffer_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_recv_buffer_size)\ +[http2_recv_timeout](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_recv_timeout)\ +[http3](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3)\ 
+[http3_hq](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3_hq)\ +[http3_max_concurrent_streams](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3_max_concurrent_streams)\ +[http3_stream_buffer_size](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3_stream_buffer_size)\ +[if](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#if)\ +[if_modified_since](https://nginx.org/en/docs/http/ngx_http_core_module.html#if_modified_since)\ +[ignore_invalid_headers](https://nginx.org/en/docs/http/ngx_http_core_module.html#ignore_invalid_headers)\ +[image_filter](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter)\ +[image_filter_buffer](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_buffer)\ +[image_filter_interlace](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_interlace)\ +[image_filter_jpeg_quality](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_jpeg_quality)\ +[image_filter_sharpen](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_sharpen)\ +[image_filter_transparency](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_transparency)\ +[image_filter_webp_quality](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_webp_quality)\ +[imap_auth](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html#imap_auth)\ +[imap_capabilities](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html#imap_capabilities)\ +[imap_client_buffer](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html#imap_client_buffer)\ +[include](https://nginx.org/en/docs/ngx_core_module.html#include)\ +[index](https://nginx.org/en/docs/http/ngx_http_index_module.html#index)\ +[internal](https://nginx.org/en/docs/http/ngx_http_core_module.html#internal)\ +[internal_redirect](http://nginx.org/en/docs/http/ngx_http_internal_redirect_module.html#internal_redirect)\ +[ip_hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash)\ +[issuer](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#issuer)\ +[js_access (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_access)\ +[js_body_filter](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_body_filter)\ +[js_content](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_content)\ +[js_fetch_buffer_size (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_buffer_size)\ +[js_fetch_buffer_size (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_buffer_size)\ +[js_fetch_ciphers (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_ciphers)\ +[js_fetch_ciphers (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_ciphers)\ +[js_fetch_max_response_buffer_size (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_max_response_buffer_size)\ +[js_fetch_max_response_buffer_size (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_max_response_buffer_size)\ +[js_fetch_protocols (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_protocols)\ +[js_fetch_protocols (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_protocols)\ +[js_fetch_timeout 
(ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_timeout)\ +[js_fetch_timeout (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_timeout)\ +[js_fetch_trusted_certificate (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_trusted_certificate)\ +[js_fetch_trusted_certificate (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_trusted_certificate)\ +[js_fetch_verify (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_verify)\ +[js_fetch_verify (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_verify)\ +[js_fetch_verify_depth (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_verify_depth)\ +[js_fetch_verify_depth (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_verify_depth)\ +[js_filter (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_filter)\ +[js_header_filter](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_header_filter)\ +[js_import (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_import)\ +[js_import (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_import)\ +[js_include (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_include)\ +[js_include (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_include)\ +[js_path (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_path)\ +[js_path (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_path)\ +[js_periodic (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_periodic)\ +[js_periodic (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_periodic)\ +[js_preload_object (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_preload_object)\ +[js_preload_object (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_preload_object)\ +[js_preread (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_preread)\ +[js_set (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_set)\ +[js_set (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_set)\ +[js_var (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_var)\ +[js_var (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_var)\ +[js_shared_dict_zone (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_shared_dict_zone)\ +[js_var (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_var)\ +[js_var (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_var)\ +[keepalive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive)\ +[keepalive_disable](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_disable)\ +[keepalive_min_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_min_timeout)\ +[keepalive_requests (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests)\ +[keepalive_time 
(ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_time)\ +[keepalive_timeout (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout)\ +[keyval (ngx_http_keyval_module)](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval)\ +[keyval (ngx_stream_keyval_module)](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval)\ +[keyval_zone (ngx_http_keyval_module)](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone)\ +[keyval_zone (ngx_stream_keyval_module)](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone)\ +[large_client_header_buffers](https://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers)\ +[least_conn (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_conn)\ +[least_conn (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#least_conn)\ +[least_time (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_time)\ +[least_time (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#least_time)\ +[license_token](https://nginx.org/en/docs/ngx_mgmt_module.html#license_token)\ +[limit_conn (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn)\ +[limit_conn (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn)\ +[limit_conn_dry_run (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_dry_run)\ +[limit_conn_dry_run (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_dry_run)\ +[limit_conn_log_level (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_log_level)\ +[limit_conn_log_level (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_log_level)\ +[limit_conn_status](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_status)\ +[limit_conn_zone (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone)\ +[limit_conn_zone (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone)\ +[limit_except](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_except)\ +[limit_rate](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate)\ +[limit_rate_after](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after)\ +[limit_req](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req)\ +[limit_req_dry_run](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_dry_run)\ +[limit_req_log_level](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_log_level)\ +[limit_req_status](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_status)\ +[limit_req_zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone)\ +[limit_zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_zone)\ +[lingering_close](https://nginx.org/en/docs/http/ngx_http_core_module.html#lingering_close)\ +[lingering_time](https://nginx.org/en/docs/http/ngx_http_core_module.html#lingering_time)\ 
+[lingering_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#lingering_timeout)\ +[listen (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen)\ +[listen (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#listen)\ +[load_module](https://nginx.org/en/docs/ngx_core_module.html#load_module)\ +[location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location)\ +[lock_file](http://nginx.org/en/docs/ngx_core_module.html#lock_file)\ +[log_format (ngx_http_log_module)](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format)\ +[log_format (ngx_stream_log_module)](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format)\ +[log_not_found](https://nginx.org/en/docs/http/ngx_http_core_module.html#log_not_found)\ +[log_subrequest](https://nginx.org/en/docs/http/ngx_http_core_module.html#log_subrequest)\ +[mail](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#mail)\ +[map (ngx_http_map_module)](https://nginx.org/en/docs/http/ngx_http_map_module.html#map)\ +[map (ngx_stream_map_module)](https://nginx.org/en/docs/stream/ngx_stream_map_module.html#map)\ +[map_hash_bucket_size (ngx_http_map_module)](https://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_bucket_size)\ +[map_hash_bucket_size (ngx_stream_map_module)](https://nginx.org/en/docs/stream/ngx_stream_map_module.html#map_hash_bucket_size)\ +[map_hash_max_size (ngx_http_map_module)](https://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_max_size)\ +[map_hash_max_size (ngx_stream_map_module)](https://nginx.org/en/docs/stream/ngx_stream_map_module.html#map_hash_max_size)\ +[master_process](https://nginx.org/en/docs/ngx_core_module.html#master_process)\ +[match (ngx_http_upstream_hc_module)](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match)\ +[match (ngx_stream_upstream_hc_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match)\ +[max_errors](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#max_errors)\ +[max_ranges](https://nginx.org/en/docs/http/ngx_http_core_module.html#max_ranges)\ +[memcached_buffer_size](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_buffer_size)\ +[memcached_connect_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_connect_timeout)\ +[memcached_gzip_flag](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_gzip_flag)\ +[memcached_next_upstream](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_next_upstream)\ +[memcached_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_next_upstream_timeout)\ +[memcached_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_next_upstream_tries)\ +[memcached_pass](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_pass)\ +[memcached_read_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_read_timeout)\ +[memcached_send_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_send_timeout)\ +[memcached_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_socket_keepalive)\ +[merge_slashes](https://nginx.org/en/docs/http/ngx_http_core_module.html#merge_slashes)\ +[mgmt](https://nginx.org/en/docs/ngx_mgmt_module.html#mgmt)\ 
+[min_delete_depth](https://nginx.org/en/docs/http/ngx_http_dav_module.html#min_delete_depth)\ +[mirror](https://nginx.org/en/docs/http/ngx_http_mirror_module.html#mirror)\ +[mirror_request_body](https://nginx.org/en/docs/http/ngx_http_mirror_module.html#mirror_request_body)\ +[modern_browser](https://nginx.org/en/docs/http/ngx_http_browser_module.html#modern_browser)\ +[modern_browser_value](https://nginx.org/en/docs/http/ngx_http_browser_module.html#modern_browser_value)\ +[more_clear_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_clear_headers)\ +[more_clear_input_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_clear_input_headers)\ +[more_set_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_set_headers)\ +[more_set_input_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_set_input_headers)\ +[mp4](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4)\ +[mp4_buffer_size](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_buffer_size)\ +[mp4_limit_rate](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_limit_rate)\ +[mp4_limit_rate_after](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_limit_rate_after)\ +[mp4_max_buffer_size](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_max_buffer_size)\ +[mp4_start_key_frame](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_start_key_frame)\ +[mqtt](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt)\ +[mqtt_rewrite_buffer_size](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt_rewrite_buffer_size)\ +[mqtt_set_connect](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt_set_connect)\ +[msie_padding](https://nginx.org/en/docs/http/ngx_http_core_module.html#msie_padding)\ +[msie_refresh](https://nginx.org/en/docs/http/ngx_http_core_module.html#msie_refresh)\ +[multi_accept](https://nginx.org/en/docs/ngx_core_module.html#multi_accept)\ +[ntlm](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm)\ +[oidc_provider](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#oidc_provider)\ +[open_file_cache](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache)\ +[open_file_cache_errors](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache_errors)\ +[open_file_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache_min_uses)\ +[open_file_cache_valid](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache_valid)\ +[open_log_file_cache (ngx_http_log_module)](https://nginx.org/en/docs/http/ngx_http_log_module.html#open_log_file_cache)\ +[open_log_file_cache (ngx_stream_log_module)](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#open_log_file_cache)\ +[otel_exporter](https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter)\ +[otel_service_name](https://nginx.org/en/docs/ngx_otel_module.html#otel_service_name)\ +[otel_trace](https://nginx.org/en/docs/ngx_otel_module.html#otel_trace)\ +[otel_trace_context](https://nginx.org/en/docs/ngx_otel_module.html#otel_trace_context)\ +[otel_span_name](https://nginx.org/en/docs/ngx_otel_module.html#otel_span_name)\ +[otel_span_attr](https://nginx.org/en/docs/ngx_otel_module.html#otel_span_attr)\ +[output_buffers](https://nginx.org/en/docs/http/ngx_http_core_module.html#output_buffers)\ 
+[override_charset](https://nginx.org/en/docs/http/ngx_http_charset_module.html#override_charset)\ +[pass](https://nginx.org/en/docs/stream/ngx_stream_pass_module.html#pass)\ +[pid](https://nginx.org/en/docs/ngx_core_module.html#pid)\ +[pop3_auth](https://nginx.org/en/docs/mail/ngx_mail_pop3_module.html#pop3_auth)\ +[pop3_capabilities](https://nginx.org/en/docs/mail/ngx_mail_pop3_module.html#pop3_capabilities)\ +[port_in_redirect](https://nginx.org/en/docs/http/ngx_http_core_module.html#port_in_redirect)\ +[postpone_output](https://nginx.org/en/docs/http/ngx_http_core_module.html#postpone_output)\ +[preread_buffer_size (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#preread_buffer_size)\ +[preread_timeout (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#preread_timeout)\ +[protocol](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#protocol)\ +[proxy_buffer](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_buffer)\ +[proxy_buffer_size (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size)\ +[proxy_buffer_size (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_buffer_size)\ +[proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering)\ +[proxy_buffers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers)\ +[proxy_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_busy_buffers_size)\ +[proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache)\ +[proxy_cache_background_update](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_background_update)\ +[proxy_cache_bypass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_bypass)\ +[proxy_cache_convert_head](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_convert_head)\ +[proxy_cache_key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key)\ +[proxy_cache_lock](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock)\ +[proxy_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_age)\ +[proxy_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_timeout)\ +[proxy_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_max_range_offset)\ +[proxy_cache_methods](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_methods)\ +[proxy_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_min_uses)\ +[proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path)\ +[proxy_cache_purge](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_purge)\ +[proxy_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_revalidate)\ +[proxy_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_use_stale)\ +[proxy_cache_valid](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid)\ +[proxy_connect_timeout (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout)\ +[proxy_connect_timeout (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_connect_timeout)\ 
+[proxy_cookie_domain](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain)\ +[proxy_cookie_flags](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_flags)\ +[proxy_cookie_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path)\ +[proxy_download_rate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_download_rate)\ +[proxy_force_ranges](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_force_ranges)\ +[proxy_half_close (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_half_close)\ +[proxy_headers_hash_bucket_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size)\ +[proxy_headers_hash_max_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size)\ +[proxy_hide_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header)\ +[proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version)\ +[proxy_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_client_abort)\ +[proxy_ignore_headers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers)\ +[proxy_intercept_errors](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors)\ +[proxy_limit_rate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_limit_rate)\ +[proxy_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_max_temp_file_size)\ +[proxy_method](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_method)\ +[proxy_next_upstream (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream)\ +[proxy_next_upstream (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_next_upstream)\ +[proxy_next_upstream_timeout (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_timeout)\ +[proxy_next_upstream_timeout (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_next_upstream_timeout)\ +[proxy_next_upstream_tries (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_tries)\ +[proxy_next_upstream_tries (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_next_upstream_tries)\ +[proxy_no_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_no_cache)\ +[proxy_pass (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass)\ +[proxy_pass (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_pass)\ +[proxy_pass_error_message](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_pass_error_message)\ +[proxy_pass_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_header)\ +[proxy_pass_request_body](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_request_body)\ +[proxy_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_request_headers)\ +[proxy_protocol (ngx_mail_proxy_module)](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_protocol)\ +[proxy_protocol 
(ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_protocol)\ +[proxy_protocol_timeout (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#proxy_protocol_timeout)\ +[proxy_read_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout)\ +[proxy_redirect](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect)\ +[proxy_requests (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_requests)\ +[proxy_request_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering)\ +[proxy_responses (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_responses)\ +[proxy_send_lowat](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_lowat)\ +[proxy_send_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout)\ +[proxy_session_drop (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_session_drop)\ +[proxy_set_body](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_body)\ +[proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header)\ +[proxy_smtp_auth](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_smtp_auth)\ +[proxy_socket_keepalive (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_socket_keepalive)\ +[proxy_socket_keepalive (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_socket_keepalive)\ +[proxy_ssl (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl)\ +[proxy_ssl_certificate (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate)\ +[proxy_ssl_certificate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_certificate)\ +[proxy_ssl_certificate_cache (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_cache)\ +[proxy_ssl_certificate_cache (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_certificate_cache)\ +[proxy_ssl_certificate_key (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key)\ +[proxy_ssl_certificate_key (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_certificate_key)\ +[proxy_ssl_ciphers (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_ciphers)\ +[proxy_ssl_ciphers (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_ciphers)\ +[proxy_ssl_conf_command (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_conf_command)\ +[proxy_ssl_conf_command (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_conf_command)\ +[proxy_ssl_crl (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_crl)\ +[proxy_ssl_crl (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_crl)\ +[proxy_ssl_name (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_name)\ +[proxy_ssl_name 
(ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_name)\ +[proxy_ssl_password_file (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_password_file)\ +[proxy_ssl_password_file (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_password_file)\ +[proxy_ssl_protocols (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_protocols)\ +[proxy_ssl_protocols (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_protocols)\ +[proxy_ssl_server_name (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_server_name)\ +[proxy_ssl_server_name (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_server_name)\ +[proxy_ssl_session_reuse (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_session_reuse)\ +[proxy_ssl_session_reuse (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_session_reuse)\ +[proxy_ssl_trusted_certificate (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_trusted_certificate)\ +[proxy_ssl_trusted_certificate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_trusted_certificate)\ +[proxy_ssl_verify (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify)\ +[proxy_ssl_verify (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify)\ +[proxy_ssl_verify_depth (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify_depth)\ +[proxy_ssl_verify_depth (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify_depth)\ +[proxy_store](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_store)\ +[proxy_store_access](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_store_access)\ +[proxy_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_file_write_size)\ +[proxy_temp_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_path)\ +[proxy_timeout (ngx_mail_proxy_module)](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_timeout)\ +[proxy_timeout (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout)\ +[proxy_upload_rate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_upload_rate)\ +[queue](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#queue)\ +[quic_active_connection_id_limit](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_active_connection_id_limit)\ +[quic_gso](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_gso)\ +[quic_host_key](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_host_key)\ +[quic_retry](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_retry)\ +[random (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#random)\ +[random (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#random)\ +[random_index](https://nginx.org/en/docs/http/ngx_http_random_index_module.html#random_index)\ 
+[read_ahead](https://nginx.org/en/docs/http/ngx_http_core_module.html#read_ahead)\ +[read_timeout](https://nginx.org/en/docs/ngx_mgmt_module.html#read_timeout)\ +[real_ip_header](https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header)\ +[real_ip_recursive](https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive)\ +[recursive_error_pages](https://nginx.org/en/docs/http/ngx_http_core_module.html#recursive_error_pages)\ +[redirect_uri](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#redirect_uri)\ +[referer_hash_bucket_size](https://nginx.org/en/docs/http/ngx_http_referer_module.html#referer_hash_bucket_size)\ +[referer_hash_max_size](https://nginx.org/en/docs/http/ngx_http_referer_module.html#referer_hash_max_size)\ +[request_pool_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#request_pool_size)\ +[reset_timedout_connection](https://nginx.org/en/docs/http/ngx_http_core_module.html#reset_timedout_connection)\ +[resolver (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver)\ +[resolver (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#resolver)\ +[resolver (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#resolver)\ +[resolver (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#resolver)\ +[resolver (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#resolver)\ +[resolver_timeout (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_timeout)\ +[resolver_timeout (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#resolver_timeout)\ +[resolver_timeout (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#resolver_timeout)\ +[resolver_timeout (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#resolver_timeout)\ +[resolver_timeout (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#resolver_timeout)\ +[return (ngx_http_rewrite_module)](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return)\ +[return (ngx_stream_return_module)](https://nginx.org/en/docs/stream/ngx_stream_return_module.html#return)\ +[rewrite](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#rewrite)\ +[rewrite_log](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#rewrite_log)\ +[root](https://nginx.org/en/docs/http/ngx_http_core_module.html#root)\ +[satisfy](https://nginx.org/en/docs/http/ngx_http_core_module.html#satisfy)\ +[scgi_buffer_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_buffer_size)\ +[scgi_buffering](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_buffering)\ +[scgi_buffers](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_buffers)\ +[scgi_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_busy_buffers_size)\ +[scgi_cache](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache)\ +[scgi_cache_background_update](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_background_update)\ +[scgi_cache_bypass](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_bypass)\ +[scgi_cache_key](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_key)\ +[scgi_cache_lock](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_lock)\ 
+[scgi_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_lock_age)\ +[scgi_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_lock_timeout)\ +[scgi_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_max_range_offset)\ +[scgi_cache_methods](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_methods)\ +[scgi_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_min_uses)\ +[scgi_cache_path](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_path)\ +[scgi_cache_purge](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_purge)\ +[scgi_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_revalidate)\ +[scgi_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_use_stale)\ +[scgi_cache_valid](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_valid)\ +[scgi_connect_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_connect_timeout)\ +[scgi_force_ranges](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_force_ranges)\ +[scgi_hide_header](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_hide_header)\ +[scgi_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_ignore_client_abort)\ +[scgi_ignore_headers](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_ignore_headers)\ +[scgi_intercept_errors](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_intercept_errors)\ +[scgi_limit_rate](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_limit_rate)\ +[scgi_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_max_temp_file_size)\ +[scgi_next_upstream](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_next_upstream)\ +[scgi_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_next_upstream_timeout)\ +[scgi_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_next_upstream_tries)\ +[scgi_no_cache](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_no_cache)\ +[scgi_param](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_param)\ +[scgi_pass](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass)\ +[scgi_pass_header](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass_header)\ +[scgi_pass_request_body](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass_request_body)\ +[scgi_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass_request_headers)\ +[scgi_read_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_read_timeout)\ +[scgi_request_buffering](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_request_buffering)\ +[scgi_send_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_send_timeout)\ +[scgi_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_socket_keepalive)\ +[scgi_store](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_store)\ +[scgi_store_access](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_store_access)\ +[scgi_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_temp_file_write_size)\ +[scgi_temp_path](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_temp_path)\ 
+[scope](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#scope)\ +[secure_link](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link)\ +[secure_link_md5](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_md5)\ +[secure_link_secret](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_secret)\ +[send_lowat](https://nginx.org/en/docs/http/ngx_http_core_module.html#send_lowat)\ +[send_timeout (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#send_timeout)\ +[send_timeout (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#send_timeout)\ +[sendfile](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile)\ +[sendfile_max_chunk](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile_max_chunk)\ +[server (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#server)\ +[server (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#server)\ +[server (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server)\ +[server (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server)\ +[server_name (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name)\ +[server_name (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#server_name)\ +[server_name (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server_name)\ +[server_name_in_redirect](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name_in_redirect)\ +[server_tokens](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens)\ +[session_log](https://nginx.org/en/docs/http/ngx_http_session_log_module.html#session_log)\ +[session_log_format](https://nginx.org/en/docs/http/ngx_http_session_log_module.html#session_log_format)\ +[session_log_zone](https://nginx.org/en/docs/http/ngx_http_session_log_module.html#session_log_zone)\ +[session_store](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#session_store)\ +[session_timeout](https://nginx.org/en/docs/http/ngx_http_oidc_module.html#session_timeout)\ +[set (ngx_http_rewrite_module)](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#set)\ +[set (ngx_stream_set_module)](https://nginx.org/en/docs/stream/ngx_stream_set_module.html#set)\ +[set_real_ip_from (ngx_http_realip_module)](https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from)\ +[set_real_ip_from (ngx_mail_realip_module)](https://nginx.org/en/docs/mail/ngx_mail_realip_module.html#set_real_ip_from)\ +[set_real_ip_from (ngx_stream_realip_module)](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html#set_real_ip_from)\ +[slice](https://nginx.org/en/docs/http/ngx_http_slice_module.html#slice)\ +[smtp_auth](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_auth)\ +[smtp_capabilities](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_capabilities)\ +[smtp_client_buffer](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_client_buffer)\ +[smtp_greeting_delay](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_greeting_delay)\ +[source_charset](https://nginx.org/en/docs/http/ngx_http_charset_module.html#source_charset) +[split_clients (ngx_http_split_clients_module)](https://nginx.org/en/docs/http/ngx_http_split_clients_module.html#split_clients)\ +[split_clients 
(ngx_stream_split_clients_module)](https://nginx.org/en/docs/stream/ngx_stream_split_clients_module.html#split_clients)\ +[ssi](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi)\ +[ssi_last_modified](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_last_modified)\ +[ssi_min_file_chunk](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_min_file_chunk)\ +[ssi_silent_errors](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_silent_errors)\ +[ssi_types](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_types)\ +[ssi_value_length](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_value_length)\ +[ssl (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl)\ +[ssl (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl)\ +[ssl (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl)\ +[ssl_buffer_size](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size)\ +[ssl_certificate (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate)\ +[ssl_certificate (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_certificate)\ +[ssl_certificate (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate)\ +[ssl_certificate (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_certificate)\ +[ssl_certificate_cache (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_cache)\ +[ssl_certificate_cache (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate_cache)\ +[ssl_certificate_key (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key)\ +[ssl_certificate_key (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_certificate_key)\ +[ssl_certificate_key (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate_key)\ +[ssl_certificate_key (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_certificate_key)\ +[ssl_ciphers (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers)\ +[ssl_ciphers (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_ciphers)\ +[ssl_ciphers (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_ciphers)\ +[ssl_client_certificate (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_client_certificate)\ +[ssl_client_certificate (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_client_certificate)\ +[ssl_client_certificate (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_client_certificate)\ +[ssl_conf_command (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_conf_command)\ +[ssl_conf_command (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_conf_command)\ +[ssl_conf_command (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_conf_command)\ +[ssl_crl (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_crl)\ +[ssl_crl (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_crl)\ +[ssl_crl (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_crl)\ 
+[ssl_dhparam (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam)\ +[ssl_dhparam (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_dhparam)\ +[ssl_early_data](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_early_data)\ +[ssl_ecdh_curve (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve)\ +[ssl_ecdh_curve (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_ecdh_curve)\ +[ssl_ecdh_curve (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_ecdh_curve)\ +[ssl_handshake_timeout](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_handshake_timeout)\ +[ssl_name](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_name)\ +[ssl_object_cache_inheritable](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_object_cache_inheritable)\ +[ssl_ocsp](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp)\ +[ssl_ocsp_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp_cache)\ +[ssl_ocsp_responder](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp_responder)\ +[ssl_password_file (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_password_file)\ +[ssl_password_file (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_password_file)\ +[ssl_password_file (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_password_file)\ +[ssl_prefer_server_ciphers (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers)\ +[ssl_prefer_server_ciphers (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_prefer_server_ciphers)\ +[ssl_prefer_server_ciphers (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_prefer_server_ciphers)\ +[ssl_preread (ngx_stream_ssl_preread_module)](http://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html#var_ssl_preread_protocol)\ +[ssl_protocols (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols)\ +[ssl_protocols (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_protocols)\ +[ssl_protocols (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_protocols)\ +[ssl_protocols (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_protocols)\ +[ssl_reject_handshake](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_reject_handshake)\ +[ssl_server_name](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_server_name)\ +[ssl_session_cache (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache)\ +[ssl_session_cache (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_cache)\ +[ssl_session_cache (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_cache)\ +[ssl_session_ticket_key (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_ticket_key)\ +[ssl_session_ticket_key (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_ticket_key)\ +[ssl_session_ticket_key (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_ticket_key)\ +[ssl_session_tickets 
(ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets)\ +[ssl_session_tickets (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_tickets)\ +[ssl_session_tickets (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_tickets)\ +[ssl_session_timeout (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout)\ +[ssl_session_timeout (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_timeout)\ +[ssl_session_timeout (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_timeout)\ +[ssl_stapling](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling)\ +[ssl_stapling_file](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling_file)\ +[ssl_stapling_responder](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling_responder)\ +[ssl_stapling_verify](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling_verify)\ +[ssl_trusted_certificate (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_trusted_certificate)\ +[ssl_trusted_certificate (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_trusted_certificate)\ +[ssl_trusted_certificate (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_trusted_certificate)\ +[ssl_trusted_certificate (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_trusted_certificate)\ +[ssl_verify](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_verify)\ +[ssl_verify_client (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_verify_client)\ +[ssl_verify_client (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_verify_client)\ +[ssl_verify_client (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_verify_client)\ +[ssl_verify_depth (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_verify_depth)\ +[ssl_verify_depth (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_verify_depth)\ +[ssl_verify_depth (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_verify_depth)\ +[ssl_verify_depth (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_verify_depth)\ +[starttls](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#starttls)\ +[state (ngx_http_upstream_module)](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#state)\ +[status_zone (ngx_http_api_module)](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone)\ +[sticky](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky)\ +[sticky_cookie_insert](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_cookie_insert)\ +[stream (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream)\ +[stub_status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html#stub_status)\ +[sub_filter](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter)\ +[sub_filter_last_modified](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter_last_modified)\ +[sub_filter_once](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter_once)\
+[sub_filter_types](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter_types)\ +[subrequest_output_buffer_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#subrequest_output_buffer_size)\ +[tcp_nodelay (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nodelay)\ +[tcp_nodelay (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#tcp_nodelay)\ +[tcp_nopush](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nopush)\ +[thread_pool](https://nginx.org/en/docs/ngx_core_module.html#thread_pool)\ +[timeout](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#timeout)\ +[timer_resolution](https://nginx.org/en/docs/ngx_core_module.html#timer_resolution)\ +[try_files](https://nginx.org/en/docs/http/ngx_http_core_module.html#try_files)\ +[types](https://nginx.org/en/docs/http/ngx_http_core_module.html#types)\ +[types_hash_bucket_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#types_hash_bucket_size)\ +[types_hash_max_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#types_hash_max_size)\ +[underscores_in_headers](https://nginx.org/en/docs/http/ngx_http_core_module.html#underscores_in_headers)\ +[uninitialized_variable_warn](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#uninitialized_variable_warn)\ +[upstream (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream)\ +[upstream (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream)\ +[upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html#upstream_conf)\ +[usage_report](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report)\ +[use](https://nginx.org/en/docs/ngx_core_module.html#use)\ +[user](https://nginx.org/en/docs/ngx_core_module.html#user)\ +[userid](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid)\ +[userid_domain](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_domain)\ +[userid_expires](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_expires)\ +[userid_flags](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_flags)\ +[userid_mark](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_mark)\ +[userid_name](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_name)\ +[userid_p3p](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_p3p)\ +[userid_path](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_path)\ +[userid_service](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_service)\ +[uuid_file](https://nginx.org/en/docs/ngx_mgmt_module.html#uuid_file)\ +[uwsgi_buffer_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_buffer_size)\ +[uwsgi_buffering](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_buffering)\ +[uwsgi_buffers](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_buffers)\ +[uwsgi_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_busy_buffers_size)\ +[uwsgi_cache](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache)\ +[uwsgi_cache_background_update](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_background_update)\ +[uwsgi_cache_bypass](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_bypass)\ 
+[uwsgi_cache_key](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_key)\ +[uwsgi_cache_lock](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_lock)\ +[uwsgi_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_lock_age)\ +[uwsgi_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_lock_timeout)\ +[uwsgi_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_max_range_offset)\ +[uwsgi_cache_methods](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_methods)\ +[uwsgi_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_min_uses)\ +[uwsgi_cache_path](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_path)\ +[uwsgi_cache_purge](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_purge)\ +[uwsgi_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_revalidate)\ +[uwsgi_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_use_stale)\ +[uwsgi_cache_valid](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_valid)\ +[uwsgi_connect_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_connect_timeout)\ +[uwsgi_force_ranges](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_force_ranges)\ +[uwsgi_hide_header](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_hide_header)\ +[uwsgi_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ignore_client_abort)\ +[uwsgi_ignore_headers](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ignore_headers)\ +[uwsgi_intercept_errors](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_intercept_errors)\ +[uwsgi_limit_rate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_limit_rate)\ +[uwsgi_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_max_temp_file_size)\ +[uwsgi_modifier1](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_modifier1)\ +[uwsgi_modifier2](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_modifier2)\ +[uwsgi_next_upstream](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_next_upstream)\ +[uwsgi_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_next_upstream_timeout)\ +[uwsgi_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_next_upstream_tries)\ +[uwsgi_no_cache](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_no_cache)\ +[uwsgi_param](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_param)\ +[uwsgi_pass](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass)\ +[uwsgi_pass_header](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass_header)\ +[uwsgi_pass_request_body](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass_request_body)\ +[uwsgi_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass_request_headers)\ +[uwsgi_read_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_read_timeout)\ +[uwsgi_request_buffering](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_request_buffering)\ +[uwsgi_send_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_send_timeout)\ 
+[uwsgi_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_socket_keepalive)\ +[uwsgi_ssl_certificate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_certificate)\ +[uwsgi_ssl_certificate_cache](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_certificate_cache)\ +[uwsgi_ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_certificate_key)\ +[uwsgi_ssl_conf_command](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_conf_command)\ +[uwsgi_ssl_crl](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_crl)\ +[uwsgi_ssl_name](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_name)\ +[uwsgi_ssl_password_file](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_password_file)\ +[uwsgi_ssl_protocols](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_protocols)\ +[uwsgi_ssl_server_name](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_server_name)\ +[uwsgi_ssl_session_reuse](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_session_reuse)\ +[uwsgi_ssl_trusted_certificate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_trusted_certificate)\ +[uwsgi_ssl_verify](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_verify)\ +[uwsgi_ssl_verify_depth](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_verify_depth)\ +[uwsgi_store](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_store)\ +[uwsgi_store_access](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_store_access)\ +[uwsgi_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_temp_file_write_size)\ +[uwsgi_temp_path](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_temp_path)\ +[valid_referers](https://nginx.org/en/docs/http/ngx_http_referer_module.html#valid_referers)\ +[variables_hash_bucket_size (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#variables_hash_bucket_size)\ +[variables_hash_bucket_size (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#variables_hash_bucket_size)\ +[variables_hash_max_size (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#variables_hash_max_size)\ +[variables_hash_max_size (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#variables_hash_max_size)\ +[worker_aio_requests](https://nginx.org/en/docs/ngx_core_module.html#worker_aio_requests)\ +[worker_connections](https://nginx.org/en/docs/ngx_core_module.html#worker_connections)\ +[worker_cpu_affinity](https://nginx.org/en/docs/ngx_core_module.html#worker_cpu_affinity)\ +[worker_priority](https://nginx.org/en/docs/ngx_core_module.html#worker_priority)\ +[worker_processes](https://nginx.org/en/docs/ngx_core_module.html#worker_processes)\ +[worker_rlimit_core](https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_core)\ +[worker_rlimit_nofile](https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile)\ +[worker_shutdown_timeout](https://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout)\ +[working_directory](https://nginx.org/en/docs/ngx_core_module.html#working_directory)\ +[xclient](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#xclient)\ +[xml_entities](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xml_entities)\ 
+[xslt_last_modified](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_last_modified)\ +[xslt_param](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_param)\ +[xslt_string_param](http://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_string_param)\ +[xslt_stylesheet](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_stylesheet)\ +[xslt_types](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_types)\ +[zone (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone)\ +[zone (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#zone)\ + +{{< /details >}} diff --git a/content/nginxaas-google/getting-started/prerequisites.md b/content/nginxaas-google/getting-started/prerequisites.md new file mode 100644 index 000000000..9aff4e980 --- /dev/null +++ b/content/nginxaas-google/getting-started/prerequisites.md @@ -0,0 +1,32 @@ +--- +title: Prerequisites +weight: 100 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/getting-started/prerequisites/ +type: +- how-to +--- + +Before you can deploy F5 NGINXaaS for Google Cloud (NGINXaaS), you need to complete some prerequisites. + +## Subscribe to the NGINXaaS for Google Cloud offering + +If it's your first time using NGINXaaS for Google Cloud, you need to find the offering in the Google Cloud Marketplace and subscribe to it. + +### Get the offering in the Google Cloud Marketplace + +1. Find the [NGINXaaS for Google Cloud product listing in the Google Cloud Marketplace](https://console.cloud.google.com/marketplace/product/f5-7626-networks-public/nginxaas-google-cloud). +1. Log in with your Google Cloud account. +1. Select **Subscribe** to subscribe to the NGINXaaS for Google Cloud offering. +1. Select the **Enterprise** plan using the dropdown menu. + - You can use the pricing calculator to estimate the cost of your deployment + based on your expected usage. +1. Select the billing account you want to use for this deployment. +1. Agree to the terms of service and privacy policy. +1. Select **Subscribe**. You will be presented with an NGINXaaS welcome page upon success. +1. You can now log in to the NGINXaaS for Google Cloud console by selecting **Manage on provider** from the Google Cloud Marketplace. + +## What's next + +[Create a Deployment]({{< ref "/nginxaas-google/getting-started/create-deployment/deploy-console.md" >}}) diff --git a/content/nginxaas-google/getting-started/ssl-tls-certificates/_index.md b/content/nginxaas-google/getting-started/ssl-tls-certificates/_index.md new file mode 100644 index 000000000..78bcc324b --- /dev/null +++ b/content/nginxaas-google/getting-started/ssl-tls-certificates/_index.md @@ -0,0 +1,5 @@ +--- +title: Add SSL-TLS certificates +weight: 400 +url: /nginxaas/google/getting-started/ssl-tls-certificates/ +--- diff --git a/content/nginxaas-google/getting-started/ssl-tls-certificates/overview.md b/content/nginxaas-google/getting-started/ssl-tls-certificates/overview.md new file mode 100644 index 000000000..d99b56002 --- /dev/null +++ b/content/nginxaas-google/getting-started/ssl-tls-certificates/overview.md @@ -0,0 +1,40 @@ +--- +title: Overview +weight: 50 +toc: true +url: /nginxaas/google/getting-started/ssl-tls-certificates/overview/ +type: +- how-to +--- + + +F5 NGINXaaS for Google Cloud (NGINXaaS) enables customers to secure traffic by adding SSL/TLS certificates to a deployment.
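+For illustration only, the following minimal sketch shows how an uploaded certificate and key can be referenced from a deployment's NGINX configuration. The paths and server name are hypothetical placeholders; the paths must match the certificate and key file paths you assign when you add the certificate in the NGINXaaS console.
+
+```nginx
+# Sketch of a server block inside the http context.
+server {
+    listen 443 ssl;
+    server_name example.com;
+
+    # Hypothetical paths; they must match the Certificate File Path and
+    # Key File Path configured for this deployment in the NGINXaaS console.
+    ssl_certificate     /etc/nginx/ssl/my-cert.pem;
+    ssl_certificate_key /etc/nginx/ssl/my-cert.key;
+
+    location / {
+        return 200 "TLS is terminated by NGINXaaS\n";
+    }
+}
+```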
+ +This document provides details about using SSL/TLS certificates with your F5 NGINXaaS for Google Cloud deployment. + +## Supported certificate types and formats + +NGINXaaS supports certificates of the following types: + +- Self-signed +- Domain Validated (DV) +- Organization Validated (OV) +- Extended Validation (EV) + +NGINXaaS supports the following certificate formats: + +- PEM format certificates. + +NGINXaaS allows you to upload these certificates as text or as files. + +Your certificates and keys must use one of the following algorithms: + +- RSA +- ECC/ECDSA + +## Add SSL/TLS certificates + +Add a certificate to your NGINXaaS deployment using your preferred client tool: + +- [Add certificates using the NGINXaaS Console]({{< ref "/nginxaas-google/getting-started/ssl-tls-certificates/ssl-tls-certificates-console.md" >}}) + diff --git a/content/nginxaas-google/getting-started/ssl-tls-certificates/ssl-tls-certificates-console.md b/content/nginxaas-google/getting-started/ssl-tls-certificates/ssl-tls-certificates-console.md new file mode 100644 index 000000000..38b4d2824 --- /dev/null +++ b/content/nginxaas-google/getting-started/ssl-tls-certificates/ssl-tls-certificates-console.md @@ -0,0 +1,77 @@ +--- +title: Add certificates using the Console +weight: 100 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/getting-started/ssl-tls-certificates/ssl-tls-certificates-console/ +type: +- how-to +--- + +You can manage SSL/TLS certificates for F5 NGINXaaS for Google Cloud (NGINXaaS) using the NGINXaaS console. + +## Prerequisites + +If you haven't already done so, complete the following prerequisites: + +- Subscribe to the NGINXaaS for Google Cloud offering. See [Subscribe to the NGINXaaS for Google Cloud offering]({{< ref "/nginxaas-google/getting-started/prerequisites.md" >}}). +- Create a deployment. See [Deploy using the console]({{< ref "/nginxaas-google/getting-started/create-deployment/deploy-console.md" >}}). +- Access the console by visiting [https://console.nginxaas.net/](https://console.nginxaas.net/). +- Log in to the NGINXaaS console with your Google credentials. + +## Add an SSL/TLS certificate to NGINXaaS + +- Select **Certificates** in the left menu. +- Select {{< icon "plus">}} **Add Certificate**. +- In the **Add Certificate** panel, provide the required information: + {{< table >}} + + | Field | Description | + |---------------------------- | ---------------------------- | + | Name | A unique name for the certificate. | + | Type | Select the type of certificate you are adding: SSL certificate and key, or CA certificate bundle. | + | Certificate Import Options | Choose how you want to import the certificate: enter the certificate text or upload a file. | + + {{< /table >}} + +- Repeat these steps to add as many certificates as needed. + +### Use a certificate in an NGINX configuration + +To use a certificate in an NGINX configuration, follow these steps: + +- Select **Configurations** in the left menu. +- Select the ellipsis (three dots) next to the configuration you want to edit, and select **Edit**. +- Select **Continue** to open the configuration editor. +- In your configuration, select {{< icon "plus">}} **Add File** and either choose to use an existing certificate or add a new one. + - If you want to add a new certificate, select **New SSL Certificate or CA Bundle** and follow the steps mentioned in [Add an SSL/TLS certificate to NGINXaaS](#add-an-ssltls-certificate-to-nginxaas).
+ - If you want to use an existing certificate, select **Existing SSL Certificate or CA Bundle** and use the menu to choose a certificate from the list of certificates you have already added. +- Provide the required path information: + {{< table >}} + + | Field | Description | Note | + |---------------------------- | ---------------------------- | ---- | + | Certificate File Path | This path can match one or more `ssl_certificate` directive file arguments in your NGINX configuration. | The certificate path must be unique within the same deployment. | + | Key File Path | This path can match one or more `ssl_certificate_key` directive file arguments in your NGINX configuration. | The key path must be unique within the same deployment. | + + {{< /table >}} +- Update the NGINX configuration to reference the certificate and key you just added, using the path values you provided. +- Select **Continue** and then **Save** to save your changes. + +### Edit an SSL/TLS certificate + +{{< include "/nginxaas-google/update-nginx-config.md" >}} + +### Delete an SSL/TLS certificate + +- Select **Certificates** in the left menu. +- On the list of certificates, select the ellipsis (three dots) icon next to the certificate you want to delete. +- Select **Delete**. +- Confirm that you want to delete the certificate. + +{{< call-out "warning" >}}Deleting an SSL/TLS certificate currently in use by the NGINXaaS for Google Cloud deployment will cause an error.{{< /call-out >}} + + +## What's next + +[Upload an NGINX Configuration]({{< ref "/nginxaas-google/getting-started/nginx-configuration/nginx-configuration-console.md" >}}) diff --git a/content/nginxaas-google/glossary.md b/content/nginxaas-google/glossary.md new file mode 100644 index 000000000..3dc00c2f5 --- /dev/null +++ b/content/nginxaas-google/glossary.md @@ -0,0 +1,26 @@ +--- +title: Glossary +weight: 900 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/glossary/ +type: +- reference +--- + +This document provides definitions for terms and acronyms commonly used in F5 NGINXaaS for Google Cloud (NGINXaaS) documentation. + +{{< table >}}
+ +| Term | Description | +| ------------------------ | -------------------------------------------------------------------------------------| +| Authorized Domains | The list of domains allowed to authenticate into the NGINXaaS Account using Google authentication.
- This can be used to restrict access to Google identities within your Google Cloud Organization or Google Workspace, or other known, trusted Workspaces. For example, your Google Cloud Organization may have users created under the `example.com` domain. By setting the Authorized Domains in your NGINXaaS Account to only allow `example.com`, users attempting to log in with an email associated with the `alternative.net` Google Workspace would not be authenticated. | +| GC (Geographical Controller) | Geographical Controller (GC) is a control plane that serves users in a given geographical boundary while taking into account concerns relating to data residency and localization. Example: A US geographical controller serves US customers. We currently have a presence in two geographies: **US** and **EU**. | +| NGINXaaS Account | Represents a Google Cloud procurement with an active Marketplace NGINXaaS subscription, linked to a billing account. To create an account, see the signup documentation in [prerequisites]({{< ref "/nginxaas-google/getting-started/prerequisites.md" >}}). | +| NGINXaaS User | NGINXaaS Users are granted access to all resources in the NGINXaaS Account. User authentication is performed securely via Google Cloud, requiring a matching identity. Individuals can be added as users to multiple NGINXaaS Accounts, and can switch between them using the steps documented below. | +| Network attachment | A Google Cloud resource that enables a VM instance to connect to a VPC network. [More information](https://cloud.google.com/vpc/docs/about-network-attachments). | +| VPC network | A Virtual Private Cloud (VPC) network is a virtual version of a physical network, implemented within Google Cloud. It provides networking functionality for your Google Cloud resources. [More information](https://cloud.google.com/vpc/docs/vpc). | + + +{{< /table >}}
\ No newline at end of file diff --git a/content/nginxaas-google/known-issues.md b/content/nginxaas-google/known-issues.md new file mode 100644 index 000000000..1f640fc77 --- /dev/null +++ b/content/nginxaas-google/known-issues.md @@ -0,0 +1,14 @@ +--- +title: "Known issues" +weight: 1000 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/known-issues/ + +--- + +List of known issues in the latest release of F5 NGINXaaS for Google Cloud (NGINXaaS). + +_There are currently no known issues._ + +{{< call-out "note" >}} You may also want to be familiar with the documented [NGINXaaS limitations]({{< ref "nginxaas-google/overview.md#limitations" >}}). {{< /call-out >}} \ No newline at end of file diff --git a/content/nginxaas-google/monitoring/_index.md b/content/nginxaas-google/monitoring/_index.md new file mode 100644 index 000000000..b42e915b4 --- /dev/null +++ b/content/nginxaas-google/monitoring/_index.md @@ -0,0 +1,5 @@ +--- +title: Logging and monitoring +weight: 300 +url: /nginxaas/google/monitoring/ +--- diff --git a/content/nginxaas-google/monitoring/access-management.md b/content/nginxaas-google/monitoring/access-management.md new file mode 100644 index 000000000..b7e4cac47 --- /dev/null +++ b/content/nginxaas-google/monitoring/access-management.md @@ -0,0 +1,70 @@ +--- +title: Identity and access management +weight: 100 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/getting-started/access-management/ +type: +- how-to +--- + + + +F5 NGINXaaS for Google Cloud (NGINXaaS) leverages Workload Identity Federation (WIF) to integrate with Google Cloud services. For example, when WIF is configured, NGINXaaS can export logs and metrics from your deployment to Cloud Logging and Cloud Monitoring in your chosen Google project. To learn more about WIF on Google Cloud, see [Google's Workload Identity Federation documentation](https://cloud.google.com/iam/docs/workload-identity-federation). + +## Prerequisites + +- In the project where you're configuring WIF, you need the following roles to create a workload identity pool, provider, and policy bindings: + - [iam.workloadIdentityPoolAdmin](https://cloud.google.com/iam/docs/roles-permissions/iam#iam.workloadIdentityPoolAdmin) + - [resourcemanager.projectIamAdmin](https://cloud.google.com/iam/docs/roles-permissions/resourcemanager#resourcemanager.projectIamAdmin) +- An NGINXaaS deployment. See [our documentation on creating an NGINXaaS deployment]({{< ref "/nginxaas-google/getting-started/create-deployment/" >}}) for a step-by-step guide. + +## Configure WIF + +### Create a Workload Identity Pool and Provider + +1. Create a workload identity pool. See [Google's documentation on configuring Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create-pool-provider) for a step-by-step guide. +1. Create an OIDC workload identity pool provider. See [Google's documentation on creating a workload identity pool provider](https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create-pool-provider) for a step-by-step guide. Set up the provider settings as follows: + - `Issuer URL` must be `https://accounts.google.com`. + - `Allowed audiences` must contain the full canonical resource name of the workload identity pool provider, for example, `https://iam.googleapis.com/projects/<project-number>/locations/global/workloadIdentityPools/<pool-id>/providers/<provider-id>`. If `Allowed audiences` is empty, the full canonical resource name of the workload identity pool provider will be included by default.
+ - Add the following **attribute mapping**: `google.subject=assertion.sub`. + - Add the following **attribute condition**: `assertion.sub=='$NGINXAAS_SERVICE_ACCOUNT_UNIQUE_ID'` where `$NGINXAAS_SERVICE_ACCOUNT_UNIQUE_ID` is your NGINXaaS deployment's service account's unique ID. + +### Grant access to the WIF principal with your desired roles + +In the [Google Cloud Console](https://console.cloud.google.com/), +1. Select the Google project you want to grant access to. For example, to grant access to export logs to a Google project, `$LOG_PROJECT_ID`, or to export metrics to a Google project, `$METRIC_PROJECT_ID`, go to that project. +1. Go to the **IAM** page. +1. Select **Grant Access**. +1. Enter your principal, for example, `principal://iam.googleapis.com/projects/$WIF_PROJECT_NUMBER/locations/global/workloadIdentityPools/$WIF_POOL_ID/subject/$NGINXAAS_SERVICE_ACCOUNT_UNIQUE_ID`. +1. Assign roles. For example, + - To grant access to export logs, add the **Logs Writer** role. + - To grant access to export metrics, add the **Monitoring Metric Writer** role. + +Alternatively, to use the Google Cloud CLI, you can run the following `gcloud` commands: +- To grant access to export logs to a Google project, `$LOG_PROJECT_ID`, + ```bash + gcloud projects add-iam-policy-binding "$LOG_PROJECT_ID" \ + --member="principal://iam.googleapis.com/projects/$WIF_PROJECT_NUMBER/locations/global/workloadIdentityPools/$WIF_POOL_ID/subject/$NGINXAAS_SERVICE_ACCOUNT_UNIQUE_ID" \ + --role='roles/logging.logWriter' + ``` +- To grant access to export metrics to a Google project, `$METRIC_PROJECT_ID`, + ```bash + gcloud projects add-iam-policy-binding "$METRIC_PROJECT_ID" \ + --member="principal://iam.googleapis.com/projects/$WIF_PROJECT_NUMBER/locations/global/workloadIdentityPools/$WIF_POOL_ID/subject/$NGINXAAS_SERVICE_ACCOUNT_UNIQUE_ID" \ + --role='roles/monitoring.metricWriter' + ``` + +See [Google's documentation on granting access](https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#access) for more information. + +### Update your NGINXaaS deployment with the name of your workload identity pool provider + +In the NGINXaaS Console, +1. On the navigation menu, select **Deployments**. +1. Select the deployment you want to update and select **Edit**. +1. Enter your provider name, for example, `projects/<project-number>/locations/global/workloadIdentityPools/<pool-id>/providers/<provider-id>`, under **Workload Identity Pool Provider Name**. +1. Select **Update**. + +## What's next + +[Add SSL/TLS Certificates]({{< ref "/nginxaas-google/getting-started/ssl-tls-certificates/ssl-tls-certificates-console.md" >}}) diff --git a/content/nginxaas-google/monitoring/configure-alerts.md b/content/nginxaas-google/monitoring/configure-alerts.md new file mode 100644 index 000000000..eb3a96098 --- /dev/null +++ b/content/nginxaas-google/monitoring/configure-alerts.md @@ -0,0 +1,21 @@ +--- +title: Configure alerts +weight: 300 +toc: true +draft: true +nd-docs: DOCS-000 +url: /nginxaas/google/monitoring/configure-alerts/ +type: +- how-to +--- + +This guide explains how to create and configure metrics-based alerts for your F5 NGINXaaS for Google Cloud +deployment. + + +## Prerequisites + + + +## Create metrics-based alerts for proactive monitoring
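+As a starting sketch (not an official procedure), you can create a metrics-based alerting policy with the Cloud Monitoring API. The example below assumes a hypothetical threshold on the `workload.googleapis.com/nginx.http.requests` metric exported by your deployment; the policy name, threshold, and `{metric_project_id}` placeholder are illustrative only, and you should adjust the filter and add notification channels for your environment.
+
+```bash
+# Sketch: create an alert policy that fires when the NGINX request rate
+# stays above a hypothetical threshold for five minutes.
+cat > nginx-request-rate-policy.json <<'EOF'
+{
+  "displayName": "NGINXaaS high request rate (example)",
+  "combiner": "OR",
+  "conditions": [
+    {
+      "displayName": "nginx.http.requests rate above threshold",
+      "conditionThreshold": {
+        "filter": "metric.type=\"workload.googleapis.com/nginx.http.requests\" AND resource.type=\"generic_node\"",
+        "comparison": "COMPARISON_GT",
+        "thresholdValue": 100,
+        "duration": "300s",
+        "aggregations": [
+          {
+            "alignmentPeriod": "60s",
+            "perSeriesAligner": "ALIGN_RATE"
+          }
+        ]
+      }
+    }
+  ]
+}
+EOF
+
+# Create the policy in the project that receives your NGINXaaS metrics.
+curl --request POST \
+  "https://monitoring.googleapis.com/v3/projects/{metric_project_id}/alertPolicies" \
+  --header "Authorization: Bearer $(gcloud auth print-access-token)" \
+  --header "Content-Type: application/json" \
+  --data @nginx-request-rate-policy.json
+```
+
+You can also build an equivalent policy interactively in the Google Cloud console under **Monitoring > Alerting**.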
+ diff --git a/content/nginxaas-google/monitoring/enable-monitoring.md b/content/nginxaas-google/monitoring/enable-monitoring.md new file mode 100644 index 000000000..7dc2c29f3 --- /dev/null +++ b/content/nginxaas-google/monitoring/enable-monitoring.md @@ -0,0 +1,113 @@ +--- +title: Enable monitoring +weight: 200 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/monitoring/enable-monitoring/ +type: +- how-to +--- + +Monitoring your application's performance is crucial for maintaining its reliability and efficiency. F5 NGINXaaS for Google Cloud (NGINXaaS) seamlessly integrates with Google Cloud services, allowing you to collect, correlate, and analyze metrics for a thorough understanding of your application's health and behavior. + + +## Prerequisites + +- Enable the [Cloud Monitoring API](https://cloud.google.com/monitoring/api/enable-api). +- Configure Workload Identity Federation (WIF). See [our documentation on setting up WIF]({{< ref "/nginxaas-google/monitoring/access-management.md#configure-wif" >}}) for exact steps. +- Grant your principal the `roles/monitoring.viewer` role, or an equivalent project-level role. See [Google's documentation on controlling access to Cloud Monitoring with IAM](https://cloud.google.com/monitoring/access-control). + +## Export NGINXaaS metrics to a Google Cloud project + +To enable sending metrics to your desired Google Cloud project, you must specify the project ID when creating or updating a deployment. To create a deployment, see [our documentation on creating an NGINXaaS deployment]({{< ref "/nginxaas-google/getting-started/create-deployment/" >}}) for a step-by-step guide. To update the deployment, in the NGINXaaS console, + +1. On the navigation menu, select **Deployments**. +1. Select the deployment you want to update and select **Edit**. +1. Enter the project you want metrics to be sent to under **Metric Project ID**. +1. Select **Update**. + +## View NGINXaaS metrics in Google Cloud Monitoring + +See the [Metrics Catalog]({{< ref "/nginxaas-google/monitoring/metrics-catalog.md" >}}) for a full list of metrics NGINXaaS provides. + +### Google Cloud Console's Metrics Explorer + +Log in to the [Google Cloud Console](https://console.cloud.google.com/), then: + +1. Go to your metric project. +2. Search for "Metrics Explorer". + +Refer to [Google's Metrics Explorer](https://cloud.google.com/monitoring/charts/metrics-explorer) documentation to learn how to create charts and queries. + +### Google Cloud Monitoring API + +You can retrieve raw time series metrics from the [Cloud Monitoring API](https://cloud.google.com/monitoring/api/v3). + +For example, you can use [`projects.timeSeries.list`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) to list metrics matching filters from a specified time interval. The following `curl` command lists `nginx.http.requests` metrics from the time interval `start_time` to `end_time` in the given `project_id`. + +```bash +curl \ + "https://monitoring.googleapis.com/v3/projects/{project_id}/timeSeries?filter=metric.type%3D%22workload.googleapis.com%2Fnginx.http.requests%22&interval.endTime={end_time}&interval.startTime={start_time}" \ + --header "Authorization: Bearer $(gcloud auth print-access-token)" \ + --header "Accept: application/json" \ + --compressed +``` + +See [Google's documentation on authenticating for REST](https://cloud.google.com/docs/authentication/rest) for more information.
+ +The following JSON shows an example response body: + +```json +{ + "timeSeries": [ + { + "metric": { + "labels": { + "nginxaas_deployment_location": "us-east1", + "nginxaas_deployment_object_id": "depl_AZjtL2OUdCeh-DROeCLp1w", + "nginxaas_account_id": "account-id", + "service_name": "unknown_service:naasagent", + "instrumentation_source": "naasagent", + "nginxaas_deployment_name": "test-deployment", + "nginxaas_namespace": "default" + }, + "type": "workload.googleapis.com/nginx.http.requests" + }, + "resource": { + "type": "generic_node", + "labels": { + "node_id": "", + "location": "global", + "namespace": "", + "project_id": "{project_id}" + } + }, + "metricKind": "CUMULATIVE", + "valueType": "INT64", + "points": [ + { + "interval": { + "startTime": "{start_time}", + "endTime": "{end_time}" + }, + "value": { + "int64Value": "1405" + } + } + ] + } + ], +} +``` + +{{< call-out "note" >}}Many of NGINX Plus's advanced statistics need to be enabled in the "nginx.conf" file before they will appear in the Metrics Explorer, for example "plus.http.request.bytes_*". Refer to [Gathering Data to Appear in Statistics]({{< ref "/nginx/admin-guide/monitoring/live-activity-monitoring.md#gathering-data-to-appear-in-statistics" >}}) to learn more.{{< /call-out >}} + +## Disable exporting NGINXaaS metrics to a Google Cloud project + +To disable sending metrics to your Google Cloud project, update your NGINXaaS deployment to remove the reference to your project ID. To update the deployment, in the NGINXaaS console, + +1. On the navigation menu, select **Deployments**. +1. Select the deployment you want to update and select **Edit**. +1. Remove the project ID under **Metric Project ID**. +1. Select **Update**. + diff --git a/content/nginxaas-google/monitoring/enable-nginx-logs.md b/content/nginxaas-google/monitoring/enable-nginx-logs.md new file mode 100644 index 000000000..8ed9a8164 --- /dev/null +++ b/content/nginxaas-google/monitoring/enable-nginx-logs.md @@ -0,0 +1,65 @@ +--- +title: Enable NGINX logs +weight: 350 +toc: true +draft: false +nd-docs: DOCS-000 +url: /nginxaas/google/monitoring/enable-nginx-logs/ +type: +- how-to +--- + +F5 NGINXaaS for Google (NGINXaaS) supports integrating with Google Cloud services to collect NGINX error and access logs. + +## Prerequisites + +- Enable the [Cloud Logging API](https://docs.cloud.google.com/logging/docs/api/enable-api). +- Configure Workload Identity Federation (WIF). See [our documentation on setting up WIF]({{< ref "/nginxaas-google/monitoring/access-management.md#configure-wif" >}}) for exact steps. +- Grant a project-level role or grant your principal access to the `roles/logging.viewer` role. See [Google's documentation on controlling access to Cloud Logging with IAM](https://cloud.google.com/logging/docs/access-control). + +## Setting up error logs + +{{< include "/nginxaas-google/logging-config-error-logs.md" >}} + +## Setting up access logs + +{{< include "/nginxaas-google/logging-config-access-logs.md" >}} + + +## Export NGINX logs to a Google Cloud Project + +To enable sending logs to your desired Google Cloud project, you must specify the project ID when creating or updating a deployment. To create a deployment, see [our documentation on creating an NGINXaaS deployment]({{< ref "/nginxaas-google/getting-started/create-deployment/" >}}) for a step-by-step guide. To update the deployment, in the NGINXaaS console, + +1. On the left menu, select **Deployments**. +1. Select the deployment you want to update and select **Edit**. +1. 
Enter the project you want logs to be sent to under **Log Project ID**. +1. Select **Update**. + +## View NGINX logs in Google Cloud Logging + +In the [Google Cloud Console](https://console.cloud.google.com/), + +1. Go to your log project. +2. Search for "Logs Explorer". + +Refer to [Google's Logs Explorer](https://cloud.google.com/logging/docs/view/logs-explorer-interface) documentation to learn how to create queries. + + +NGINX access and error logs sent to Cloud Logging will have the log name `nginx-logs`, which can be used to filter NGINX logs from the rest of your project logs. You can also filter based on log labels, for example: + +* `filename` +* `nginxaas_account_id` +* `nginxaas_deployment_location` +* `nginxaas_deployment_name` +* `nginxaas_deployment_object_id` +* `nginxaas_namespace` + +## Disable exporting NGINX logs to a Google Cloud project + +To disable sending logs to your Google Cloud project, update your NGINXaaS deployment to remove the reference to your project ID. To update the deployment, in the NGINXaaS console, + +1. On the navigation menu, select **Deployments**. +1. Select the deployment you want to update and select **Edit**. +1. Remove the project ID under **Log Project ID**. +1. Select **Update**. + diff --git a/content/nginxaas-google/monitoring/metrics-catalog.md b/content/nginxaas-google/monitoring/metrics-catalog.md new file mode 100644 index 000000000..ce6f54197 --- /dev/null +++ b/content/nginxaas-google/monitoring/metrics-catalog.md @@ -0,0 +1,160 @@ +--- +title: Metrics catalog +weight: 400 +toc: false +nd-docs: DOCS-000 +url: /nginxaas/google/monitoring/metrics-catalog/ +type: +- concept +--- + +F5 NGINXaaS for Google Cloud (NGINXaaS) provides a rich set of metrics that you can use to monitor the health and performance of your NGINXaaS deployment. This document provides a catalog of the metrics that are available for monitoring NGINXaaS for Google Cloud. + +## Available metrics + +- [Available metrics](#available-metrics) +- [Metrics](#metrics) + - [NGINX config statistics](#nginx-config-statistics) + - [NGINX connections statistics](#nginx-connections-statistics) + - [NGINX requests and response statistics](#nginx-requests-and-response-statistics) + - [NGINX SSL statistics](#nginx-ssl-statistics) + - [NGINX cache statistics](#nginx-cache-statistics) + - [NGINX memory statistics](#nginx-memory-statistics) + - [NGINX worker statistics](#nginx-worker-statistics) + - [NGINX upstream statistics](#nginx-upstream-statistics) + - [NGINX stream statistics](#nginx-stream-statistics) + +## Metrics + +The following metrics are reported by NGINXaaS for Google Cloud in Google Cloud Monitoring. +The metrics are categorized by the namespace used in Google Cloud Monitoring. The labels allow you to filter or split your queries in Google Cloud Monitoring, giving you a granular view of the reported metrics.
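+For example, the following `curl` sketch narrows a Cloud Monitoring query to a single deployment by filtering on the `nginxaas_deployment_name` label. The deployment name, `{project_id}`, `{start_time}`, and `{end_time}` values are placeholders, following the same convention as the example in the Enable monitoring guide.
+
+```bash
+# Sketch: list nginx.http.requests time series for one deployment only.
+curl -G \
+  "https://monitoring.googleapis.com/v3/projects/{project_id}/timeSeries" \
+  --data-urlencode 'filter=metric.type="workload.googleapis.com/nginx.http.requests" AND metric.labels.nginxaas_deployment_name="my-deployment"' \
+  --data-urlencode "interval.startTime={start_time}" \
+  --data-urlencode "interval.endTime={end_time}" \
+  --header "Authorization: Bearer $(gcloud auth print-access-token)" \
+  --header "Accept: application/json"
+```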
+ +### NGINX config statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +| --------------------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| nginx.config.reloads | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location | count | The total number of NGINX configuration reloads since NGINX was last started. | deployment | + +{{< /table >}} + +### NGINX connections statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +|------------------------------|----------------|----------|---------------------------------------------------------------------------------------------------------------|-----------------| +| nginx.http.connections | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_connections_outcome | count | The total number of client connections since NGINX was last started, categorized by outcome (accepted, active, dropped, idle). | deployment | +| nginx.http.connection.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_connections_outcome | gauge | The current number of client connections, categorized by outcome (accepted, active, dropped, idle). | deployment | + +{{< /table >}} + +### NGINX requests and response statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| nginx.http.request.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location | gauge | The total number of client requests received since the last collection interval. | deployment | +| nginx.http.requests | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_zone_type | count | The total number of client requests received since NGINX was last started or reloaded. | zone | +| nginx.http.responses | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_zone_type | count | The total number of HTTP responses sent to clients since NGINX was last started or reloaded. | zone | +| nginx.http.response.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_status_range, nginx_zone_name, nginx_zone_type | gauge | The total number of HTTP responses sent to clients since the last collection interval, grouped by status code range. 
| zone | +| nginx.http.response.status | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_status_range, nginx_zone_name, nginx_zone_type | count | The total number of responses since NGINX was last started or reloaded, grouped by status code range. | zone | +| nginx.http.request.processing.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_zone_type | gauge | The number of client requests that are currently being processed. | zone | +| nginx.http.request.discarded | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_zone_type | count | The total number of requests completed without sending a response. | zone | +| nginx.http.request.io | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_io_direction, nginx_zone_name, nginx_zone_type | count | The total number of HTTP bytes transferred (receive/transmit). | zone | +| nginx.http.limit_conn.requests | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_limit_conn_outcome, nginx_zone_name | count | The total number of connections to an endpoint with a limit_conn directive. | zone | +| nginx.http.limit_req.requests | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_limit_req_outcome, nginx_zone_name | count | The total number of requests to an endpoint with a limit_req directive. | zone | + +{{< /table >}} + +### NGINX SSL statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| nginx.ssl.handshakes | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_ssl_status, nginx_ssl_handshake_reason | count | The total number of SSL handshakes (successful and failed). | deployment | +| nginx.ssl.certificate.verify_failures | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_ssl_verify_failure_reason | count | The total number of SSL certificate verification failures, categorized by reason. | deployment | + +{{< /table >}} + +### NGINX cache statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| nginx.cache.bytes_read | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_cache.outcome, nginx_cache_name | count | The total number of bytes read from the cache or proxied server. 
| cache | +| nginx.cache.responses | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_cache_outcome, nginx_cache_name | count | The total number of responses read from the cache or proxied server. | cache | +| nginx.cache.memory.limit | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_cache_name | gauge | The limit on the maximum size of the cache specified in the configuration. | cache | +| nginx.cache.memory.usage | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_cache_name | gauge | The current size of the cache. | cache | + +{{< /table >}} + +### NGINX memory statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| nginx.slab.page.free | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name | gauge | The current number of free memory pages in the shared memory zone. | zone | +| nginx.slab.page.limit | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name | gauge | The total number of memory pages (free and used) in the shared memory zone. | zone | +| nginx.slab.page.usage | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name | gauge | The current number of used memory pages in the shared memory zone. | zone | +| nginx.slab.page.utilization | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name | gauge | The current percentage of used memory pages in the shared memory zone. | zone | +| nginx.slab.slot.usage | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_slab_slot_limit, nginx_zone_name | gauge | The current number of used memory slots. | zone | +| nginx.slab.slot.free | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_slab_slot_limit, nginx_zone_name | gauge | The current number of free memory slots. | zone | +| nginx.slab.slot.allocations | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_slab_slot_limit, nginx_slab_slot_allocation_result, nginx_zone_name | count | The number of attempts to allocate memory of specified size. 
| zone | + +{{< /table >}} + +### NGINX upstream statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +|-----------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| nginx.http.upstream.keepalive.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name | gauge | The current number of idle keepalive connections per HTTP upstream. | upstream | +| nginx.http.upstream.peer.io | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_io_direction, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of bytes transferred per HTTP upstream peer. | peer | +| nginx.http.upstream.peer.connection.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | The average number of active connections per HTTP upstream peer. | peer | +| nginx.http.upstream.peer.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_peer_state, nginx_zone_name, nginx_upstream_name | gauge | The current count of peers on the HTTP upstream grouped by state. | upstream | +| nginx.http.upstream.peer.fails | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of unsuccessful attempts to communicate with the HTTP upstream peer. | peer | +| nginx.http.upstream.peer.header.time | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | The average time to get the response header from the HTTP upstream peer. | peer | +| nginx.http.upstream.peer.health_checks | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_health_check, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of health check requests made to an HTTP upstream peer. | peer | +| nginx.http.upstream.peer.requests | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of client requests forwarded to the HTTP upstream peer. | peer | +| nginx.http.upstream.peer.response.time | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | The average time to get the full response from the HTTP upstream peer. 
| peer | +| nginx.http.upstream.peer.responses | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_status_range, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of responses obtained from the HTTP upstream peer grouped by status range. | peer | +| nginx.http.upstream.peer.unavailables | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | Number of times the server became unavailable for client requests. | peer | +| nginx.http.upstream.peer.state | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_peer_state, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | Current state of an upstream peer in deployment (1 if deployed, 0 if not). | peer | +| nginx.http.upstream.queue.limit | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name | gauge | The maximum number of requests that can be in the queue at the same time. | upstream | +| nginx.http.upstream.queue.overflows | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name | count | The total number of requests rejected due to the queue overflow. | upstream | +| nginx.http.upstream.queue.usage | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name | gauge | The current number of requests in the queue. | upstream | +| nginx.http.upstream.zombie.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name | gauge | The current number of upstream peers removed from the group but still processing active client requests. | upstream | + +{{< /table >}} + +### NGINX stream statistics + +{{< table >}} + +| **Metric** | **Labels** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| nginx.stream.io | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_io_direction, nginx_zone_name | count | The total number of Stream bytes transferred (receive/transmit). | zone | +| nginx.stream.connection.accepted | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name | count | The total number of connections accepted from clients. | zone | +| nginx.stream.connection.discarded | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name | count | Total number of connections completed without creating a session. 
| zone | +| nginx.stream.connection.processing.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name | gauge | The number of client connections that are currently being processed. | zone | +| nginx.stream.session.status | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_status_range, nginx_zone_name | count | The total number of completed sessions grouped by status range. | zone | +| nginx.stream.upstream.peer.io | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_io_direction, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of Stream upstream peer bytes transferred. | peer | +| nginx.stream.upstream.peer.connection.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | The current number of Stream upstream peer connections. | peer | +| nginx.stream.upstream.peer.connection.time | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | The average time to connect to the stream upstream peer. | peer | +| nginx.stream.upstream.peer.connections | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of client connections forwarded to this stream upstream peer. | peer | +| nginx.stream.upstream.peer.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_peer_state, nginx_zone_name, nginx_upstream_name | count | The current number of stream upstream peers grouped by state. | upstream | +| nginx.stream.upstream.peer.fails | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address | count | The total number of unsuccessful attempts to communicate with the stream upstream peer. | peer | +| nginx.stream.upstream.peer.health_checks | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_health_check, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | The total number of health check requests made to the stream upstream peer. | peer | +| nginx.stream.upstream.peer.response.time | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | The average time to receive the last byte of data for the stream upstream peer. 
| peer | +| nginx.stream.upstream.peer.ttfb.time | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | gauge | The average time to receive the first byte of data for the stream upstream peer. | peer | +| nginx.stream.upstream.peer.unavailables | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | How many times the server became unavailable for client connections due to max_fails threshold. | peer | +| nginx.stream.upstream.peer.state | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_peer_state, nginx_zone_name, nginx_upstream_name, nginx_peer_address, nginx_peer_name | count | Current state of upstream peers in deployment (1 if any peer matches state, 0 if none). | peer | +| nginx.stream.upstream.zombie.count | nginxaas_account_id, nginxaas_namespace, nginxaas_deployment_object_id, nginxaas_deployment_name, nginxaas_deployment_location, nginx_zone_name, nginx_upstream_name | gauge | The current number of peers removed from the group but still processing active client connections. | upstream | + +{{< /table >}} diff --git a/content/nginxaas-google/overview.md b/content/nginxaas-google/overview.md new file mode 100644 index 000000000..22bcc0db4 --- /dev/null +++ b/content/nginxaas-google/overview.md @@ -0,0 +1,71 @@ +--- +title: Overview and architecture +weight: 100 +toc: true +nd-docs: DOCS-000 +url: /nginxaas/google/overview/ +type: +- concept +--- + +## What Is F5 NGINXaaS for Google Cloud? + +NGINXaaS for Google Cloud is a service offering that is tightly integrated into Google Cloud platform and its ecosystem, making applications fast, efficient, and reliable with full lifecycle management of advanced NGINX traffic services. + +[NGINX Plus](https://www.nginx.com/products/nginx/) powers NGINXaaS for Google Cloud, which extends NGINX Open Source with advanced functionality and provides customers with a complete application delivery solution. + +NGINXaaS handles the NGINX Plus license management automatically. + +## Capabilities + +The key capabilities of NGINXaaS for Google Cloud are: + +- Simplifies onboarding by providing a fully managed, ready-to-use NGINX service, eliminating the need for infrastructure setup, manual upgrades, or operational overhead. +- Lowers operational overhead in running and optimizing NGINX. +- Simplifies NGINX deployments with fewer moving parts (edge routing is built into the service). +- Supports migration of existing NGINX configurations to the cloud with minimal effort. +- Integrates with the Google Cloud ecosystem. +- Adopts a consumption-based pricing to align infrastructure costs to actual usage by billing transactions using Google. + +## NGINXaaS for Google Cloud architecture + +{{< img src="nginxaas-google/nginxaas-google-cloud-architecture.svg" alt="Architecture diagram showing how NGINXaaS integrates with Google Cloud. At the top, inside the Google Cloud IaaS layer, NGINX Plus is managed using UI, API, and Terraform, alongside NGINXaaS. Admins connect to this layer. Below, in the Customer VPC, end users connect through Edge Routing to multiple App Servers (labeled App Server 1). NGINX Plus directs traffic to these app servers. 
The Customer VPC also connects with Google Cloud services such as Secret Manager, Monitoring, and other services. Green arrows show traffic flow from end users through edge routing and NGINX Plus to app servers, while blue arrows show admin access." >}} + +- The NGINXaaS Console is used to create, update, and delete NGINX configurations, certificates, and NGINXaaS deployments. +- Each NGINXaaS deployment has dedicated network and compute resources. There is no possibility of noisy neighbor problems or data leakage between deployments. +- NGINXaaS can route traffic to upstreams even if the upstream servers are located in different geographies. See [Known Issues]({{< ref "/nginxaas-google/known-issues.md" >}}) for any networking restrictions. +- NGINXaaS supports request tracing. See the [Application Performance Management with NGINX Variables](https://www.f5.com/company/blog/nginx/application-tracing-nginx-plus) blog to learn more about tracing. +- Supports HTTP to HTTPS, HTTPS to HTTP, and HTTP to HTTP redirects. NGINXaaS also provides the ability to create new rules for redirecting. See [How to Create NGINX Rewrite Rules | NGINX](https://blog.nginx.org/blog/creating-nginx-rewrite-rules) for more details. +- Google Cloud's Private Service Connect (PSC) enables clients within your Virtual Private Cloud (VPC) to access your NGINXaaS deployments. PSC also provides NGINXaaS with a secure and private way to connect to your upstream applications. Known networking limitations can be found in the [Known Issues]({{< ref "/nginxaas-google/known-issues.md" >}}). + +### Geographical Controllers + +NGINXaaS for Google Cloud has a global presence, with management requests served from various geographical controllers. A Geographical Controller (GC) is a control plane that serves users in a given geographical boundary while taking into account concerns relating to data residency and localization. For example, a US geographical controller serves US customers. We currently have a presence in two geographies: **US** and **EU**. + +### Networking + +We use Google [Private Service Connect](https://cloud.google.com/vpc/docs/private-service-connect) (PSC) to securely connect NGINXaaS to your applications and enable client access to your deployments. A [PSC backend](https://cloud.google.com/vpc/docs/private-service-connect#backends) brings the NGINXaaS deployment into your client network, allowing your application clients to connect seamlessly. A [PSC Interface](https://cloud.google.com/vpc/docs/private-service-connect#interfaces) brings the deployment into your application network, enabling secure connectivity to your applications. This approach gives you full control over traffic flow by leveraging your own networking resources, so you can apply your preferred security controls and ensure a secure deployment environment. + + +## Supported regions + +NGINXaaS for Google Cloud is supported in the following regions per geography: + + {{< table "table" >}} + |NGINXaaS Geography | Google Cloud Regions | + |-----------|---------| + | US | us-west1, us-east1, us-central1 | + | EU | europe-west2, europe-west1 | + {{< /table >}} + +## Limitations + +- As mentioned above, we currently support only two geographies, each with a limited set of regions. +- We only support authentication via Google acting as an identity provider. +- User Role Based Access Control (RBAC) is not yet supported. +- NGINX configurations require a specific snippet for an NGINXaaS deployment to work.
+ - For specifics, see [NGINX configuration required content]({{< ref "nginxaas-google/getting-started/nginx-configuration/overview.md#nginx-configuration-required-content" >}}). + +## What's next + +To get started, check the [NGINXaaS for Google Cloud prerequisites]({{< ref "/nginxaas-google/getting-started/prerequisites.md" >}}). diff --git a/content/nginxaas-google/quickstart/_index.md b/content/nginxaas-google/quickstart/_index.md new file mode 100644 index 000000000..54331487f --- /dev/null +++ b/content/nginxaas-google/quickstart/_index.md @@ -0,0 +1,6 @@ +--- +title: Quickstart guides +weight: 600 +url: /nginxaas/google/quickstart/ +draft: true +--- diff --git a/content/nginxaas-google/quickstart/security-controls/_index.md b/content/nginxaas-google/quickstart/security-controls/_index.md new file mode 100644 index 000000000..8353987b5 --- /dev/null +++ b/content/nginxaas-google/quickstart/security-controls/_index.md @@ -0,0 +1,6 @@ +--- +title: Security controls +weight: 500 +url: /nginxaas/google/quickstart/security-controls/ +toc: true +--- diff --git a/content/nginxaas-google/quickstart/security-controls/certificates.md b/content/nginxaas-google/quickstart/security-controls/certificates.md new file mode 100644 index 000000000..dfb724335 --- /dev/null +++ b/content/nginxaas-google/quickstart/security-controls/certificates.md @@ -0,0 +1,25 @@ +--- +title: Use a certificate from Google Cloud Secret Manager +weight: 50 +toc: true +url: /nginxaas/google/quickstart/security-controls/certificates/ +type: +- how-to +--- + + +## Overview + +This guide describes how to use a TLS/SSL certificate stored in Google Cloud Secret Manager with NGINXaaS for Google Cloud. + +## Before you begin + +- [Create a secret in Google Cloud Secret Manager](https://cloud.google.com/secret-manager/docs/creating-and-accessing-secrets) +that contains your TLS/SSL certificate and private key. +- Ensure that the NGINXaaS for Google Cloud service account has permission to access the secret. For more information, see +[Granting, changing, and revoking access to secrets](https://cloud.google.com/iam/docs/granting-changing-revoking-access). + + +## Configure NGINXaaS to use the certificate + +TBD \ No newline at end of file diff --git a/content/nic/configuration/global-configuration/configmap-resource.md b/content/nic/configuration/global-configuration/configmap-resource.md index 4910a66d2..fcfb5910c 100644 --- a/content/nic/configuration/global-configuration/configmap-resource.md +++ b/content/nic/configuration/global-configuration/configmap-resource.md @@ -178,6 +178,17 @@ If you encounter the error `error [emerg] 13#13: "zone_sync" directive is duplic |*zone-sync-resolver-ipv6* | Configures whether the optional [resolver](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive for zone-sync will look up IPv6 addresses. NGINX Plus & `zone-sync` Required | `true` | |*zone-sync-resolver-valid* | Configures an [NGINX time](https://nginx.org/en/docs/syntax.html) that the optional [resolver](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) directive for zone-sync will override the TTL value of responses from nameservers with.
NGINX Plus & `zone-sync` Required | `5s` | +### OIDC (OpenID Connect) Timeouts +For more information on timeouts, see [here](https://github.com/nginxinc/nginx-openid-connect?tab=readme-ov-file#configuring-the-key-value-store) + +|ConfigMap Key | Description | Default | +| ---| ---| ---| +| *oidc-pkce-timeout* | Sets the timeout for PKCE (Proof Key for Code Exchange) in OIDC. | `90s` | +| *oidc-id-tokens-timeout* | Sets the timeout for ID tokens in OIDC. | `1h` | +| *oidc-access-tokens-timeout* | Sets the timeout for access tokens in OIDC. | `1h` | +| *oidc-refresh-tokens-timeout* | Sets the timeout for refresh tokens in OIDC. | `24h` | +| *oidc-sids-timeout* | Sets the timeout for session IDs in OIDC. | `24h` | + ### Snippets and custom templates diff --git a/content/nic/configuration/policy-resource.md b/content/nic/configuration/policy-resource.md index 8efcc0064..b3c5d9865 100644 --- a/content/nic/configuration/policy-resource.md +++ b/content/nic/configuration/policy-resource.md @@ -33,6 +33,7 @@ spec: ``` {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``accessControl`` | The access control policy based on the client IP address. | [accessControl](#accesscontrol) | No | @@ -69,6 +70,7 @@ accessControl: deny: - 10.0.0.0/8 ``` + {{< call-out "note" >}} The feature is implemented using the NGINX [ngx_http_access_module](http://nginx.org/en/docs/http/ngx_http_access_module.html). NGINX Ingress Controller access control policy supports either allow or deny rules, but not both (as the module does). @@ -76,10 +78,12 @@ The feature is implemented using the NGINX [ngx_http_access_module](http://nginx {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``allow`` | Allows access for the specified networks or addresses. For example, ``192.168.1.1`` or ``10.1.1.0/16``. | ``[]string`` | No | |``deny`` | Denies access for the specified networks or addresses. For example, ``192.168.1.1`` or ``10.1.1.0/16``. | ``[]string`` | No | \* an accessControl must include either `allow` or `deny`. | + {{% /table %}} #### AccessControl Merging Behavior @@ -115,6 +119,7 @@ rateLimit: zoneSize: 10M key: ${binary_remote_addr} ``` + {{< call-out "note" >}} The feature is implemented using the NGINX [ngx_http_limit_req_module](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html). @@ -128,6 +133,7 @@ When the [Zone Sync feature]({{< ref "/nic/configuration/global-configuration/co {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``rate`` | The rate of requests permitted. The rate is specified in requests per second (r/s) or requests per minute (r/m). | ``string`` | Yes | @@ -141,6 +147,7 @@ When the [Zone Sync feature]({{< ref "/nic/configuration/global-configuration/co |``rejectCode`` | Sets the status code to return in response to rejected requests. Must fall into the range ``400..599``. Default is ``503``. | ``int`` | No | |``scale`` | Enables a constant rate-limit by dividing the configured rate by the number of nginx-ingress pods currently serving traffic. This adjustment ensures that the rate-limit remains consistent, even as the number of nginx-pods fluctuates due to autoscaling. **This will not work properly if requests from a client are not evenly distributed across all ingress pods** (Such as with sticky sessions, long lived TCP Connections with many requests, and so forth). 
In such cases using [zone-sync]({{< ref "/nic/configuration/global-configuration/configmap-resource.md#zone-sync" >}}) instead would give better results. Enabling `zone-sync` will suppress this setting. | ``bool`` | No | |``condition`` | Add a condition to a rate-limit policy. | [ratelimit.condition](#ratelimitcondition) | No | + {{% /table %}} {{< call-out "note" >}} @@ -174,11 +181,13 @@ condition: ``` {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``jwt`` | defines a JWT condition to rate limit against. | [ratelimit.condition.jwt](#ratelimitconditionjwt) | No | |``variables`` | defines a Variable condition to rate limit against. | [ratelimit.condition.variables](#ratelimitconditionvariables) | No | |``default`` | sets the rate limit in this policy to be the default if no conditions are met. In a group of policies with the same condition, only one policy can be the default. | ``bool`` | No | + {{% /table %}} {{< call-out "note" >}} @@ -189,6 +198,7 @@ If conditions are used, a request doesn't match any, and a `default` has been de The rate limit policy with condition is designed to be used in combination with one or more rate limit policies. For example, multiple rate limit policies with [RateLimit.Condition.JWT](#ratelimitconditionjwt) can be used to apply different tiers of rate limit based on the value of a JWT claim. For a practical example of tiered rate limiting by the value of a JWT claim, see the example in our [GitHub repository](https://github.com/nginx/kubernetes-ingress/tree/v{{< nic-version >}}/examples/custom-resources/rate-limit-tiered-jwt-claim/README.md). ### RateLimit.Condition.JWT + {{< call-out "note" >}} This feature is only available with NGINX Plus. @@ -215,10 +225,12 @@ The rate limit policy will only apply to requests that contain a JWT with the sp ``` {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``claim`` | Claim is the JWT claim to be rate limit by. Nested claims should be separated by ".". | ``string`` | Yes | |``match`` | the value of the claim to match against. | ``string`` | Yes | + {{% /table %}} ### RateLimit.Condition.Variables @@ -236,10 +248,12 @@ Only one variable at a time is supported at present. {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``name`` | the name of the NGINX variable to be rate limit by. | ``string`` | Yes | |``match`` | the value of the NGINX variable to match against. Values prefixed with the `~` character denote the following is a [regular expression](https://nginx.org/en/docs/http/ngx_http_map_module.html#map). | ``string`` | Yes | + {{% /table %}} ### APIKey @@ -281,12 +295,14 @@ data: ``` {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``suppliedIn`` | `header` or `query`. | | Yes | |``suppliedIn.header`` | An array of headers that the API Key may appear in. | ``string[]`` | No | |``suppliedIn.query`` | An array of query params that the API Key may appear in. | ``string[]`` | No | |``clientSecret`` | The name of the Kubernetes secret that stores the API Key(s). It must be in the same namespace as the Policy resource. 
The secret must be of the type ``nginx.org/apikey``, and the API Key(s) must be stored in a key: val format where each key is a unique clientID and each value is a unique base64 encoded API Key | ``string`` | Yes | + {{% /table %}} {{< call-out "important" >}}An APIKey Policy must include a minimum of one of the `suppliedIn.header` or `suppliedIn.query` parameters. Both can also be supplied.{{< /call-out >}} @@ -335,15 +351,18 @@ basicAuth: secret: htpasswd-secret realm: "My API" ``` + {{< call-out "note" >}} The feature is implemented using the NGINX [ngx_http_auth_basic_module](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html). {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``secret`` | The name of the Kubernetes secret that stores the Htpasswd configuration. It must be in the same namespace as the Policy resource. The secret must be of the type ``nginx.org/htpasswd``, and the config must be stored in the secret under the key ``htpasswd``, otherwise the secret will be rejected as invalid. | ``string`` | Yes | |``realm`` | The realm for the basic authentication. | ``string`` | No | + {{% /table %}} #### BasicAuth Merging Behavior @@ -402,11 +421,13 @@ This feature is implemented using the NGINX Plus [ngx_http_auth_jwt_module](http {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``secret`` | The name of the Kubernetes secret that stores the JWK. It must be in the same namespace as the Policy resource. The secret must be of the type ``nginx.org/jwk``, and the JWK must be stored in the secret under the key ``jwk``, otherwise the secret will be rejected as invalid. | ``string`` | Yes | |``realm`` | The realm of the JWT. | ``string`` | Yes | |``token`` | The token specifies a variable that contains the JSON Web Token. By default the JWT is passed in the ``Authorization`` header as a Bearer Token. JWT may be also passed as a cookie or a part of a query string, for example: ``$cookie_auth_token``. Accepted variables are ``$http_``, ``$arg_``, ``$cookie_``. | ``string`` | No | + {{% /table %}} #### JWT Merging Behavior @@ -448,6 +469,7 @@ This feature is implemented using the NGINX Plus directive [auth_jwt_key_request {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | Default | | ---| ---| ---| --- | --- | |``jwksURI`` | The remote URI where the request will be sent to retrieve JSON Web Key set| ``string`` | Yes | -- | @@ -456,6 +478,7 @@ This feature is implemented using the NGINX Plus directive [auth_jwt_key_request |``token`` | The token specifies a variable that contains the JSON Web Token. By default the JWT is passed in the ``Authorization`` header as a Bearer Token. JWT may be also passed as a cookie or a part of a query string, for example: ``$cookie_auth_token``. Accepted variables are ``$http_``, ``$arg_``, ``$cookie_``. | ``string`` | No | -- | |``sniEnabled`` | Enables SNI (Server Name Indication) for the JWT policy. This is useful when the remote server requires SNI to serve the correct certificate. | ``bool`` | No | `false` | |``sniName`` | The SNI name to use when connecting to the remote server. If not set, the hostname from the ``jwksURI`` will be used. | ``string`` | No | -- | + {{% /table %}} {{< call-out "note" >}} @@ -582,7 +605,7 @@ ingressMTLS: verifyDepth: 1 ``` -**IMPORTANT NOTE** +{{< call-out "important" >}} When configuring a CRL with the `ingressMTLS.crlFileName` field, there is additional context to keep in mind: 1. 
NGINX Ingress Controller will expect the CRL, in this case `webapp.crl`, will be in `/etc/nginx/secrets`. A volume mount will need to be added to NGINX Ingress Controller deployment add your CRL to `/etc/nginx/secrets` @@ -590,13 +613,17 @@ When configuring a CRL with the `ingressMTLS.crlFileName` field, there is additi Please refer to the Kubernetes documentation on [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) to find the best implementation for your environment. +{{< /call-out >}} + {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``clientCertSecret`` | The name of the Kubernetes secret that stores the CA certificate. It must be in the same namespace as the Policy resource. The secret must be of the type ``nginx.org/ca``, and the certificate must be stored in the secret under the key ``ca.crt``, otherwise the secret will be rejected as invalid. | ``string`` | Yes | |``verifyClient`` | Verification for the client. Possible values are ``"on"``, ``"off"``, ``"optional"``, ``"optional_no_ca"``. The default is ``"on"``. | ``string`` | No | |``verifyDepth`` | Sets the verification depth in the client certificates chain. The default is ``1``. | ``int`` | No | |``crlFileName`` | The file name of the Certificate Revocation List. NGINX Ingress Controller will look for this file in `/etc/nginx/secrets` | ``string`` | No | + {{% /table %}} #### IngressMTLS Merging Behavior @@ -632,6 +659,7 @@ The feature is implemented using the NGINX [ngx_http_proxy_module](https://nginx {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``tlsSecret`` | The name of the Kubernetes secret that stores the TLS certificate and key. It must be in the same namespace as the Policy resource. The secret must be of the type ``kubernetes.io/tls``, the certificate must be stored in the secret under the key ``tls.crt``, and the key must be stored under the key ``tls.key``, otherwise the secret will be rejected as invalid. | ``string`` | No | @@ -643,6 +671,7 @@ The feature is implemented using the NGINX [ngx_http_proxy_module](https://nginx |``sslName`` | Allows overriding the server name used to verify the certificate of the upstream HTTPS server. | ``string`` | No | |``ciphers`` | Specifies the enabled ciphers for requests to an upstream HTTPS server. The default is ``DEFAULT``. | ``string`` | No | |``protocols`` | Specifies the protocols for requests to an upstream HTTPS server. The default is ``TLSv1 TLSv1.1 TLSv1.2``. | ``string`` | No | > Note: the value of ``ciphers`` and ``protocols`` is not validated by NGINX Ingress Controller. As a result, NGINX can fail to reload the configuration. To ensure that the configuration for a VirtualServer/VirtualServerRoute that references the policy was successfully applied, check its [status]({{< ref "/nic/configuration/global-configuration/reporting-resources-status.md#virtualserver-and-virtualserverroute-resources" >}}). The validation will be added in the future releases. | + {{% /table %}} #### EgressMTLS Merging Behavior @@ -707,6 +736,7 @@ The configuration in the example doesn't enable TLS and the synchronization betw The OIDC policy defines a few internal locations that can't be customized: `/_jwks_uri`, `/_token`, `/_refresh`, `/_id_token_validation`, `/logout`. In addition, as explained below, `/_codexch` is the default value for redirect URI, and `/_logout` is the default value for post logout redirect URI, both of which can be customized. 
Specifying one of these locations as a route in the VirtualServer or VirtualServerRoute will result in a collision and NGINX Plus will fail to reload. {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``clientID`` | The client ID provided by your OpenID Connect provider. | ``string`` | Yes | @@ -722,6 +752,7 @@ The OIDC policy defines a few internal locations that can't be customized: `/_jw |``zoneSyncLeeway`` | Specifies the maximum timeout in milliseconds for synchronizing ID/access tokens and shared values between Ingress Controller pods. The default is ``200``. | ``int`` | No | |``accessTokenEnable`` | Option of whether Bearer token is used to authorize NGINX to access protected backend. | ``boolean`` | No | |``pkceEnable`` | Switches Proof Key for Code Exchange on. The OpenID client needs to be in public mode. `clientSecret` is not used in this mode. | ``boolean`` | No | + {{% /table %}} {{< call-out "note" >}} @@ -777,6 +808,7 @@ The feature is implemented using the NGINX [ngx_http_proxy_module](https://nginx {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | --- | ---| ---| --- | | ``cacheZoneName`` | CacheZoneName defines the name of the cache zone. Must start with a lowercase letter,followed by alphanumeric characters or underscores, and end with an alphanumeric character. Single lowercase letters are also allowed. Examples: "cache", "my_cache", "cache1". | ``string`` | Yes | @@ -787,6 +819,7 @@ The feature is implemented using the NGINX [ngx_http_proxy_module](https://nginx |``levels`` | Levels defines the cache directory hierarchy levels for storing cached files. Must be in format "X:Y" or "X:Y:Z" where X, Y, Z are either 1 or 2. This controls the number of subdirectory levels and their name lengths. Examples: "1:2", "2:2", "1:2:2". Invalid: "3:1", "1:3", "1:2:3". | ``string`` | No | |``overrideUpstreamCache`` | OverrideUpstreamCache controls whether to override upstream cache headers (using proxy_ignore_headers directive). When true, NGINX will ignore cache-related headers from upstream servers like Cache-Control, Expires etc, Default: false. | ``bool`` | No | |``cachePurgeAllow`` | CachePurgeAllow defines IP addresses or CIDR blocks allowed to purge cache. This feature is only available in NGINX Plus. Examples: ["192.168.1.100", "10.0.0.0/8", "::1"]. | ``[]string`` | No | + {{% /table %}} #### Cache Merging Behavior @@ -795,7 +828,7 @@ A VirtualServer/VirtualServerRoute can reference multiple cache policies. Howeve ### WAF -{{< call-out "note" >}} The feature is implemented using the NGINX Plus [F5 WAF for NGINX Module]({{< ref "/nap-waf/" >}}). {{< /call-out >}} +{{< call-out "note" >}} The feature is implemented using the NGINX Plus [F5 WAF for NGINX module]({{< ref "/waf/" >}}). {{< /call-out >}} The WAF policy configures NGINX Plus to secure client requests using F5 WAF for NGINX policies. @@ -814,18 +847,34 @@ waf: logDest: "syslog:server=syslog-svc-secondary.default:514" ``` -{{< call-out "note" >}} The field `waf.securityLog` is deprecated and will be removed in future releases.It will be ignored if `waf.securityLogs` is populated. {{< /call-out >}} +{{< call-out "note" >}} The field `waf.securityLog` is deprecated and will be removed in future releases. It will be ignored if `waf.securityLogs` is populated. {{< /call-out >}} {{% table %}} + |Field | Description | Type | Required | | ---| ---| ---| --- | |``enable`` | Enables F5 WAF for NGINX. 
| ``bool`` | Yes | |``apPolicy`` | The [F5 WAF for NGINX policy]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-policies" >}}) of the WAF. Accepts an optional namespace. Mutually exclusive with ``apBundle``. | ``string`` | No | |``apBundle`` | The [F5 WAF for NGINX policy bundle]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-bundles" >}}). Mutually exclusive with ``apPolicy``. | ``string`` | No | -|``securityLog.enable`` | Enables security log. | ``bool`` | No | -|``securityLog.apLogConf`` | The [F5 WAF for NGINX log conf]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-logs" >}}) resource. Accepts an optional namespace. Only works with ``apPolicy``. | ``string`` | No | -|``securityLog.apLogBundle`` | The [F5 WAF for NGINX log bundle]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-bundles" >}}) resource. Only works with ``apBundle``. | ``string`` | No | -|``securityLog.logDest`` | The log destination for the security log. Only accepted variables are ``syslog:server=; localhost; fqdn>:``, ``stderr``, ````. | ``string`` | No | +|``securityLog.enable`` | **Deprecated:** Enables security log. | ``bool`` | No | +|``securityLog.apLogConf`` | **Deprecated:** The [F5 WAF for NGINX log conf]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-logs" >}}) resource. Accepts an optional namespace. Only works with ``apPolicy``. | ``string`` | No | +|``securityLog.apLogBundle`` | **Deprecated:** The [F5 WAF for NGINX log bundle]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-bundles" >}}) resource. Only works with ``apBundle``. | ``string`` | No | +|``securityLog.logDest`` | **Deprecated:** The log destination for the security log. Only accepted variables are ``syslog:server=; localhost; :``, ``stderr``, ````. | ``string`` | No | +|``securityLogs`` | Config for security log destinations. | [waf.securityLogs](#wafsecurityLogs) | No | + +{{% /table %}} + +#### WAF.SecurityLogs + +{{% table %}} + +|Field | Description | Type | Required | +| ---| ---| ---| --- | +|``enable`` | Enables security log. | ``bool`` | No | +|``apLogConf`` | The [App Protect WAF log conf]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-logs" >}}) resource. Accepts an optional namespace. Only works with ``apPolicy``. | ``string`` | No | +|``apLogBundle`` | The [App Protect WAF log bundle]({{< ref "/nic/installation/integrations/app-protect-waf/configuration.md#waf-bundles" >}}) resource. Only works with ``apBundle``. | ``string`` | No | +|``logDest`` | The log destination for the security log. Only accepted variables are ``syslog:server=; localhost; :``, ``stderr``, ````. | ``string`` | No | + {{% /table %}} #### WAF Merging Behavior diff --git a/content/nic/configuration/security.md b/content/nic/configuration/security.md index 9317893db..a09f091be 100644 --- a/content/nic/configuration/security.md +++ b/content/nic/configuration/security.md @@ -28,13 +28,16 @@ By default, the ServiceAccount has access to all Secret resources in the cluster [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) are required by NGINX Ingress Controller for certificates and privacy keys, which Kubernetes stores unencrypted by default. We recommend following the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) to store these Secrets using at-rest encryption. 
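The linked Kubernetes documentation covers at-rest encryption in full; it is configured on the Kubernetes API server, not in NGINX Ingress Controller itself. As a minimal sketch only (the provider choice and key material below are placeholders, not values required by NGINX Ingress Controller), an API server `EncryptionConfiguration` for Secrets looks roughly like this:

```yaml
# Sketch of a Kubernetes EncryptionConfiguration for encrypting Secrets at rest.
# Passed to the kube-apiserver with --encryption-provider-config.
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets              # encrypt Secret objects, including TLS certificates and keys used by NGINX Ingress Controller
    providers:
      - aescbc:              # example provider; a KMS provider is often preferred in production
          keys:
            - name: key1
              secret: <base64-encoded-32-byte-key>   # placeholder
      - identity: {}         # fallback for reading Secrets written before encryption was enabled
```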
- ## NGINX Ingress Controller recommendations ### Configure root filesystem as read-only {{< call-out "caution" >}} - This feature is compatible with [F5 WAF for NGINX v5]({{< ref "/nap-waf/v5/" >}}). It is not compatible with [F5 WAF for NGINX v4]({{< ref "/nap-waf/v4/" >}}) or [F5 DoS for NGINX]({{< ref "/nap-dos/" >}}). + +This feature is documented in detail in the [F5 WAF for NGINX documentation]({{< ref "/waf/configure/kubernetes-read-only.md" >}}). + +It is compatible with Kubernetes deployments, but it **is not** compatible with [F5 DoS for NGINX]({{< ref "/nap-dos/" >}}). + {{< /call-out >}} NGINX Ingress Controller is designed to be resilient against attacks in various ways, such as running the service as non-root to avoid changes to files. We recommend setting the filesystems on all containers to read-only; this includes `nginx-ingress-controller`, as well as `waf-enforcer` and `waf-config-mgr` when F5 WAF for NGINX v5 is in use. This further reduces the attack surface by limiting changes to binaries and libraries. diff --git a/content/nic/configuration/virtualserver-and-virtualserverroute-resources.md b/content/nic/configuration/virtualserver-and-virtualserverroute-resources.md index 4899b0fdb..761929b9e 100644 --- a/content/nic/configuration/virtualserver-and-virtualserverroute-resources.md +++ b/content/nic/configuration/virtualserver-and-virtualserverroute-resources.md @@ -493,20 +493,6 @@ See the [`sticky`](https://nginx.org/en/docs/http/ngx_http_upstream_module.html? |``secure`` | Adds the ``Secure`` attribute to the cookie. | ``boolean`` | No | |``samesite`` | Adds the ``SameSite`` attribute to the cookie. The allowed values are: ``strict``, ``lax``, ``none`` | ``string`` | No | -### Header - -The header defines an HTTP Header: - -```yaml -name: Host -value: example.com -``` - -|Field | Description | Type | Required | -| ---| ---| ---| --- | -|``name`` | The name of the header. | ``string`` | Yes | -|``value`` | The value of the header. | ``string`` | No | - ### Action The action defines an action to perform for a request.
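To illustrate the read-only root filesystem recommendation in the security document above, here is a minimal, hypothetical pod template excerpt. The container names mirror the ones mentioned in that section; the exact fields depend on how you install NGINX Ingress Controller, and additional writable mounts (for example `emptyDir` volumes) may still be needed, as described in the linked F5 WAF for NGINX guide.

```yaml
# Hypothetical excerpt from an NGINX Ingress Controller pod template.
# Only the securityContext fields relevant to the recommendation are shown.
spec:
  containers:
    - name: nginx-ingress
      securityContext:
        readOnlyRootFilesystem: true
        runAsNonRoot: true
        allowPrivilegeEscalation: false
    - name: waf-enforcer          # only present when F5 WAF for NGINX v5 is deployed
      securityContext:
        readOnlyRootFilesystem: true
    - name: waf-config-mgr        # only present when F5 WAF for NGINX v5 is deployed
      securityContext:
        readOnlyRootFilesystem: true
```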
diff --git a/content/nic/installation/installing-nic/deploy-with-nap-using-helm.md b/content/nic/installation/installing-nic/deploy-with-nap-using-helm.md index 06b22e111..231bc2e2c 100644 --- a/content/nic/installation/installing-nic/deploy-with-nap-using-helm.md +++ b/content/nic/installation/installing-nic/deploy-with-nap-using-helm.md @@ -44,7 +44,7 @@ This is accomplished with the following steps: Pull the `waf-compiler` image with: ```shell -docker pull private-registry.nginx.com/nap/waf-compiler:5.8.0 +docker pull private-registry.nginx.com/nap/waf-compiler:5.9.0 ``` Download the [provided WAF Policy JSON](https://raw.githubusercontent.com/nginx/kubernetes-ingress/main/tests/data/ap-waf-v5/wafv5.json): @@ -53,13 +53,13 @@ Download the [provided WAF Policy JSON](https://raw.githubusercontent.com/nginx/ curl -L https://raw.githubusercontent.com/nginx/kubernetes-ingress/main/tests/data/ap-waf-v5/wafv5.json -o /tmp/wafv5.json ``` -Use your pulled NAP Docker image (`private-registry.nginx.com/nap/waf-compiler:5.8.0`) to compile the policy bundle: +Use your pulled NAP Docker image (`private-registry.nginx.com/nap/waf-compiler:5.9.0`) to compile the policy bundle: ```shell # Using your newly created image docker run --rm \ -v /tmp:/tmp \ - private-registry.nginx.com/nap/waf-compiler:5.8.0 \ + private-registry.nginx.com/nap/waf-compiler:5.9.0 \ -p /tmp/wafv5.json \ -o /tmp/compiled_policy.tgz ``` diff --git a/content/nic/installation/integrations/app-protect-waf-v5/compile-waf-policies.md b/content/nic/installation/integrations/app-protect-waf-v5/compile-waf-policies.md index c7ba92fc3..fdb8b7233 100644 --- a/content/nic/installation/integrations/app-protect-waf-v5/compile-waf-policies.md +++ b/content/nic/installation/integrations/app-protect-waf-v5/compile-waf-policies.md @@ -56,6 +56,7 @@ In the same directory you created `simple-policy.json`, create a POST request fo ```shell curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/policies \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @simple-policy.json ``` @@ -105,7 +106,7 @@ Create the file `security-policy-bundles.json`: { "bundles": [ { - "appProtectWAFVersion": "4.815.0", + "appProtectWAFVersion": "{{< appprotect-compiler-version >}}", "policyName": "Nginxbundletest", "policyUID": "", "attackSignatureVersionDateTime": "latest", @@ -122,6 +123,7 @@ Send a POST request to create the bundle through the API: ```shell curl -X POST https://{{NMS_FQDN}}/api/platform/v1/security/policies/bundles \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @security-policy-bundles.json ``` @@ -137,7 +139,7 @@ You should receive a response similar to the following: }, "content": "", "metadata": { - "appProtectWAFVersion": "4.815.0", + "appProtectWAFVersion": "{{< appprotect-compiler-version >}}", "attackSignatureVersionDateTime": "2024.02.21", "created": "2024-06-12T13:28:20.023775785-07:00", "modified": "2024-06-12T13:28:20.023775785-07:00", @@ -167,7 +169,7 @@ curl --location 'https://127.0.0.1/api/platform/v1/security/policies/bundles' \ }, "content": "", "metadata": { - "appProtectWAFVersion": "4.815.0", + "appProtectWAFVersion": "{{< appprotect-compiler-version >}}", "attackSignatureVersionDateTime": "2024.02.21", "created": "2024-06-13T09:09:10.809-07:00", "modified": "2024-06-13T09:09:20-07:00", @@ -191,7 +193,7 @@ It is one of two unique IDs we will use to download the bundle: it will be refer --- -## Download the security bundle +## Download the security policy bundle Use a GET request 
to download the security bundle using the policy and bundle IDs: @@ -207,6 +209,14 @@ curl -X GET -k 'https://127.0.0.1/api/platform/v1/security/policies/6af9f261-658 | jq -r '.content' | base64 -d > security-policy-bundle.tgz ``` +## Download the security log bundle + +Use a GET request to download the `secops_dashboard` security log bundle. The security log bundle adjusts the format of the policy events to be compatible with NGINX Instance Manager: + +```shell +curl -X GET "https://{NMS_FQDN}/api/platform/v1/security/logprofiles/secops_dashboard/{{< appprotect-compiler-version >}}/bundle" -H "Authorization: Bearer " | jq -r .compiledBundle | base64 -d > secops_dashboard.tgz +``` + --- ## Add volumes and volumeMounts @@ -312,11 +322,61 @@ spec: --- +## Upload the security log bundle + +Upload the security log bundle binary file to the NGINX Ingress Controller pods. + +{{}} + +{{%tab name="Helm"%}} + +```shell +kubectl cp /your/local/path/secops_dashboard.tgz /:etc/app_protect/bundles/secops_dashboard.tgz -c nginx-ingress +``` + +{{% /tab %}} + +{{%tab name="Manifests"%}} + +```shell +kubectl cp /your/local/path/secops_dashboard.tgz /:etc/app_protect/bundles/secops_dashboard.tgz -c nginx-plus-ingress +``` + +{{% /tab %}} + +{{% /tabs %}} + +## Upload the security policy bundle + +Upload the binary file to the NGINX Ingress Controller pods. + +{{}} + +{{%tab name="Helm"%}} + +```shell +kubectl cp /your/local/path/.tgz /:etc/app_protect/bundles.tgz -c nginx-ingress +``` + +{{% /tab %}} + +{{%tab name="Manifests"%}} + +```shell +kubectl cp /your/local/path/.tgz /:etc/app_protect/bundles.tgz -c nginx-plus-ingress +``` + +{{% /tab %}} + +{{% /tabs %}} + +--- + ## Create WAF policy To process a bundle, you must create a new WAF policy. This policy is added to `/etc/app_protect/bundles`, allowing NGINX Ingress Controller to load it into WAF. -The example below shows the required WAF policy, and the *apBundle* and *apLogConf* fields you must use for the security bundle binary file (A tar ball). +The example below shows the required WAF policy, for the *apBundle* field you must use the [security bundle](#download-the-security-policy-bundle) binary file (a tarball). The *apLogBundle* field contains the `secops_dashboard.tgz` [file](#download-the-security-log-bundle). ```yaml apiVersion: k8s.nginx.org/v1 @@ -329,7 +389,7 @@ spec: apBundle: ".tgz" securityLogs: - enable: true - apLogBundle: ".tgz" + apLogBundle: "secops_dashboard.tgz" logDest: "" ``` @@ -358,14 +418,4 @@ spec: pass: webapp ``` ---- - -## Upload the security bundle - -To finish adding a security bundle, upload the binary file to the NGINX Ingress Controller pods. - -```shell -kubectl cp /your/local/path/.tgz /:etc/app_protect/bundles.tgz -c nginx-plus-ingress -``` - -Once the bundle has been uploaded to the cluster, NGINX Ingress Controller will detect and automatically load the new WAF policy. +Your `VirtualServer` should now apply the generated security policy to your traffic and emit security events to NGINX Instance Manager. 
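As an optional sanity check after the upload steps above, you can list the bundles directory inside the pod to confirm that both the policy bundle and `secops_dashboard.tgz` are present. The pod name and namespace below are placeholders; for Helm-based installs, use the `nginx-ingress` container name instead, as in the tabs above.

```shell
# Verify the compiled bundles were copied into the pod (values in <> are placeholders)
kubectl exec <nginx-ingress-pod> -n <namespace> -c nginx-plus-ingress -- ls -l /etc/app_protect/bundles
```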
diff --git a/content/nic/installation/integrations/app-protect-waf-v5/configuration.md b/content/nic/installation/integrations/app-protect-waf-v5/configuration.md index 9fd08381e..84880f565 100644 --- a/content/nic/installation/integrations/app-protect-waf-v5/configuration.md +++ b/content/nic/installation/integrations/app-protect-waf-v5/configuration.md @@ -9,7 +9,7 @@ nd-docs: DOCS-1866 ## Overview -This document explains how to use F5 NGINX Ingress Controller to configure [F5 WAF for NGINX v5]({{< ref "/nap-waf/v5/" >}}). +This document explains how to use F5 NGINX Ingress Controller to configure [F5 WAF for NGINX v5]({{< ref "/waf/" >}}). {{< call-out "note" >}} There are complete NGINX Ingress Controller with F5 WAF for NGINX [example resources on GitHub](https://github.com/nginx/kubernetes-ingress/tree/v{{< nic-version >}}/examples/custom-resources/app-protect-waf-v5). {{< /call-out >}} @@ -97,14 +97,12 @@ Create the application deployment and service: Create the syslog service and pod for the F5 WAF for NGINX security logs: - ```shell kubectl apply -f https://raw.githubusercontent.com/nginx/kubernetes-ingress/v{{< nic-version >}}/examples/custom-resources/app-protect-waf-v5/syslog.yaml ``` ### Deploy the WAF Policy - {{< call-out "note" >}} Configuration settings in the Policy resource enable WAF protection by configuring F5 WAF for NGINX with the log configuration created in the previous step. The policy bundle referenced as `your_policy_bundle_name.tgz` need to be created and placed in the `/etc/app_protect/bundles` volume first.{{< /call-out >}} Create and deploy the WAF policy. @@ -113,7 +111,6 @@ Create and deploy the WAF policy. kubectl apply -f https://raw.githubusercontent.com/nginx/kubernetes-ingress/v{{< nic-version >}}/examples/custom-resources/app-protect-waf-v5/waf.yaml ``` - ### Configure load balancing {{< call-out "note" >}} VirtualServer references the `waf-policy` created in Step 3.{{< /call-out >}} @@ -124,7 +121,6 @@ Create and deploy the WAF policy. kubectl apply -f https://raw.githubusercontent.com/nginx/kubernetes-ingress/v{{< nic-version >}}/examples/custom-resources/app-protect-waf-v5/virtual-server.yaml ``` - ### Test the application To access the application, curl the coffee and the tea services. Use the `--resolve` option to set the Host header of a request with `webapp.example.com` diff --git a/content/nic/installation/integrations/app-protect-waf-v5/troubleshoot-app-protect-waf.md b/content/nic/installation/integrations/app-protect-waf-v5/troubleshoot-app-protect-waf.md index 43dddc95e..cee3595d1 100644 --- a/content/nic/installation/integrations/app-protect-waf-v5/troubleshoot-app-protect-waf.md +++ b/content/nic/installation/integrations/app-protect-waf-v5/troubleshoot-app-protect-waf.md @@ -11,20 +11,20 @@ This document describes how to troubleshoot problems when using NGINX Ingress Co For general troubleshooting of NGINX Ingress Controller, check the general [troubleshooting]({{< ref "/nic/troubleshooting/troubleshoot-common" >}}) documentation. -{{< call-out "note" >}} You can find more troubleshooting tips in the F5 WAF for NGINX [troubleshooting guide]({{< ref "/nap-waf/v5/troubleshooting-guide/troubleshooting.md" >}}) {{< /call-out>}}. - ## Potential problems The table below categorizes some potential problems with NGINX Ingress Controller when F5 WAF for NGINX module is enabled. It suggests how to troubleshoot those problems, using one or more methods from the next section. 
-{{% table %}} +{{< table >}} + |Problem area | Symptom | Troubleshooting method | Common cause | | ---| ---| ---| --- | |Start. | The Ingress Controller fails to start. | Check the logs. | Misconfigured policy bundle. | |Start | The configuration is not applied. | Check if a policy bundle is compiled using version of the compiler running in NGINX Ingress Controller. | Policy bundle is invalid. | |Start | The configuration is not applied. | Check if bundle is present in a volume. | Policy bundle is not present in the mounted volume. | |APLogConf, Policy or Ingress Resource. | The configuration is not applied. | Check the events of the APLogConf, Policy and Ingress Resource, check the logs, replace the policy bundle. | Policy bundle is invalid. | -{{% /table %}} + +{{< /table >}} ## Troubleshooting methods diff --git a/content/nic/releases.md b/content/nic/releases.md index 51bb3ef43..24db3cb59 100644 --- a/content/nic/releases.md +++ b/content/nic/releases.md @@ -6,17 +6,46 @@ nd-content-type: reference nd-product: NIC nd-docs: DOCS-616 --- + + +## 5.2.1 + +10 Oct 2025 + +### Fixes + +- [8302](https://github.com/nginx/kubernetes-ingress/pull/8302) Remove type field for objects with schema ref + +### Dependencies + +- [8321](https://github.com/nginx/kubernetes-ingress/pull/8321), [8330](https://github.com/nginx/kubernetes-ingress/pull/8330), [8352](https://github.com/nginx/kubernetes-ingress/pull/8352) & [8358](https://github.com/nginx/kubernetes-ingress/pull/8358) Bump Go dependencies +- [8280](https://github.com/nginx/kubernetes-ingress/pull/8280), [8291](https://github.com/nginx/kubernetes-ingress/pull/8291), [8331](https://github.com/nginx/kubernetes-ingress/pull/8331) & [8320](https://github.com/nginx/kubernetes-ingress/pull/8320) Bump Docker dependencies +- [8348](https://github.com/nginx/kubernetes-ingress/pull/8348) Update F5 WAF for NGINX 5.9.0 +- [8273](https://github.com/nginx/kubernetes-ingress/pull/8273) Update dependencies + +### Upgrade + +- For NGINX, use the 5.2.1 images from our [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress/tags?page=1&ordering=last_updated&name=5.2.1), [GitHub Container](https://github.com/nginx/kubernetes-ingress/pkgs/container/kubernetes-ingress), [Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-ingress) or [Quay.io](https://quay.io/repository/nginx/nginx-ingress). +- For NGINX Plus, use the 5.2.1 images from the F5 Container registry or build your own image using the 5.2.1 source code. +- For Helm, use version 2.3.1 of the chart. + +### Supported Platforms + +We will provide technical support for NGINX Ingress Controller on any Kubernetes platform that is currently supported by its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.27-1.34. + ## 5.2.0 15 Sept 2025 This NGINX Ingress Controller release focuses on enhancing performance, simplifying configurations, and improving security to better support modern application needs. The highlights of this release are as follows: + - NGINX Content Cache using policies: This new feature introduces policy configurations that enable proxy caching. - Support for Kubernetes `StatefulSet` Objects: Added support for Kubernetes `StatefulSet` objects, which can also be used to provide persistent storage for cached content. - Auto-Adjusting incompatible proxy buffer directive values: A new `-enable-directive-autoadjust` parameter has been added. 
When enabled, this feature automatically resolves common proxy buffer configuration dependencies that could cause issues during NGINX reloads. - Server Name Indication (SNI) support in JWT Policies: Users can now configure sniName and sniEnabled for scenarios where the remote server requires SNI to present the correct certificate ### Features + - [8005](https://github.com/nginx/kubernetes-ingress/pull/8005) Add nginx content cache as NIC cache policy - [8159](https://github.com/nginx/kubernetes-ingress/pull/8159) Statefulset support - [8133](https://github.com/nginx/kubernetes-ingress/pull/8133) Add support for automatic adjustment of buffer related directives @@ -27,45 +56,48 @@ This NGINX Ingress Controller release focuses on enhancing performance, simplify - [8142](https://github.com/nginx/kubernetes-ingress/pull/8142) Add globalconfigurationcustomname parameter - [8195](https://github.com/nginx/kubernetes-ingress/pull/8195) Add support for fips 140-3 compliance - - ### Dependencies + - [8208](https://github.com/nginx/kubernetes-ingress/pull/8208) Update Nginx agent to 3.3 - [7959](https://github.com/nginx/kubernetes-ingress/pull/7959), [7983](https://github.com/nginx/kubernetes-ingress/pull/7983), [8037](https://github.com/nginx/kubernetes-ingress/pull/8037), [8057](https://github.com/nginx/kubernetes-ingress/pull/8057), [8083](https://github.com/nginx/kubernetes-ingress/pull/8083), [8096](https://github.com/nginx/kubernetes-ingress/pull/8096), [8126](https://github.com/nginx/kubernetes-ingress/pull/8126), [8143](https://github.com/nginx/kubernetes-ingress/pull/8143), [8183](https://github.com/nginx/kubernetes-ingress/pull/8183), [8186](https://github.com/nginx/kubernetes-ingress/pull/8186), [8200](https://github.com/nginx/kubernetes-ingress/pull/8200), [8231](https://github.com/nginx/kubernetes-ingress/pull/8231) Bump Go dependencies - [7946](https://github.com/nginx/kubernetes-ingress/pull/7946), [7961](https://github.com/nginx/kubernetes-ingress/pull/7961), [7977](https://github.com/nginx/kubernetes-ingress/pull/7977), [7979](https://github.com/nginx/kubernetes-ingress/pull/7979), [7978](https://github.com/nginx/kubernetes-ingress/pull/7978), [7984](https://github.com/nginx/kubernetes-ingress/pull/7984), [7996](https://github.com/nginx/kubernetes-ingress/pull/7996), [8012](https://github.com/nginx/kubernetes-ingress/pull/8012), [8036](https://github.com/nginx/kubernetes-ingress/pull/8036), [8044](https://github.com/nginx/kubernetes-ingress/pull/8044), [8063](https://github.com/nginx/kubernetes-ingress/pull/8063), [8085](https://github.com/nginx/kubernetes-ingress/pull/8085), [8107](https://github.com/nginx/kubernetes-ingress/pull/8107), [8114](https://github.com/nginx/kubernetes-ingress/pull/8114), [8128](https://github.com/nginx/kubernetes-ingress/pull/8128), [8134](https://github.com/nginx/kubernetes-ingress/pull/8134), [8147](https://github.com/nginx/kubernetes-ingress/pull/8147), [8154](https://github.com/nginx/kubernetes-ingress/pull/8154), [8173](https://github.com/nginx/kubernetes-ingress/pull/8173), [8188](https://github.com/nginx/kubernetes-ingress/pull/8188), [8228](https://github.com/nginx/kubernetes-ingress/pull/8228), [8239](https://github.com/nginx/kubernetes-ingress/pull/8239), [8235](https://github.com/nginx/kubernetes-ingress/pull/8235), 
[8246](https://github.com/nginx/kubernetes-ingress/pull/8246) Bump Docker dependencies ### Upgrade + - For NGINX, use the 5.2.0 images from our [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress/tags?page=1&ordering=last_updated&name=5.2.0), [GitHub Container](https://github.com/nginx/kubernetes-ingress/pkgs/container/kubernetes-ingress), [Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-ingress) or [Quay.io](https://quay.io/repository/nginx/nginx-ingress). - For NGINX Plus, use the 5.2.0 images from the F5 Container registry or build your own image using the 5.2.0 source code. - For Helm, use version 2.3.0 of the chart. ### Supported Platforms + We will provide technical support for NGINX Ingress Controller on any Kubernetes platform that is currently supported by its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.26-1.34. ---- ## 5.1.1 15 Aug 2025 ### Fixes + - [8046](https://github.com/nginx/kubernetes-ingress/pull/8046) Update interval checks for mgmt directive - [8079](https://github.com/nginx/kubernetes-ingress/pull/8079) Status updates for vs endpoints - [8125](https://github.com/nginx/kubernetes-ingress/pull/8125) Don't send request headers & body to jwks uri ### Dependencies + - [8115](https://github.com/nginx/kubernetes-ingress/pull/8115) & [8131](https://github.com/nginx/kubernetes-ingress/pull/8131) Bump Go dependencies - [8030](https://github.com/nginx/kubernetes-ingress/pull/8030), [8080](https://github.com/nginx/kubernetes-ingress/pull/8080) & [8112](https://github.com/nginx/kubernetes-ingress/pull/8112) Bump Docker dependencies - [8139](https://github.com/nginx/kubernetes-ingress/pull/8139) Update to NGINX OSS 1.29.1, NGINX Plus r35, NGINX Agent v3.2, NGINX App Protect 4.16.0 & 5.8.0, and Alpine Linux 3.22 ### Upgrade + - For NGINX, use the 5.1.1 images from our [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress/tags?page=1&ordering=last_updated&name=5.1.1), [GitHub Container](https://github.com/nginx/kubernetes-ingress/pkgs/container/kubernetes-ingress), [Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-ingress) or [Quay.io](https://quay.io/repository/nginx/nginx-ingress). - For NGINX Plus, use the 5.1.1 images from the F5 Container registry or build your own image using the 5.1.1 source code. - For Helm, use version 2.2.2 of the chart. ### Supported Platforms + We will provide technical support for NGINX Ingress Controller on any Kubernetes platform that is currently supported by its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.33. ---- ## 5.1.0 08 Jul 2025 @@ -77,6 +109,7 @@ This release also includes the ability to configure Rate Limiting for your APIs Lastly, in our previous v5.0.0 release, we removed support for OpenTracing. This release replaces that observability capability with native [NGINX OpenTelemetry]({{< ref "/nic/logging-and-monitoring/opentelemetry.md" >}}) traces, allowing you to monitor the traffic of your applications. 
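As a rough sketch of what enabling these traces can look like, the OpenTelemetry exporter is configured through the NGINX Ingress Controller ConfigMap. The key names and collector address below are assumptions recalled from the linked OpenTelemetry guide, not an authoritative reference; confirm them against that guide before use:

```yaml
# Assumed ConfigMap keys for the OpenTelemetry exporter; verify against the OpenTelemetry guide.
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
  namespace: nginx-ingress
data:
  otel-exporter-endpoint: "otel-collector.monitoring.svc:4317"  # OTLP/gRPC collector address (assumed)
  otel-service-name: "nginx-ingress-controller"
  otel-trace-in-http: "true"  # emit spans for proxied HTTP traffic
```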
### Features + - [7642](https://github.com/nginx/kubernetes-ingress/pull/7642) Add [OpenTelemetry support]({{< ref "/nic/logging-and-monitoring/opentelemetry.md" >}}) - [7916](https://github.com/nginx/kubernetes-ingress/pull/7916) Add support for NGINX Agent version 3 and NGINX One Console - [7884](https://github.com/nginx/kubernetes-ingress/pull/7884) Tiered rate limits with variables @@ -116,7 +149,6 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.33. ---- ## 5.0.0 16 Apr 2025 @@ -142,9 +174,11 @@ For users who have previously installed OIDC or used the `zone_sync` directive w Open Source NGINX Ingress Controller architectures `armv7`, `s390x` & `ppc64le` are deprecated and will be removed in the next minor release. ### Breaking Changes + - [7633](https://github.com/nginx/kubernetes-ingress/pull/7633) & [7567](https://github.com/nginx/kubernetes-ingress/pull/7567) Remove OpenTracing support ### Features + - [7054](https://github.com/nginx/kubernetes-ingress/pull/7054) Increase port number range - [7175](https://github.com/nginx/kubernetes-ingress/pull/7175) Ratelimit based on JWT claim - [7205](https://github.com/nginx/kubernetes-ingress/pull/7205), [7238](https://github.com/nginx/kubernetes-ingress/pull/7238), [7390](https://github.com/nginx/kubernetes-ingress/pull/7390) & [7393](https://github.com/nginx/kubernetes-ingress/pull/7393) Tiered Rate limit groups with JWT claim @@ -153,15 +187,18 @@ Open Source NGINX Ingress Controller architectures `armv7`, `s390x` & `ppc64le` - [7299](https://github.com/nginx/kubernetes-ingress/pull/7299) & [7597](https://github.com/nginx/kubernetes-ingress/pull/7597) Add support for NGINX OSS 1.27.4, NGINX Plus R34 & F5 WAF for NGINX 4.13 & 5.6 ### Fixes + - [7121](https://github.com/nginx/kubernetes-ingress/pull/7121) Clean up and fix for NIC Pod failing to bind when NGINX exits unexpectedly - [7185](https://github.com/nginx/kubernetes-ingress/pull/7185) Correct typo in helm lease annotations template - [7400](https://github.com/nginx/kubernetes-ingress/pull/7400) Add tracking.info and copy into plus images - [7519](https://github.com/nginx/kubernetes-ingress/pull/7519) Add NGINX state directory for ReadOnlyRootFilesystem ### Helm Chart + - [7318](https://github.com/nginx/kubernetes-ingress/pull/7318) Allow customization of service http and https port names through helm ### Dependencies + - [6964](https://github.com/nginx/kubernetes-ingress/pull/6964), [6970](https://github.com/nginx/kubernetes-ingress/pull/6970), [6978](https://github.com/nginx/kubernetes-ingress/pull/6978), [6992](https://github.com/nginx/kubernetes-ingress/pull/6992), [7017](https://github.com/nginx/kubernetes-ingress/pull/7017), [7052](https://github.com/nginx/kubernetes-ingress/pull/7052), [7105](https://github.com/nginx/kubernetes-ingress/pull/7105), [7131](https://github.com/nginx/kubernetes-ingress/pull/7131), [7122](https://github.com/nginx/kubernetes-ingress/pull/7122), [7138](https://github.com/nginx/kubernetes-ingress/pull/7138), [7149](https://github.com/nginx/kubernetes-ingress/pull/7149), [7162](https://github.com/nginx/kubernetes-ingress/pull/7162), [7225](https://github.com/nginx/kubernetes-ingress/pull/7225), 
[7240](https://github.com/nginx/kubernetes-ingress/pull/7240), [7262](https://github.com/nginx/kubernetes-ingress/pull/7262), [7290](https://github.com/nginx/kubernetes-ingress/pull/7290), [7312](https://github.com/nginx/kubernetes-ingress/pull/7312), [7345](https://github.com/nginx/kubernetes-ingress/pull/7345), [7362](https://github.com/nginx/kubernetes-ingress/pull/7362), [7375](https://github.com/nginx/kubernetes-ingress/pull/7375), [7385](https://github.com/nginx/kubernetes-ingress/pull/7385), [7415](https://github.com/nginx/kubernetes-ingress/pull/7415), [7403](https://github.com/nginx/kubernetes-ingress/pull/7403), [7435](https://github.com/nginx/kubernetes-ingress/pull/7435), [7459](https://github.com/nginx/kubernetes-ingress/pull/7459), [7472](https://github.com/nginx/kubernetes-ingress/pull/7472), [7483](https://github.com/nginx/kubernetes-ingress/pull/7483), [7505](https://github.com/nginx/kubernetes-ingress/pull/7505), [7501](https://github.com/nginx/kubernetes-ingress/pull/7501), [7522](https://github.com/nginx/kubernetes-ingress/pull/7522), [7543](https://github.com/nginx/kubernetes-ingress/pull/7543), [7594](https://github.com/nginx/kubernetes-ingress/pull/7594), [7619](https://github.com/nginx/kubernetes-ingress/pull/7619), [7635](https://github.com/nginx/kubernetes-ingress/pull/7635) & [7650](https://github.com/nginx/kubernetes-ingress/pull/7650) Bump Go dependencies - [7607](https://github.com/nginx/kubernetes-ingress/pull/7607) Bump Go version to 1.24.2 - [7006](https://github.com/nginx/kubernetes-ingress/pull/7006), [7016](https://github.com/nginx/kubernetes-ingress/pull/7016), [7020](https://github.com/nginx/kubernetes-ingress/pull/7020), [7045](https://github.com/nginx/kubernetes-ingress/pull/7045), [7069](https://github.com/nginx/kubernetes-ingress/pull/7069), [7080](https://github.com/nginx/kubernetes-ingress/pull/7080), [7099](https://github.com/nginx/kubernetes-ingress/pull/7099), [7115](https://github.com/nginx/kubernetes-ingress/pull/7115), [7132](https://github.com/nginx/kubernetes-ingress/pull/7132), [7140](https://github.com/nginx/kubernetes-ingress/pull/7140), [7150](https://github.com/nginx/kubernetes-ingress/pull/7150), [7173](https://github.com/nginx/kubernetes-ingress/pull/7173), [7243](https://github.com/nginx/kubernetes-ingress/pull/7243), [7256](https://github.com/nginx/kubernetes-ingress/pull/7256), [7288](https://github.com/nginx/kubernetes-ingress/pull/7288), [7293](https://github.com/nginx/kubernetes-ingress/pull/7293), [7306](https://github.com/nginx/kubernetes-ingress/pull/7306), [7309](https://github.com/nginx/kubernetes-ingress/pull/7309), [7319](https://github.com/nginx/kubernetes-ingress/pull/7319), [7376](https://github.com/nginx/kubernetes-ingress/pull/7376), [7409](https://github.com/nginx/kubernetes-ingress/pull/7409), [7404](https://github.com/nginx/kubernetes-ingress/pull/7404), [7452](https://github.com/nginx/kubernetes-ingress/pull/7452), [7454](https://github.com/nginx/kubernetes-ingress/pull/7454), [7461](https://github.com/nginx/kubernetes-ingress/pull/7461), [7474](https://github.com/nginx/kubernetes-ingress/pull/7474), [7490](https://github.com/nginx/kubernetes-ingress/pull/7490), [7511](https://github.com/nginx/kubernetes-ingress/pull/7511), 
[7523](https://github.com/nginx/kubernetes-ingress/pull/7523), [7527](https://github.com/nginx/kubernetes-ingress/pull/7527), [7534](https://github.com/nginx/kubernetes-ingress/pull/7534), [7539](https://github.com/nginx/kubernetes-ingress/pull/7539), [7551](https://github.com/nginx/kubernetes-ingress/pull/7551), [7564](https://github.com/nginx/kubernetes-ingress/pull/7564), [7590](https://github.com/nginx/kubernetes-ingress/pull/7590), [7631](https://github.com/nginx/kubernetes-ingress/pull/7631) & [7467](https://github.com/nginx/kubernetes-ingress/pull/7467) Bump Docker dependencies @@ -186,13 +223,17 @@ versions: 1.25-1.32. 07 Feb 2025 ### Fixes + - [7295](https://github.com/nginx/kubernetes-ingress/pull/7295) Clean up and fix for NIC Pod failing to bind when NGINX exits unexpectedly ### Helm Chart + {{< call-out "warning" >}} From this release onwards, the Helm chart location has changed from `oci://ghcr.io/nginxinc/charts/nginx-ingress` to `oci://ghcr.io/nginx/charts/nginx-ingress`. {{< /call-out >}} + - [7188](https://github.com/nginx/kubernetes-ingress/pull/7188) Correct typo in helm lease annotations template ### Dependencies + - [7301](https://github.com/nginx/kubernetes-ingress/pull/7301), [7307](https://github.com/nginx/kubernetes-ingress/pull/7307) & [7310](https://github.com/nginx/kubernetes-ingress/pull/7310) Update to nginx 1.27.4 - [7163](https://github.com/nginx/kubernetes-ingress/pull/7163) Bump Go version to 1.23.5 - [7024](https://github.com/nginx/kubernetes-ingress/pull/7024), [7061](https://github.com/nginx/kubernetes-ingress/pull/7061), [7113](https://github.com/nginx/kubernetes-ingress/pull/7113), [7145](https://github.com/nginx/kubernetes-ingress/pull/7145), [7148](https://github.com/nginx/kubernetes-ingress/pull/7148), [7154](https://github.com/nginx/kubernetes-ingress/pull/7154), [7164](https://github.com/nginx/kubernetes-ingress/pull/7164), [7229](https://github.com/nginx/kubernetes-ingress/pull/7229), [7265](https://github.com/nginx/kubernetes-ingress/pull/7265), [7250](https://github.com/nginx/kubernetes-ingress/pull/7250), [7296](https://github.com/nginx/kubernetes-ingress/pull/7296) & [7321](https://github.com/nginx/kubernetes-ingress/pull/7321) Bump Go dependencies @@ -225,24 +266,28 @@ For full details on updating your resources, see [Update custom resource apiVers Updates have been made to our logging library. For a while, F5 NGINX Ingress Controller has been using the [golang/glog](https://github.com/golang/glog). For this release, we have moved to the native golang library [log/slog](https://pkg.go.dev/log/slog). This change was made for these reasons: + 1. By using a standard library, we ensure that updates are more consistent, and any known vulnerabilities are more likely to be addressed in a timely manner. -2. By moving to `log/slog`, we enable support for a wider range of logging formats, as well as allowing log outputs to be displayed in a Structured format, and for faster log parsing. +1. By moving to `log/slog`, we enable support for a wider range of logging formats, as well as allowing log outputs to be displayed in a Structured format, and for faster log parsing. Layer 4 applications got some love this release, with added support for SNI based routing with our TransportServer resource! 
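A minimal sketch of what SNI-based routing with a TransportServer can look like is shown below; the listener name, host, and service names are placeholders, and the example linked at the end of this note is the canonical reference:

```yaml
# Route TLS traffic by SNI to a backend service without terminating TLS at the Ingress Controller
apiVersion: k8s.nginx.org/v1
kind: TransportServer
metadata:
  name: secure-app
spec:
  listener:
    name: tls-passthrough        # built-in TLS passthrough listener
    protocol: TLS_PASSTHROUGH
  host: app.example.com          # SNI value that selects this TransportServer
  upstreams:
  - name: secure-app
    service: secure-app
    port: 8443
  action:
    pass: secure-app
```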
In scenarios where you have multiple applications hosted on a single node, this feature enables routing to those applications through the host header. For more details on what this feature does, and how to configure it yourself, please look to our [examples section in Github](https://github.com/nginx/kubernetes-ingress/tree/v4.0.0/examples/custom-resources/transport-server-sni#transportserver-sni) ### Breaking Changes + - [6903](https://github.com/nginx/kubernetes-ingress/pull/6903) & [6921](https://github.com/nginx/kubernetes-ingress/pull/6921) Add support for NGINX Plus R33 - [6800](https://github.com/nginx/kubernetes-ingress/pull/6800) Deprecate v1alpha1 CRDs for GlobalConfiguration, Policy & TransportServer - [6520](https://github.com/nginx/kubernetes-ingress/pull/6520) & [6474](https://github.com/nginx/kubernetes-ingress/pull/6474) Add structured logging ### Features + - [6605](https://github.com/nginx/kubernetes-ingress/pull/6605) TransportServer SNI - [6819](https://github.com/nginx/kubernetes-ingress/pull/6819) Add events to configmap - [6878](https://github.com/nginx/kubernetes-ingress/pull/6878) Add events when special secrets update ### Fixes + - [6583](https://github.com/nginx/kubernetes-ingress/pull/6583) Generate valid yaml for ReadOnly FS - [6635](https://github.com/nginx/kubernetes-ingress/pull/6635) UpstreamServer Fields Logs Displayed as Memory Addresses - [6661](https://github.com/nginx/kubernetes-ingress/pull/6661) Revert to original main-template without pod downtime @@ -250,10 +295,12 @@ For more details on what this feature does, and how to configure it yourself, pl - [6780](https://github.com/nginx/kubernetes-ingress/pull/6780) Use default VS and TS templates when CfgMap obj is deleted ### Helm Chart + - [6667](https://github.com/nginx/kubernetes-ingress/pull/6667) Helm schema examples - [6998](https://github.com/nginx/kubernetes-ingress/pull/6998) Update kubernetes version to v1.32.0 in helm schema ### Dependencies + - [6485](https://github.com/nginx/kubernetes-ingress/pull/6485), [6497](https://github.com/nginx/kubernetes-ingress/pull/6497), [6512](https://github.com/nginx/kubernetes-ingress/pull/6512), [6533](https://github.com/nginx/kubernetes-ingress/pull/6533), [6543](https://github.com/nginx/kubernetes-ingress/pull/6543), [6557](https://github.com/nginx/kubernetes-ingress/pull/6557), [6580](https://github.com/nginx/kubernetes-ingress/pull/6580), [6607](https://github.com/nginx/kubernetes-ingress/pull/6607), [6638](https://github.com/nginx/kubernetes-ingress/pull/6638), [6654](https://github.com/nginx/kubernetes-ingress/pull/6654), [6657](https://github.com/nginx/kubernetes-ingress/pull/6657), [6676](https://github.com/nginx/kubernetes-ingress/pull/6676), [6685](https://github.com/nginx/kubernetes-ingress/pull/6685), [6699](https://github.com/nginx/kubernetes-ingress/pull/6699), [6697](https://github.com/nginx/kubernetes-ingress/pull/6697), [6719](https://github.com/nginx/kubernetes-ingress/pull/6719), [6717](https://github.com/nginx/kubernetes-ingress/pull/6717), [6747](https://github.com/nginx/kubernetes-ingress/pull/6747), [6743](https://github.com/nginx/kubernetes-ingress/pull/6743), [6775](https://github.com/nginx/kubernetes-ingress/pull/6775), [6789](https://github.com/nginx/kubernetes-ingress/pull/6789), [6762](https://github.com/nginx/kubernetes-ingress/pull/6762), 
[6786](https://github.com/nginx/kubernetes-ingress/pull/6786), [6845](https://github.com/nginx/kubernetes-ingress/pull/6845), [6864](https://github.com/nginx/kubernetes-ingress/pull/6864), [6880](https://github.com/nginx/kubernetes-ingress/pull/6880), [6862](https://github.com/nginx/kubernetes-ingress/pull/6862), [6897](https://github.com/nginx/kubernetes-ingress/pull/6897), [6890](https://github.com/nginx/kubernetes-ingress/pull/6890), [6905](https://github.com/nginx/kubernetes-ingress/pull/6905), [6906](https://github.com/nginx/kubernetes-ingress/pull/6906), [6909](https://github.com/nginx/kubernetes-ingress/pull/6909), [6919](https://github.com/nginx/kubernetes-ingress/pull/6919), [6936](https://github.com/nginx/kubernetes-ingress/pull/6936), [6945](https://github.com/nginx/kubernetes-ingress/pull/6945), [6971](https://github.com/nginx/kubernetes-ingress/pull/6971) & [6982](https://github.com/nginx/kubernetes-ingress/pull/6982) Bump the Docker dependencies - [6483](https://github.com/nginx/kubernetes-ingress/pull/6483), [6496](https://github.com/nginx/kubernetes-ingress/pull/6496), [6522](https://github.com/nginx/kubernetes-ingress/pull/6522), [6540](https://github.com/nginx/kubernetes-ingress/pull/6540), [6559](https://github.com/nginx/kubernetes-ingress/pull/6559), [6589](https://github.com/nginx/kubernetes-ingress/pull/6589), [6614](https://github.com/nginx/kubernetes-ingress/pull/6614), [6643](https://github.com/nginx/kubernetes-ingress/pull/6643), [6669](https://github.com/nginx/kubernetes-ingress/pull/6669), [6683](https://github.com/nginx/kubernetes-ingress/pull/6683), [6704](https://github.com/nginx/kubernetes-ingress/pull/6704), [6712](https://github.com/nginx/kubernetes-ingress/pull/6712), [6728](https://github.com/nginx/kubernetes-ingress/pull/6728), [6745](https://github.com/nginx/kubernetes-ingress/pull/6745), [6767](https://github.com/nginx/kubernetes-ingress/pull/6767), [6782](https://github.com/nginx/kubernetes-ingress/pull/6782), [6815](https://github.com/nginx/kubernetes-ingress/pull/6815), [6826](https://github.com/nginx/kubernetes-ingress/pull/6826), [6835](https://github.com/nginx/kubernetes-ingress/pull/6835), [6842](https://github.com/nginx/kubernetes-ingress/pull/6842), [6861](https://github.com/nginx/kubernetes-ingress/pull/6861), [6916](https://github.com/nginx/kubernetes-ingress/pull/6916), [6908](https://github.com/nginx/kubernetes-ingress/pull/6908), [6931](https://github.com/nginx/kubernetes-ingress/pull/6931), [6969](https://github.com/nginx/kubernetes-ingress/pull/6969), [6973](https://github.com/nginx/kubernetes-ingress/pull/6973), [6988](https://github.com/nginx/kubernetes-ingress/pull/6988) & [6994](https://github.com/nginx/kubernetes-ingress/pull/6994) Bump the go dependencies @@ -273,8 +320,6 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.32. - ---- ## 3.7.2 25 Nov 2024 @@ -289,9 +334,10 @@ To ensure backwards compatibility, we will ensure the existing log format, `glog {{< call-out "important" >}} CRD version removal notice. In our next major release, `v4.0.0`, support for the following apiVersions for these listed CRDs will be dropped: + 1. 
`k8s.nginx.org/v1alpha` for `GlobalConfiguration` -2. `k8s.nginx.org/v1alpha` for `Policy` -3. `k8s.nginx.org/v1alpha` for `TransportServer` +1. `k8s.nginx.org/v1alpha` for `Policy` +1. `k8s.nginx.org/v1alpha` for `TransportServer` Prior to upgrading, **please ensure** that any of these resources deployed as `apiVersion: k8s.nginx.org/v1alpha1` are upgraded to `apiVersion: k8s.nginx.org/v1` If a resource of `kind: GlobalConfiguration`, `kind: Policy` or `kind: TransportServer` are deployed as `apiVersion: k8s.nginx.org/v1alpha1`, these resources will be **deleted** when upgrading from, at least, `v3.4.0` to `v4.0.0` @@ -300,9 +346,11 @@ When `v4.0.0` is released, the release notes will contain the required upgrade s {{< /call-out >}} ### Fixes + - [6838](https://github.com/nginx/kubernetes-ingress/pull/6838) Update oidc_template and conf ### Dependencies + - [6779](https://github.com/nginx/kubernetes-ingress/pull/6779), [6790](https://github.com/nginx/kubernetes-ingress/pull/6790) & [6851](https://github.com/nginx/kubernetes-ingress/pull/6851) Bump the Docker dependencies - [6791](https://github.com/nginx/kubernetes-ingress/pull/6791), [6849](https://github.com/nginx/kubernetes-ingress/pull/6849) & [6839](https://github.com/nginx/kubernetes-ingress/pull/6839) Bump the go dependencies @@ -321,16 +369,17 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.31. ---- ## 3.7.1 06 Nov 2024 ### Fixes + - [6735](https://github.com/nginx/kubernetes-ingress/pull/6735) Add nil check to apikey suppliedIn - [6761](https://github.com/nginx/kubernetes-ingress/pull/6761) Add OIDC fix for ID token nonce claim validation ### Dependencies + - [6545](https://github.com/nginx/kubernetes-ingress/pull/6545), [6560](https://github.com/nginx/kubernetes-ingress/pull/6560), [6560](https://github.com/nginx/kubernetes-ingress/pull/6560), [6619](https://github.com/nginx/kubernetes-ingress/pull/6619), [6640](https://github.com/nginx/kubernetes-ingress/pull/6640), [6664](https://github.com/nginx/kubernetes-ingress/pull/6664), [6686](https://github.com/nginx/kubernetes-ingress/pull/6686), [6703](https://github.com/nginx/kubernetes-ingress/pull/6703), [6720](https://github.com/nginx/kubernetes-ingress/pull/6720), [6755](https://github.com/nginx/kubernetes-ingress/pull/6755) & [6751](https://github.com/nginx/kubernetes-ingress/pull/6751) Bump the Docker dependencies - [6553](https://github.com/nginx/kubernetes-ingress/pull/6553), [6591](https://github.com/nginx/kubernetes-ingress/pull/6591), [6618](https://github.com/nginx/kubernetes-ingress/pull/6618), [6648](https://github.com/nginx/kubernetes-ingress/pull/6648), [6688](https://github.com/nginx/kubernetes-ingress/pull/6688), [6674](https://github.com/nginx/kubernetes-ingress/pull/6674), [6707](https://github.com/nginx/kubernetes-ingress/pull/6707), [6730](https://github.com/nginx/kubernetes-ingress/pull/6730) & [6751](https://github.com/nginx/kubernetes-ingress/pull/6751) Bump the go dependencies - [6570](https://github.com/nginx/kubernetes-ingress/pull/6570) & [6549](https://github.com/nginx/kubernetes-ingress/pull/6549) Bump the go version @@ -350,7 +399,6 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider 
and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.31. ---- ## 3.7.0 30 Sept 2024 @@ -363,6 +411,7 @@ The `access_log` directive can now be configured to point to a syslog log server When installing NGINX Ingress Controller via Helm, a uniquely named lease object will be created automatically. This allows for multiple deployments of NGINX Ingress Controller in the same namespace when leader election is enabled, without requiring a unique name to be specified manually for each deployment. ### Features + - [5968](https://github.com/nginx/kubernetes-ingress/pull/5968) Add BUILD_OS to Telemetry - [6014](https://github.com/nginx/kubernetes-ingress/pull/6014) Sync oidc repo - [6092](https://github.com/nginx/kubernetes-ingress/pull/6092) Support End Session Endpoint for OIDC and allow customizable Post-logout Redirect URI @@ -371,6 +420,7 @@ When installing NGINX Ingress Controller via Helm, a uniquely named lease object - [6367](https://github.com/nginx/kubernetes-ingress/pull/6367) Add ip as an option to listeners for TransportServer ### Fixes + - [5786](https://github.com/nginx/kubernetes-ingress/pull/5786) Change log level, to Info and above, before calling prometheus exporter functions - [5838](https://github.com/nginx/kubernetes-ingress/pull/5838) Fix api key policy undefined routes - [5885](https://github.com/nginx/kubernetes-ingress/pull/5885) Add default telemetry endpoint @@ -382,11 +432,13 @@ When installing NGINX Ingress Controller via Helm, a uniquely named lease object - [6446](https://github.com/nginx/kubernetes-ingress/pull/6446) Disable batch reload when batch finishes ### Helm Chart + - [5817](https://github.com/nginx/kubernetes-ingress/pull/5817) Remove include-year and includeYear flag - [5335](https://github.com/nginx/kubernetes-ingress/pull/5335) Choose NodePort values for controller.service.type = LoadBalancer - [6235](https://github.com/nginx/kubernetes-ingress/pull/6235) Update helm docs by @vepatel ### Dependencies + - [5789](https://github.com/nginx/kubernetes-ingress/pull/5789), [5804](https://github.com/nginx/kubernetes-ingress/pull/5804), [5821](https://github.com/nginx/kubernetes-ingress/pull/5821), [5870](https://github.com/nginx/kubernetes-ingress/pull/5870), [5880](https://github.com/nginx/kubernetes-ingress/pull/5880), [5907](https://github.com/nginx/kubernetes-ingress/pull/5907), [5949](https://github.com/nginx/kubernetes-ingress/pull/5949), [5959](https://github.com/nginx/kubernetes-ingress/pull/5959), [5993](https://github.com/nginx/kubernetes-ingress/pull/5993), [6010](https://github.com/nginx/kubernetes-ingress/pull/6010), [6071](https://github.com/nginx/kubernetes-ingress/pull/6071), [6105](https://github.com/nginx/kubernetes-ingress/pull/6105), [6132](https://github.com/nginx/kubernetes-ingress/pull/6132), [6186](https://github.com/nginx/kubernetes-ingress/pull/6186), [6195](https://github.com/nginx/kubernetes-ingress/pull/6195), [6200](https://github.com/nginx/kubernetes-ingress/pull/6200), [6215](https://github.com/nginx/kubernetes-ingress/pull/6215), [6229](https://github.com/nginx/kubernetes-ingress/pull/6229), [6266](https://github.com/nginx/kubernetes-ingress/pull/6266), [6283](https://github.com/nginx/kubernetes-ingress/pull/6283), [6287](https://github.com/nginx/kubernetes-ingress/pull/6287), 
[6299](https://github.com/nginx/kubernetes-ingress/pull/6299), [6310](https://github.com/nginx/kubernetes-ingress/pull/6310), [6358](https://github.com/nginx/kubernetes-ingress/pull/6358), [6364](https://github.com/nginx/kubernetes-ingress/pull/6364), [6397](https://github.com/nginx/kubernetes-ingress/pull/6397), [6412](https://github.com/nginx/kubernetes-ingress/pull/6412), [6459](https://github.com/nginx/kubernetes-ingress/pull/6459) Bump the go dependencies - [5929](https://github.com/nginx/kubernetes-ingress/pull/5929), [6337](https://github.com/nginx/kubernetes-ingress/pull/6337), [6350](https://github.com/nginx/kubernetes-ingress/pull/6350) & [6368](https://github.com/nginx/kubernetes-ingress/pull/6368) Bump the go version - [6052](https://github.com/nginx/kubernetes-ingress/pull/6052) Replace promlog with go-kit @@ -408,16 +460,17 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.31. ---- ## 3.6.2 19 Aug 2024 ### Fixes + - [6125](https://github.com/nginx/kubernetes-ingress/pull/6125) Don't log errors for not implemented grpc metrics - [6223](https://github.com/nginx/kubernetes-ingress/pull/6223) Re-order mounting debian apt source file ### Dependencies + - [5974](https://github.com/nginx/kubernetes-ingress/pull/5974), [6021](https://github.com/nginx/kubernetes-ingress/pull/6021), [5998](https://github.com/nginx/kubernetes-ingress/pull/5998), [6081](https://github.com/nginx/kubernetes-ingress/pull/6081), [6120](https://github.com/nginx/kubernetes-ingress/pull/6120), [6141](https://github.com/nginx/kubernetes-ingress/pull/6141), [6196](https://github.com/nginx/kubernetes-ingress/pull/6196), [6204](https://github.com/nginx/kubernetes-ingress/pull/6204), [6211](https://github.com/nginx/kubernetes-ingress/pull/6211), [6222](https://github.com/nginx/kubernetes-ingress/pull/6204) & [6234](https://github.com/nginx/kubernetes-ingress/pull/6234) Go dependencies - [5967](https://github.com/nginx/kubernetes-ingress/pull/5967), [6013](https://github.com/nginx/kubernetes-ingress/pull/6013), [6070](https://github.com/nginx/kubernetes-ingress/pull/6070), [6098](https://github.com/nginx/kubernetes-ingress/pull/6098), [6126](https://github.com/nginx/kubernetes-ingress/pull/6126), [6158](https://github.com/nginx/kubernetes-ingress/pull/6158), [6179](https://github.com/nginx/kubernetes-ingress/pull/6179), [6191](https://github.com/nginx/kubernetes-ingress/pull/6191), [6226](https://github.com/nginx/kubernetes-ingress/pull/6226) & [6233](https://github.com/nginx/kubernetes-ingress/pull/6233) Docker base image updates @@ -436,16 +489,17 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.31. 
---- ## 3.6.1 04 Jul 2024 ### Fixes + - [5921](https://github.com/nginx/kubernetes-ingress/pull/5921) GRPC healthcheck should not have keepalive time - [5889](https://github.com/nginx/kubernetes-ingress/pull/5889) Add default telemetry endpoint ### Dependencies + - [5930](https://github.com/nginx/kubernetes-ingress/pull/5930) Bump Go version to 1.22.5 - [5947](https://github.com/nginx/kubernetes-ingress/pull/5947), [5923](https://github.com/nginx/kubernetes-ingress/pull/5923), [5943](https://github.com/nginx/kubernetes-ingress/pull/5943), [5939](https://github.com/nginx/kubernetes-ingress/pull/5939) and [5882](https://github.com/nginx/kubernetes-ingress/pull/5882) Docker image updates - [5951](https://github.com/nginx/kubernetes-ingress/pull/5951), [5933](https://github.com/nginx/kubernetes-ingress/pull/5933), [5884](https://github.com/nginx/kubernetes-ingress/pull/5884) and [5877](https://github.com/nginx/kubernetes-ingress/pull/5877) Go dependencies update @@ -465,18 +519,18 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.30. ---- ## 3.6.0 25 Jun 2024 Added support for the latest generation of NGINX App Protect Web Application Firewall, v5. NGINX Ingress Controller will continue to support the NGINX App Protect v4 family to allow customers to implement the new Policy Bundle workflow at their own pace. -F5 WAF for NGINX v5 does not accept the JSON based policies, instead requiring users to compile a Policy Bundle outside of the NGINX Ingress Controller pod. Policy bundles contain a combination of custom Policy, signatures, and campaigns. Bundles can be compiled using either App Protect [compiler]({{< ref "/nap-waf/v5/admin-guide/compiler/" >}}), or [NGINX Instance Manager]({{< ref "/nim/nginx-app-protect/manage-waf-security-policies.md#list-security-policy-bundles" >}}). Read more in the [F5 WAF for NGINX V5]({{< ref "/nic/installation/integrations/app-protect-waf-v5/" >}}) topic. +F5 WAF for NGINX v5 does not accept JSON-based policies, instead requiring users to compile a Policy Bundle outside of the NGINX Ingress Controller pod. Policy bundles contain a combination of custom Policy, signatures, and campaigns. Bundles can be compiled using either the F5 WAF for NGINX [compiler]({{< ref "/waf/configure/compiler.md" >}}) or [NGINX Instance Manager]({{< ref "/nim/nginx-app-protect/manage-waf-security-policies.md#list-security-policy-bundles" >}}). Read more in the [F5 WAF for NGINX V5]({{< ref "/nic/installation/integrations/app-protect-waf-v5/" >}}) topic. With this release, NGINX Ingress Controller is implementing a new image maintenance policy. Container images for subscribed users will be updated on a regular basis in-between releases to reduce CVE vulnerabilities. Customers can observe the 3.6.x tag when listing images in the registry and select the latest image to update to for the current release.
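As an illustration of the new workflow, a policy bundle is compiled ahead of time and then placed in the volume that NGINX Ingress Controller reads bundles from. The image path, tag, and file names below are assumptions; follow the linked compiler documentation for the exact invocation:

```shell
# Compile a JSON policy into a bundle outside the NGINX Ingress Controller pod
# (registry path and tag are assumptions; -p is the input policy, -o the output bundle)
docker run --rm -v $(pwd):$(pwd) \
  private-registry.nginx.com/nap/waf-compiler:<version> \
  -p $(pwd)/custom-policy.json -o $(pwd)/compiled_policy.tgz
```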
### Features + - [5698](https://github.com/nginx/kubernetes-ingress/pull/5698), [5771](https://github.com/nginx/kubernetes-ingress/pull/5771) & [5784](https://github.com/nginx/kubernetes-ingress/pull/5784) Add support for F5 NGINX AppProtect WAF v5 - [5580](https://github.com/nginx/kubernetes-ingress/pull/5580) & [5752](https://github.com/nginx/kubernetes-ingress/pull/5752) Add APIKey Authentication policy - [5205](https://github.com/nginx/kubernetes-ingress/pull/5205) Preserve valid listeners when invalid listeners are present in GlobalConfiguration @@ -484,14 +538,17 @@ Customers can observe the 3.6.x tag when listing images in the registry and sele - [5406](https://github.com/nginx/kubernetes-ingress/pull/5406), [5408](https://github.com/nginx/kubernetes-ingress/pull/5408), [5418](https://github.com/nginx/kubernetes-ingress/pull/5418), [5404](https://github.com/nginx/kubernetes-ingress/pull/5404) & [5415](https://github.com/nginx/kubernetes-ingress/pull/5415) Add additional telemetry data ### Fixes + - [5350](https://github.com/nginx/kubernetes-ingress/pull/5350) Fix ap-waf flag in error message - [5318](https://github.com/nginx/kubernetes-ingress/pull/5318) Don't reload when `use-cluster-ip` endpoints update, and change the ingress `use-cluster-ip` implementation to use the cluster ip instead of the fqdn - [5375](https://github.com/nginx/kubernetes-ingress/pull/5375) Fix status for invalid vs and vsr, for weight changes dynamic reload ### Helm Chart + - [5313](https://github.com/nginx/kubernetes-ingress/pull/5313) Update helm flag in docs for enableWeightChangesDynamicReload ### Dependencies + - [5693](https://github.com/nginx/kubernetes-ingress/pull/5693) Bump Go version to v1.22.4 - [5368](https://github.com/nginx/kubernetes-ingress/pull/5368), [5331](https://github.com/nginx/kubernetes-ingress/pull/5331) & [5423](https://github.com/nginx/kubernetes-ingress/pull/5423) Bump the go dependencies - [5298](https://github.com/nginx/kubernetes-ingress/pull/5298), [5344](https://github.com/nginx/kubernetes-ingress/pull/5344), [5345](https://github.com/nginx/kubernetes-ingress/pull/5345),[5371](https://github.com/nginx/kubernetes-ingress/pull/5371), [5378](https://github.com/nginx/kubernetes-ingress/pull/5378), [5379](https://github.com/nginx/kubernetes-ingress/pull/5379), [5398](https://github.com/nginx/kubernetes-ingress/pull/5398), [5397](https://github.com/nginx/kubernetes-ingress/pull/5397), [5399](https://github.com/nginx/kubernetes-ingress/pull/5399) & [5400](https://github.com/nginx/kubernetes-ingress/pull/5400) Bump base Docker images @@ -511,7 +568,6 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes its provider and that passes the Kubernetes conformance tests. This release was fully tested on the following Kubernetes versions: 1.25-1.30. ---- ## 3.5.2 31 May 2024 @@ -526,7 +582,6 @@ versions: 1.25-1.30. 
- [5590](https://github.com/nginx/kubernetes-ingress/pull/5590), [5631](https://github.com/nginx/kubernetes-ingress/pull/5631), [5638](https://github.com/nginx/kubernetes-ingress/pull/5638), [5662](https://github.com/nginx/kubernetes-ingress/pull/5662), [5623](https://github.com/nginx/kubernetes-ingress/pull/5623) Go updates - [5579](https://github.com/nginx/kubernetes-ingress/pull/5579), [5642](https://github.com/nginx/kubernetes-ingress/pull/5642), [5573](https://github.com/nginx/kubernetes-ingress/pull/5573), [5630](https://github.com/nginx/kubernetes-ingress/pull/5630), [5665](https://github.com/nginx/kubernetes-ingress/pull/5665), [5673](https://github.com/nginx/kubernetes-ingress/pull/5673) Container base image updates - ### Upgrade - For NGINX, use the 3.5.2 images from our @@ -547,14 +602,17 @@ versions: 1.25-1.30. 08 May 2024 ### Fixes + - [5463](https://github.com/nginx/kubernetes-ingress/pull/5463) Don't reload when use-cluster-ip endpoints update - [5464](https://github.com/nginx/kubernetes-ingress/pull/5464) Fix status for invalid vs and vsr, for weight changes dynamic reload - [5470](https://github.com/nginx/kubernetes-ingress/pull/5470) Add support for named ports in ingresses which use-cluster-ip ### Helm Chart + - [5315](https://github.com/nginx/kubernetes-ingress/pull/5315) Update helm flag in docs for enableWeightChangesDynamicReload ### Dependencies + - [5511](https://github.com/nginx/kubernetes-ingress/pull/5511) & [5391](https://github.com/nginx/kubernetes-ingress/pull/5391) Go updates - [5490](https://github.com/nginx/kubernetes-ingress/pull/5490) Pin app-protect module version to 4.8.1 @@ -585,6 +643,7 @@ The [**use-cluster-ip**]({{< ref "/nic/configuration/ingress-resources/advanced- **use-cluster-ip** supports service meshes and specific use cases where the backend service should be the target instead of individual backend service pods, bypassing upstream load balancing. 
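As a short sketch, the behavior is enabled per Ingress resource with an annotation; the annotation key is assumed to be `nginx.org/use-cluster-ip` per the linked documentation, and the host and service names are placeholders:

```yaml
# Proxy to the Service's cluster IP instead of the individual pod endpoints
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webapp-ingress
  annotations:
    nginx.org/use-cluster-ip: "true"   # assumed annotation key; see the linked doc
spec:
  ingressClassName: nginx
  rules:
  - host: webapp.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: webapp-svc
            port:
              number: 80
```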
### Features + - [5179](https://github.com/nginx/kubernetes-ingress/pull/5179) & [5051](https://github.com/nginx/kubernetes-ingress/pull/5051) Add NIM Security Dashboard integration for F5 WAF for NGINX security violations - [5212](https://github.com/nginx/kubernetes-ingress/pull/5212) Weight changes Dynamic Reload - [4862](https://github.com/nginx/kubernetes-ingress/pull/4862) Add use-cluster-ip annotation for ingress resources @@ -595,15 +654,18 @@ The [**use-cluster-ip**]({{< ref "/nic/configuration/ingress-resources/advanced- - [4896](https://github.com/nginx/kubernetes-ingress/pull/4896), [5095](https://github.com/nginx/kubernetes-ingress/pull/5095), [5147](https://github.com/nginx/kubernetes-ingress/pull/5147), [5155](https://github.com/nginx/kubernetes-ingress/pull/5155), [5170](https://github.com/nginx/kubernetes-ingress/pull/5170), [5176](https://github.com/nginx/kubernetes-ingress/pull/5176), [5217](https://github.com/nginx/kubernetes-ingress/pull/5217), [5245](https://github.com/nginx/kubernetes-ingress/pull/5245), [5237](https://github.com/nginx/kubernetes-ingress/pull/5237), [5256](https://github.com/nginx/kubernetes-ingress/pull/5256), [5167](https://github.com/nginx/kubernetes-ingress/pull/5167) & [5261](https://github.com/nginx/kubernetes-ingress/pull/5261) Export Telemetry data to XCDF ### Fixes + - [5211](https://github.com/nginx/kubernetes-ingress/pull/5211) Move set above rewrite to fix uninitialized variable - [5175](https://github.com/nginx/kubernetes-ingress/pull/5175) Initialize `stopCh` channel for ExternalDNS - [5053](https://github.com/nginx/kubernetes-ingress/pull/5053) Ensure `backup` server is removed from upstreams when the Backup Service is deleted ### Helm Chart + - [5159](https://github.com/nginx/kubernetes-ingress/pull/5159) Refactor volumes and volumeMounts to common helpers - [5179](https://github.com/nginx/kubernetes-ingress/pull/5179) Move common pod label definitions to helpers ### Dependencies + - [4803](https://github.com/nginx/kubernetes-ingress/pull/4803), [4846](https://github.com/nginx/kubernetes-ingress/pull/4846), [4873](https://github.com/nginx/kubernetes-ingress/pull/4873), [4905](https://github.com/nginx/kubernetes-ingress/pull/4905), [5098](https://github.com/nginx/kubernetes-ingress/pull/5098), [5108](https://github.com/nginx/kubernetes-ingress/pull/5108), [5125](https://github.com/nginx/kubernetes-ingress/pull/5125), [5132](https://github.com/nginx/kubernetes-ingress/pull/5132), [5207](https://github.com/nginx/kubernetes-ingress/pull/5207), [5234](https://github.com/nginx/kubernetes-ingress/pull/5234), [5267](https://github.com/nginx/kubernetes-ingress/pull/5267), [5272](https://github.com/nginx/kubernetes-ingress/pull/5272) & [5218](https://github.com/nginx/kubernetes-ingress/pull/5218) Go Dependency updates - [5208](https://github.com/nginx/kubernetes-ingress/pull/5208) Bump Go version to 1.22.1 @@ -627,6 +689,7 @@ versions: 1.23-1.29. 19 Feb 2024 ### Fixes + - [5008](https://github.com/nginx/kubernetes-ingress/pull/5008) Remove redundant Prometheus variable labels - [4744](https://github.com/nginx/kubernetes-ingress/pull/4744) Fixed validation for VSR exact & regex subroutes. Thanks to [jo-carter](https://github.com/jo-carter). 
- [4832](https://github.com/nginx/kubernetes-ingress/pull/4832) Fix new lines in snippets @@ -634,9 +697,11 @@ versions: 1.23-1.29. - [5041](https://github.com/nginx/kubernetes-ingress/pull/5041) Allow waf users to build without dos repo access ### Helm Chart + - [4953](https://github.com/nginx/kubernetes-ingress/pull/4953) Add docs links to helm NOTES.txt ### Dependencies + - [5073](https://github.com/nginx/kubernetes-ingress/pull/5073), [5029](https://github.com/nginx/kubernetes-ingress/pull/5029) Bump redhat/ubi8 base image - [4992](https://github.com/nginx/kubernetes-ingress/pull/4992) Bump ubi base image - [4994](https://github.com/nginx/kubernetes-ingress/pull/4994) Bump redhat/ubi9-minimal base image @@ -667,6 +732,7 @@ versions: 1.23-1.29. 16 Jan 2024 ### Fixes + [4934](https://github.com/nginx/kubernetes-ingress/pull/4934) GCR & AWS Plus image publishing fix ### Upgrade @@ -689,6 +755,7 @@ versions: 1.23-1.29. 15 Jan 2024 ### Dependencies + [4886](https://github.com/nginx/kubernetes-ingress/pull/4886) Update N+ to R31 [4886](https://github.com/nginx/kubernetes-ingress/pull/4886) Bump Go dependencies. @@ -1363,7 +1430,6 @@ We will provide technical support for NGINX Ingress Controller on any Kubernetes 17 May 2022 ### Dependencies -the documentation here - Update Go dependencies. @@ -1916,7 +1982,7 @@ You will find the complete changelog for release 1.11.0, including bug fixes, im ### Upgrade --- For NGINX, use the 1.11.0 image from our DockerHub: `nginx/nginx-ingress:1.11.0`, `nginx/nginx-ingress:1.11.0-alpine` or `nginx-ingress:1.11.0-ubi` +- For NGINX, use the 1.11.0 image from our DockerHub: `nginx/nginx-ingress:1.11.0`, `nginx/nginx-ingress:1.11.0-alpine` or `nginx-ingress:1.11.0-ubi` - For NGINX Plus, please build your own image using the 1.11.0 source code. - For Helm, use version 0.9.0 of the chart. - [1241](https://github.com/nginx/kubernetes-ingress/pull/1241) improved the Makefile. As a result, the commands for building the Ingress Controller image were changed. See the updated commands [here]({{< ref "/nic/installation/build-nginx-ingress-controller.md" >}}). @@ -2094,7 +2160,7 @@ You will find the complete changelog for release 1.9.0, including bug fixes, imp - [1120](https://github.com/nginx/kubernetes-ingress/pull/1120) Add RateLimit policy support. - [1058](https://github.com/nginx/kubernetes-ingress/pull/1058) Support policies in VS routes and VSR subroutes. - [1147](https://github.com/nginx/kubernetes-ingress/pull/1147) Add option to specify other log destinations in AppProtect. -- [1131](https://github.com/nginx/kubernetes-ingress/pull/1131) Update packages and CRDs to AppProtect 2.0. This update includes features such as: [JSON Schema Validation]({{< ref "/nap-waf/v4/configuration-guide/configuration.md#applying-a-json-schema" >}}), [User-Defined URLs]({{< ref "/nap-waf/v4/configuration-guide/configuration.md#user-defined-urls" >}}) and [User-Defined Parameters]({{< ref "/nap-waf/v4/configuration-guide/configuration.md#user-defined-parameters" >}}). See the [release notes]({{< ref "/nap-waf/v4/releases/about-2.0.md" >}}) for a complete feature list. +- [1131](https://github.com/nginx/kubernetes-ingress/pull/1131) Update packages and CRDs to AppProtect 2.0. This update includes features such as [JSON Schema Validation]({{< ref "/waf/policies/xml-json-content.md" >}}) and [User-defined URLs and parameters]({{< ref "/waf/policies/user-urls-parameters.md" >}}). 
See the [release notes]({{< ref "/waf/changelog.md" >}}) for a complete feature list. - [1100](https://github.com/nginx/kubernetes-ingress/pull/1100) Add external references to AppProtect. - [1085](https://github.com/nginx/kubernetes-ingress/pull/1085) Add installation of threat campaigns package. - [1133](https://github.com/nginx/kubernetes-ingress/pull/1133) Add support for IngressClass resources. diff --git a/content/nic/technical-specifications.md b/content/nic/technical-specifications.md index 63c9ccce7..40140b441 100644 --- a/content/nic/technical-specifications.md +++ b/content/nic/technical-specifications.md @@ -17,7 +17,7 @@ We test NGINX Ingress Controller on a range of Kubernetes platforms for each rel {{< table >}} | NIC version | Kubernetes versions tested | NIC Helm Chart version | NIC Operator version | NGINX / NGINX Plus version | End of Technical Support | | --- | --- | --- | --- | --- | --- | -| {{< nic-version >}} | 1.26 - 1.34 | {{< nic-helm-version >}} | {{< nic-operator-version >}} | 1.29.1 / R35 | - | +| {{< nic-version >}} | 1.27 - 1.34 | {{< nic-helm-version >}} | {{< nic-operator-version >}} | 1.29.1 / R35 | - | | 5.1.1 | 1.25 - 1.33 | 2.2.2 | 3.2.3 | 1.29.1 / R35 | Aug 15, 2027 | | 5.0.0 | 1.25 - 1.32 | 2.1.0 | 3.1.0 | 1.27.4 / R34 | Apr 16, 2027 | | 4.0.1 | 1.25 - 1.32 | 2.0.1 | 3.0.1 | 1.27.4 / R33 P2 | Feb 7, 2027 | diff --git a/content/nim/admin-guide/authentication/oidc/getting-started.md b/content/nim/admin-guide/authentication/oidc/getting-started.md index 15796d1d2..a51cc25e0 100644 --- a/content/nim/admin-guide/authentication/oidc/getting-started.md +++ b/content/nim/admin-guide/authentication/oidc/getting-started.md @@ -1,11 +1,10 @@ --- -description: '' -nd-docs: DOCS-1267 title: Get started with OIDC toc: true weight: 1 -type: -- tutorial +nd-content-type: how-to +nd-product: NIM +nd-docs: DOCS-1267 --- ## Overview @@ -25,7 +24,7 @@ When using OIDC for authentication, administrators don't need to create and mana To grant users access using OIDC, follow these steps: 1. Create a role in NGINX Instance Manager. -2. Create a user group and assign a role to it. **Important**: The group name must exactly match a group name in your IdP. +2. Create a user group and assign a role to it. **The group name must exactly match a group name in your IdP**. 3. Set up OIDC. ### Create a role {#create-role} @@ -96,7 +95,6 @@ The sections below provide detailed descriptions of the OIDC configuration value For custom settings, adjust parameters such as `$oidc_authz_path_params_enable`, `$oidc_logout_query_params`, and others to match your IdP’s needs. - ## Set up specific IdPs for OIDC {#oidc-specific-idps} For specific IdP setup instructions, refer to the following: diff --git a/content/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md b/content/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md index 66ef26b36..d493bbb38 100644 --- a/content/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md +++ b/content/nim/admin-guide/authentication/oidc/microsoft-entra-setup.md @@ -1,10 +1,10 @@ --- -nd-docs: DOCS-795 title: 'Microsoft Entra: Set up OIDC authentication' toc: true weight: 100 -type: -- tutorial +nd-content-type: how-to +nd-product: NIM +nd-docs: DOCS-795 --- ## Overview @@ -18,21 +18,21 @@ To configure Microsoft Entra as an OIDC IdP, follow these steps: **Configure Microsoft Entra:** 1. Create an Application Registration for NGINX Instance Manager. -2. Add owners (users) and their email addresses to Microsoft Entra. -3. 
Create groups in Microsoft Entra and assign user membership. +1. Add owners (users) and their email addresses to Microsoft Entra. +1. Create groups in Microsoft Entra and assign user membership. **Configure NGINX Instance Manager:** 1. Add user groups to NGINX Instance Manager, using the same group names as in Microsoft Entra. -2. Configure NGINX Plus in NGINX Instance Manager to use Microsoft Entra as the designated identity provider. +1. Configure NGINX Plus in NGINX Instance Manager to use Microsoft Entra as the designated identity provider. -## Requirements +## Before you begin To successfully follow the instructions in this guide, you must complete the following requirements: 1. Create a [Microsoft Entra premium account](https://azure.microsoft.com/en-us/pricing/details/active-directory/). If you have a standard account, you'll need to upgrade. -2. [Install Instance Manager]({{< ref "/nim/deploy/vm-bare-metal/install.md" >}}) on a server that also has [NGINX Plus R25 or a newer version installed]({{< ref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). Make sure the server hosting NGINX Plus has a fully qualified domain name (FQDN). -3. [Install the NGINX JavaScript module (njs)](https://www.nginx.com/blog/introduction-nginscript/) on the same server as Instance Manager. This module is necessary for managing communications between NGINX Plus and the identity provider. +1. [Install Instance Manager]({{< ref "/nim/deploy/vm-bare-metal/install.md" >}}) on a server that also has [NGINX Plus R25 or a newer version installed]({{< ref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). Make sure the server hosting NGINX Plus has a fully qualified domain name (FQDN). +1. [Install the NGINX JavaScript module (njs)](https://www.nginx.com/blog/introduction-nginscript/) on the same server as Instance Manager. This module is necessary for managing communications between NGINX Plus and the identity provider. ## Configure Microsoft Entra {#configur-entra} @@ -43,18 +43,18 @@ Complete the steps in the section to configure Microsoft Entra for use with NGIN To register an application with Microsoft Entra: 1. Go to the [Azure portal](https://portal.azure.com/#home) and log in. -2. Select **Microsoft Entra** from the list of Azure services. -3. On the left navigation menu, under the **Manage** section, select **App registrations**. -4. Select **New registration**. -5. Provide the following details: +1. Select **Microsoft Entra** from the list of Azure services. +1. On the left navigation menu, under the **Manage** section, select **App registrations**. +1. Select **New registration**. +1. Provide the following details: - Enter a name for the application in the **Name** field, such as "NGINX Instance Manager". - Select **Account in this organizational directory only** from the list of account types. - Under the **Redirect URI** section, choose **Web** and enter the redirect URI, for example, `https:///_codexch`. {{< img src="/security/oidc/azure-register-app.png" alt="Azure: register an application." width="600" height="415" >}} -6. Select **Register**. -7. On the confirmation page, make a note of the following information. You'll need to provide this information later to complete the setup: +1. Select **Register**. +1. On the confirmation page, make a note of the following information. 
You'll need to provide this information later to complete the setup: - Application (client) ID - Directory (tenant) ID @@ -65,10 +65,10 @@ To register an application with Microsoft Entra: To create a client secret: 1. On the left navigation menu, under the **Manage** section, select **Certificates & secrets**. -2. Select **New client secret**. -3. In the **Description** box, type a description for the client secret. -4. Select **Add**. The client secret will be added to the list with a unique secret string value and ID. -5. Copy the value for the client secret. +1. Select **New client secret**. +1. In the **Description** box, type a description for the client secret. +1. Select **Add**. The client secret will be added to the list with a unique secret string value and ID. +1. Copy the value for the client secret. ### Add Owners {#az-ad-owners} @@ -87,9 +87,9 @@ To add owners (users): To include the user's group membership information in the token for authentication and authorization, follow these steps: 1. On the left navigation menu, under the **Manage** section, select **Token configuration**. -2. Select **Add groups claim**. -3. Select **Groups assigned to the application**. -4. Select **Add**. +1. Select **Add groups claim**. +1. Select **Groups assigned to the application**. +1. Select **Add**. ### Assign Group to Application {#az-ad-group} @@ -98,13 +98,13 @@ To include the user's group membership information in the token for authenticati Adding a group to the registered application will give all group members the same access. 1. On the left navigation menu, under the **Manage** section, select **Overview**. -2. In the **Essentials** section, select the link next to **Managed application in local directory**. -3. In the **Getting Started** section, select **Assign users and groups**. -4. Select **Add user/group**. -5. On the **Add Assignment** form, under the **Users and groups** section, select **None Selected**. -6. In the search box in the **Users and groups** drawer, type the name of the group you want to associate with the application. -7. Select the group from the list, and select **Select**. -8. Finally, select **Assign**. +1. In the **Essentials** section, select the link next to **Managed application in local directory**. +1. In the **Getting Started** section, select **Assign users and groups**. +1. Select **Add user/group**. +1. On the **Add Assignment** form, under the **Users and groups** section, select **None Selected**. +1. In the search box in the **Users and groups** drawer, type the name of the group you want to associate with the application. +1. Select the group from the list, and select **Select**. +1. Finally, select **Assign**. ## Configure NGINX Instance Manager {#configure-nginx-instance-manager} @@ -123,12 +123,12 @@ Configure NGINX Plus to use Microsoft Entra as the identity provider. 1. Install the NGINX JavaScript module (njs) on your NGINX Instance Manager server by running the appropriate command. This module is required for handling the interaction between NGINX Plus and Microsoft Entra (IdP). - CentOS, RHEL: - ```bash + ```shell sudo yum install nginx-plus-module-njs ``` - Debian, Ubuntu: - ```bash + ```shell sudo apt install nginx-plus-module-njs ``` @@ -142,8 +142,7 @@ Configure NGINX Plus to use Microsoft Entra as the identity provider. - `{tenant_key}`: Replace with the **Directory (tenant) ID** obtained when [registering the application](#az-ad-register-app). 
- `{client_secret}`: Replace with the encoded client secret that was generated when [creating the client secret](#az-ad-client-secret). -
      - Example openid_configuration.conf + {{< details summary="Example openid_configuration.conf" >}} ```yaml # NGINX Instance Manager - OpenID Connect configuration @@ -174,12 +173,11 @@ Configure NGINX Plus to use Microsoft Entra as the identity provider. } ``` -
      + {{< /details >}} 4. Using a text editor, open the `/etc/nginx/conf.d/nms-http.conf` configuration file and uncomment the OIDC settings starting with `#OIDC`. Comment out the Basic Authentication settings. Save the changes. -
      - Example nms-http.conf + {{< details summary="Example nms-http.conf" >}} ```yaml # NGINX Instance Manager - Instance Manager configuration @@ -195,19 +193,19 @@ Configure NGINX Plus to use Microsoft Entra as the identity provider. include /etc/nms/nginx/oidc/openid_connect.conf; ``` -
      + {{< /details >}} 5. Verify that the configuration file does not contain any errors: - ```bash + ```shell sudo nginx -t ``` 6. Reload NGINX and apply the configuration: - ```bash + ```shell sudo nginx -s reload ``` -## Try It Out +## Test Microsoft Entra OIDC 1. Open a web browser and go to the FQDN of your NGINX Instance Manager host. You will be redirected to the Microsoft Entra login page. -2. Enter your Microsoft Entra email address and password to log in. +1. Enter your Microsoft Entra email address and password to log in. diff --git a/content/nim/deploy/docker/deploy-nginx-plus-and-agent-docker.md b/content/nim/deploy/docker/deploy-nginx-plus-and-agent-docker.md index c51069d66..040dc06f2 100644 --- a/content/nim/deploy/docker/deploy-nginx-plus-and-agent-docker.md +++ b/content/nim/deploy/docker/deploy-nginx-plus-and-agent-docker.md @@ -159,7 +159,7 @@ where: - `` is the path to your private registry. - `` is the tag assigned when pushing to your registry. -Full list of CLI flags with their default values can be found in [CLI flags and environment variables]({{< ref "/nms/nginx-agent/install-nginx-agent.md#cli-flags-and-environment-variables" >}}). +A full list of CLI flags with their default values can be found in [CLI flags and environment variables]({{< ref "/agent/configuration/configuration-overview.md#cli-flags-and-environment-variables" >}}).
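For reference, here is a minimal sketch of pulling and starting the image from a private registry. The registry path (`registry.example.com/nim/nginx-plus-agent`) and tag (`v1.0`) are placeholder values, not names from this guide; substitute the registry path and tag you pushed earlier, and append any NGINX Agent CLI flags from the reference linked above as needed.

```shell
# Pull the image from your private registry (placeholder path and tag).
docker pull registry.example.com/nim/nginx-plus-agent:v1.0

# Start the container in the background. The NGINX Agent inside the
# container runs with whatever configuration was baked into the image.
docker run -d --name nginx-plus-agent registry.example.com/nim/nginx-plus-agent:v1.0
```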
      ## Connecting NGINX Plus from container to NGINX Instance Manager @@ -198,4 +198,4 @@ If the instance appears as "unmanaged", check if: ## See also - [Deploying NGINX and NGINX Plus with Docker]({{< ref "/nginx/admin-guide/installing-nginx/installing-nginx-docker.md" >}}) -- [Full list of agent environment variables]({{< ref "/nms/nginx-agent/install-nginx-agent.md#cli-flags-and-environment-variables" >}}) +- [Full list of agent environment variables]({{< ref "/agent/configuration/configuration-overview.md#cli-flags-and-environment-variables" >}}) diff --git a/content/nim/deploy/vm-bare-metal/install-nim-manual.md b/content/nim/deploy/vm-bare-metal/install-nim-manual.md index f486ee71b..9793e656d 100644 --- a/content/nim/deploy/vm-bare-metal/install-nim-manual.md +++ b/content/nim/deploy/vm-bare-metal/install-nim-manual.md @@ -1,12 +1,11 @@ --- -description: '' -nd-docs: DOCS-1211 title: Manually install any version of NGINX Instance Manager toc: true weight: 10 noindex: true -type: -- tutorial +nd-content-type: how-to +nd-product: NIM +nd-docs: DOCS-1211 --- ## Overview @@ -29,15 +28,13 @@ To install NGINX Instance Manager, you need the following: Allow external systems access by opening network firewalls. NGINX Instance Manager uses port `443` for both gRPC and API/web interfaces. ---- - ## Download Certificate and Key {#download-cert-key} Follow these steps to download the certificate and private key for NGINX Instance Manager. You'll need these files when adding the official repository for installing NGINX Instance Manager. You can also use the certificate and key when installing NGINX Plus. 1. On the host where you're installing NGINX Instance Manager, create the `/etc/ssl/nginx/` directory: - ``` bash + ```shell sudo mkdir -p /etc/ssl/nginx ``` @@ -45,43 +42,35 @@ Follow these steps to download the certificate and private key for NGINX Instanc 3. Move and rename the `.crt` and `.key` files: - ```bash + ```shell sudo mv /etc/ssl/nginx/nginx-repo.crt sudo mv /etc/ssl/nginx/nginx-repo.key ``` The downloaded filenames may vary depending on your subscription type. Modify the commands above accordingly to match the actual filenames. ---- - ## Install NGINX {#install-nginx} Install NGINX Open Source or NGINX Plus on the host where you'll install NGINX Instance Manager. NGINX Instance Manager uses NGINX as a front-end proxy and for managing user access. - [Installing NGINX and NGINX Plus]({{< ref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}) -
      - If you're installing NGINX Plus, you can use the `nginx-repo.key` and `nginx-repo.crt` that you added in the [previous section](#download-cert-key). -
      - Supported NGINX versions +{{< details summary="Supported NGINX versions">}} {{< include "nim/tech-specs/supported-nginx-versions.md" >}} -
      +{{< /details >}} -
      - Supported Linux distributions +{{< details summary="Supported Linux distributions">}} {{< include "nim/tech-specs/supported-distros.md" >}} -
      +{{< /details >}} Make sure to review the [Technical Specifications]({{< ref "/nim/fundamentals/tech-specs" >}}) guide for sizing requirements and other recommended specs. ---- - ## Configure metrics collection ### Disable metrics collection @@ -104,16 +93,12 @@ NGINX Instance Manager uses the following default values for ClickHouse. To chan {{< include "nim/clickhouse/clickhouse-defaults.md" >}} ---- - ## Add NGINX Instance Manager Repository {#add-nms-repo} To install NGINX Instance Manager, you need to add the official repository to pull the pre-compiled `deb` and `rpm` packages from. {{< include "installation/add-nms-repo.md" >}} ---- - ## Install Instance Manager {{}} @@ -122,11 +107,13 @@ To install NGINX Instance Manager, you need to add the official repository to pu 1. To install the latest version of Instance Manager, run the following command: - ```bash + ```shell sudo yum install -y nms-instance-manager ``` - > **IMPORTANT!** The Instance Manager's administrator username (default is `admin`) and generated password are displayed in the terminal during installation. You should make a note of the password and store it securely. + {{< call-out "warning" >}} + NGINX Instance Manager's administrator username (default is `admin`) and generated password are displayed in the terminal during installation. You should make a note of the password and store it securely. + {{< /call-out >}} {{%/tab%}} @@ -134,12 +121,14 @@ To install NGINX Instance Manager, you need to add the official repository to pu 1. To install the latest version of Instance Manager, run the following commands: - ```bash + ```shell sudo apt-get update sudo apt-get install -y nms-instance-manager ``` - > **IMPORTANT!** The Instance Manager's administrator username (default is `admin`) and generated password are displayed in the terminal during installation. You should make a note of the password and store it securely. + {{< call-out "warning" >}} + NGINX Instance Manager's administrator username (default is `admin`) and generated password are displayed in the terminal during installation. You should make a note of the password and store it securely. + {{< /call-out >}} {{%/tab%}} @@ -147,7 +136,7 @@ To install NGINX Instance Manager, you need to add the official repository to pu 2. Enable and start the NGINX Instance Manager platform services: - ```bash + ```shell sudo systemctl enable nms nms-core nms-dpm nms-ingestion nms-integrations --now ``` @@ -155,7 +144,7 @@ To install NGINX Instance Manager, you need to add the official repository to pu 3. Restart the NGINX web server: - ```bash + ```shell sudo systemctl restart nginx ``` @@ -169,7 +158,6 @@ To install NGINX Instance Manager, you need to add the official repository to pu {{< include "nim/installation/optional-steps/install-configure-vault.md" >}} - ### Configure SELinux {{< include "nim/installation/optional-steps/configure-selinux.md" >}} @@ -178,13 +166,10 @@ To install NGINX Instance Manager, you need to add the official repository to pu {{< include "installation/access-web-ui.md" >}} - ## Add License {{< include "nim/admin-guide/license/connected-install-license-note.md" >}} ---- - ## Upgrade Instance Manager {#upgrade-nim} {{}} @@ -192,7 +177,7 @@ To install NGINX Instance Manager, you need to add the official repository to pu 1. 
To upgrade to the latest version of the Instance Manager, run the following command: - ```bash + ```shell sudo yum update -y nms-instance-manager ``` @@ -202,7 +187,7 @@ To install NGINX Instance Manager, you need to add the official repository to pu 1. To upgrade to the latest version of the Instance Manager, run the following command: - ```bash + ```shell sudo apt-get update && \ sudo apt-get install -y --only-upgrade nms-instance-manager ``` @@ -212,7 +197,7 @@ To install NGINX Instance Manager, you need to add the official repository to pu 2. Restart the NGINX Instance Manager platform services: - ```bash + ```shell sudo systemctl restart nms ``` @@ -220,14 +205,12 @@ To install NGINX Instance Manager, you need to add the official repository to pu 3. Restart the NGINX web server: - ```bash + ```shell sudo systemctl restart nginx ``` 4. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< ref "nim/system-configuration/configure-selinux.md" >}}) guide to restore the default SELinux labels (`restorecon`) for the files and directories related to NGINX Management suite. ---- - ## Next steps - [Add NGINX Open Source and NGINX Plus instances to NGINX Instance Manager]({{< ref "nim/nginx-instances/add-instance.md" >}}) diff --git a/content/nim/disconnected/add-license-disconnected-deployment.md b/content/nim/disconnected/add-license-disconnected-deployment.md index 66c2d60ed..8434babf7 100644 --- a/content/nim/disconnected/add-license-disconnected-deployment.md +++ b/content/nim/disconnected/add-license-disconnected-deployment.md @@ -1,17 +1,10 @@ --- title: Add a license (disconnected) -draft: false -description: '' weight: 200 toc: true +nd-content-type: how-to +nd-product: NIM nd-docs: DOCS-1657 -personas: -- devops -- netops -- secops -- support -type: -- how-to --- ## Overview @@ -20,7 +13,6 @@ This guide shows you how to add a license to NGINX Instance Manager in a disconn {{< call-out "tip" "Using the REST API" "" >}}{{< include "nim/how-to-access-nim-api.md" >}}{{}} - ## Before you begin ### Set the operation mode to disconnected @@ -33,12 +25,8 @@ To configure NGINX Instance Manager for a disconnected environment, you need to {{< include "licensing-and-reporting/download-jwt-from-myf5.md" >}} - -
      - ## Add license and submit initial usage report {#add-license-submit-initial-usage-report} - {{< tabs name="submit-usage-report" >}} {{%tab name="Bash script (recommended)"%}} @@ -47,9 +35,7 @@ To configure NGINX Instance Manager for a disconnected environment, you need to To add a license and submit the initial usage report in a disconnected environment, use the provided `license_usage_offline.sh` script. Run this script on a system that can access NGINX Instance Manager and connect to `https://product.apis.f5.com/` on port `443`. Replace each placeholder with your specific values. -**Important**: The script to add a license won't work if a license has already been added. - -
      +{{< call-out "important" >}} The script to add a license won't work if a license has already been added. {{< /call-out >}} 1. {{}}[Download license_usage_offline.sh](/scripts/license_usage_offline.sh). 1. Run the following command to allow the script to run: @@ -81,7 +67,7 @@ To add a license and submit the initial usage report in a disconnected environme To license NGINX Instance Manager, complete each of the following steps in order. -**Important**: The `curl` command to add a license won't work if a license has already been added. +{{< call-out "important" >}} The `curl` command to add a license won't work if a license has already been added. {{< /call-out >}} Run these `curl` commands on a system that can access NGINX Instance Manager and connect to `https://product.apis.f5.com/` on port `443`. Replace each placeholder with your specific values. @@ -218,7 +204,6 @@ Download the initial usage report to send to F5: - On the **License > Overview** page, select **Download License Report**. - #### Submit usage report to F5 You need to submit the usage report to F5 and download the acknowledgment over REST. To do so, follow steps 5–7 in the [**REST**](#add-license-submit-initial-usage-report) tab in this section. @@ -233,5 +218,4 @@ To upload the usage acknowledgement: {{%/tab%}} - -{{
      }} +{{
      }} \ No newline at end of file diff --git a/content/nim/monitoring/metrics-api.md b/content/nim/monitoring/metrics-api.md index 2c76ffb5f..fdff54643 100644 --- a/content/nim/monitoring/metrics-api.md +++ b/content/nim/monitoring/metrics-api.md @@ -92,7 +92,7 @@ Likewise, you can get a full list of the available dimensions by querying the Ca curl -X GET --url "/api/platform/v1/analytics/catalogs/dimensions" -H "Authorization: Bearer " ``` -This information is also provided in the [Catalogs Reference]({{< ref "/nms/reference/catalogs//_index.md" >}})). +This information is also provided in the [Catalogs Reference]({{< ref "/nim/monitoring/catalogs/" >}})). ### Querying the Metrics API @@ -168,7 +168,7 @@ You must define a `startTime` when using aggregate functions. {{< /call-out >}} {{< call-out "note" >}} -The list of supported aggregate functions for any particular metric is available in the [Metrics Catalog]({{< ref "/nms/reference/catalogs//metrics.md" >}})). +The list of supported aggregate functions for any particular metric is available in the [Metrics Catalog]({{< ref "/nim/monitoring/catalogs/metrics.md" >}})). {{< /call-out>}} For example, the following query returns a single value (per dimension set), which is the sum of the metric values for the last 12 hours. To get proper values, ensure that the `endTime` is greater than the `startTime`. diff --git a/content/nim/monitoring/overview-metrics.md b/content/nim/monitoring/overview-metrics.md index 71a974ed3..cb1fe13e6 100644 --- a/content/nim/monitoring/overview-metrics.md +++ b/content/nim/monitoring/overview-metrics.md @@ -5,7 +5,7 @@ title: 'Overview: NGINX instance metrics' toc: true weight: 100 type: -- reference + - reference --- ## Overview @@ -17,7 +17,7 @@ F5 NGINX Instance Manager collects two types of data: The NGINX Agent collects metrics every 15 seconds and publishes them every 60 seconds. -For a full list of available metrics, see the [Metrics Catalog Reference]({{< ref "/nms/reference/catalogs//metrics.md" >}}). +For a full list of available metrics, see the [Metrics Catalog Reference]({{< ref "/nim/monitoring/catalogs/metrics.md" >}}). ## How metrics are collected and reported @@ -38,6 +38,12 @@ NGINX Instance Manager stores historical data in an analytics database and appli {{< include "/use-cases/monitoring/enable-nginx-oss-stub-status.md" >}} +After saving the changes, reload NGINX to apply the new configuration: + +```shell +nginx -s reload +``` + ### NGINX access log metrics Enable access logging to collect traffic metrics by parsing logs. Use the following log format: diff --git a/content/nim/monitoring/view-events-metrics.md b/content/nim/monitoring/view-events-metrics.md index 34a60bc7d..af9561e15 100644 --- a/content/nim/monitoring/view-events-metrics.md +++ b/content/nim/monitoring/view-events-metrics.md @@ -1,11 +1,11 @@ --- -description: Learn how to view events and metrics in F5 NGINX Instance Manager. -nd-docs: DOCS-847 title: View events and metrics +description: Learn how to view events and metrics in F5 NGINX Instance Manager. toc: true weight: 300 -type: -- how-to +nd-content-type: how-to +nd-product: NIM +nd-docs: DOCS-847 --- ## Overview @@ -19,9 +19,9 @@ F5 NGINX Instance Manager provides events and metrics data for your instances. Y To view events in the NGINX Instance Manager user interface, take the following steps: 1. In a web browser, go to the FQDN for your NGINX Instance Manager host and log in. -2. In the **Platform** section, select **Events**. 
The **Events** overview page lists the events from the last six hours, with the most recent event listed first. -3. You can use the filters to filter events by level and time range, and sort events by selecting the column heading. -4. Select an event from the list to view the details. +1. In the **Platform** section, select **Events**. The **Events** overview page lists the events from the last six hours, with the most recent event listed first. +1. Use the filters to narrow the event list by level and time range, and sort events by selecting a column heading. +1. Select an event from the list to view the details. ## Access Events data by using the REST API @@ -37,8 +37,7 @@ To query the Events API, send a GET request similar to the following example to curl -X GET --url "https:///api/platform/v1/analytics/events" -H "Authorization: Bearer " ``` -
      -Example Response +{{< details summary="Example response" >}} ```json { @@ -112,7 +111,7 @@ curl -X GET --url "https:///api/platform/v1/analytics/events" -H "Auth } ``` -
      +{{< /details >}} ### Filter Events with Query Parameters @@ -234,8 +233,7 @@ Querying for a unique event requires only the event's UUID. curl -X GET --url "https:///api/platform/v1/analytics/events/7cb91de6-49ae-4ddc-a8b3-3255e00b9346" -H "Authorization: Bearer " ``` -
      -Example response +{{< details summary="Example response" >}} ```json { @@ -255,8 +253,7 @@ curl -X GET --url "https:///api/platform/v1/analytics/events/7cb91de6- } ``` -
      ---- +{{< /details >}} ## View Metrics in the User Interface @@ -267,10 +264,10 @@ The **Metrics Summary** page includes a highlights section of the most important To view the metrics summary for an NGINX instance, take the following steps: 1. In a web browser, go to the FQDN for your NGINX Instance Manager host and log in. -2. Under **Modules**, select the **Instance Manager**. -3. Select an instance on the **Instances** detail page. -4. Select the **Metrics Summary** tab. -5. To view detailed metrics as graphs, select the **Metrics** tab. +1. Under **Modules**, select the **Instance Manager**. +1. Select an instance on the **Instances** detail page. +1. Select the **Metrics Summary** tab. +1. To view detailed metrics as graphs, select the **Metrics** tab. {{< call-out "note" >}} Select a time range to change the period for the metrics display. The metrics data refreshes every 30 seconds. diff --git a/content/nim/nginx-app-protect/manage-waf-security-policies.md b/content/nim/nginx-app-protect/manage-waf-security-policies.md index 925543c31..4bb8b4e7e 100644 --- a/content/nim/nginx-app-protect/manage-waf-security-policies.md +++ b/content/nim/nginx-app-protect/manage-waf-security-policies.md @@ -3,8 +3,8 @@ title: Manage and deploy WAF policies and log profiles description: Learn how to use F5 NGINX Instance Manager to manage F5 WAF for NGINX security policies and security log profiles. weight: 300 toc: true -type: how-to -product: NIM +nd-content-type: how-to +nd-product: NIM nd-docs: DOCS-1105 --- @@ -23,8 +23,6 @@ The following capabilities are available only through the Instance Manager REST - Publish security policies, log profiles, attack signatures, and threat campaigns to instances and instance groups {{< /call-out >}} ---- - ## Before you begin Before continuing, complete the following steps: @@ -50,8 +48,6 @@ To access the web interface, open a browser and go to the fully qualified domain {{< include "nim/how-to-access-nim-api.md" >}} ---- - ## Create a security policy {#create-security-policy} {{}} @@ -81,25 +77,20 @@ To upload a new security policy using the REST API, send a `POST` request to the You must encode the JSON policy using `base64`. If you send the policy in plain JSON, the request will fail. -{{}} - | Method | Endpoint | |--------|--------------------------------------| | POST | `/api/platform/v1/security/policies` | -{{}} - - For example: ```shell curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/policies \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @ignore-xss-example.json ``` -
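The `content` field shown in the JSON request below must hold the base64-encoded policy. As a minimal sketch, assuming your policy JSON is saved in a hypothetical file named `ignore-xss-policy.json`, you could produce that value like this:

```shell
# Encode the policy JSON as a single base64 string with no line breaks,
# then paste the output into the "content" field of the request body.
base64 < ignore-xss-policy.json | tr -d '\n'
```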
      -JSON Request +{{< details summary="JSON request" >}} ```json { @@ -112,10 +103,9 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/policies \ } ``` -
      +{{< /details >}} -
      -JSON Response +{{< details summary="JSON response" >}} ```json { @@ -134,36 +124,30 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/policies \ } ``` +{{< /details >}} + {{%/tab%}} {{}} ---- - ## Update a security policy - To update a security policy, send a `POST` or `PUT` request to the Security Policies API. - Use `POST` with the `isNewRevision=true` parameter to add a new version of an existing policy. - Use `PUT` with the policy UID to overwrite the existing version. - -{{}} - | Method | Endpoint | |--------|---------------------------------------------------------| | POST | `/api/platform/v1/security/policies?isNewRevision=true` | | PUT | `/api/platform/v1/security/policies/{system_id_string}` | -{{}} - - To use `POST`, include the policy metadata and content in your request: ```shell curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/policies?isNewRevision=true \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @update-xss-policy.json ``` @@ -179,22 +163,18 @@ Then include the UID in your PUT request: ```shell curl -X PUT https://{{NIM_FQDN}}/api/platform/v1/security/policies/ \ -H "Authorization: Bearer " \ - --Content-Type application/json \ + -H "Content-Type application/json" \ -d @update-xss-policy.json ``` After updating the policy, you can [publish it](#publish-policy) to selected instances or instance groups. ---- - ## Delete a security policy {{}} {{%tab name="web interface"%}} -
      - To delete a security policy using the NGINX Instance Manager web interface: 1. In your browser, go to the FQDN for your NGINX Instance Manager host and log in. @@ -219,16 +199,10 @@ To delete a security policy using the REST API: 2. Send a `DELETE` request using the policy UID: - -{{}} - | Method | Endpoint | |--------|------------------------------------------------------------| | DELETE | `/api/platform/v1/security/policies/{security-policy-uid}` | -{{}} - - Example: ```shell @@ -240,11 +214,8 @@ curl -X DELETE https://{{NIM_FQDN}}/api/platform/v1/security/policies/}} ---- - ## Create security policy bundles {#create-security-policy-bundles} - To create a security policy bundle, send a `POST` request to the Security Policy Bundles API. The policies you want to include in the bundle must already exist in NGINX Instance Manager. Each bundle includes: @@ -263,25 +234,20 @@ Each bundle includes: If you don’t include `attackSignatureVersionDateTime` or `threatCampaignVersionDateTime`, the latest versions are used by default. You can also set them explicitly by using `"latest"` as the value. - -{{}} - | Method | Endpoint | |--------|----------------------------------------------| | POST | `/api/platform/v1/security/policies/bundles` | -{{}} - Example: ```shell curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/policies/bundles \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @security-policy-bundles.json ``` -
      -JSON Request +{{< details summary="JSON request" >}} ```json { @@ -306,10 +272,9 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/policies/bundles \ } ``` -
      +{{< /details >}} -
      -JSON Response +{{< details summary="JSON response" >}} ```json { @@ -368,7 +333,7 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/policies/bundles \ } ``` ---- +{{< /details >}} ## List security policy bundles {#list-security-policy-bundles} @@ -386,16 +351,10 @@ You can use the following query parameters to filter results: If no time range is provided, the API defaults to showing bundles modified in the past 24 hours. - -{{}} - | Method | Endpoint | |--------|----------------------------------------------| | GET | `/api/platform/v1/security/policies/bundles` | -{{}} - - Example: ```shell @@ -403,8 +362,7 @@ curl -X GET https://{{NIM_FQDN}}/api/platform/v1/security/policies/bundles \ -H "Authorization: Bearer " ``` -
      -JSON Response +{{< details summary="JSON response" >}} ```json { @@ -463,7 +421,7 @@ curl -X GET https://{{NIM_FQDN}}/api/platform/v1/security/policies/bundles \ } ``` ---- +{{< /details >}} ## Get a security policy bundle {#get-security-policy-bundle} @@ -471,15 +429,10 @@ To retrieve a specific security policy bundle, send a `GET` request to the Secur You must have `"READ"` permission for the bundle to retrieve it. - -{{}} - | Method | Endpoint | |--------|-------------------------------------------------------------------------------------------------| | GET | `/api/platform/v1/security/policies/{security-policy-uid}/bundles/{security-policy-bundle-uid}` | -{{}} - Example: ```shell @@ -491,13 +444,12 @@ The response includes a content field that contains the bundle in base64 format. Example: -```bash +```shell curl -X GET "https://{{NIM_FQDN}}/api/platform/v1/security/policies//bundles/" \ -H "Authorization: Bearer " | jq -r '.content' | base64 -d > security-policy-bundle.tgz ``` -
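To confirm that the decoded file is a valid archive before you use it, you can list its contents. This assumes the `security-policy-bundle.tgz` filename from the command above:

```shell
# List the contents of the downloaded bundle without extracting it.
tar -tzf security-policy-bundle.tgz
```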
      -JSON Response +{{< details summary="JSON response" >}} ```json { @@ -518,7 +470,7 @@ curl -X GET "https://{{NIM_FQDN}}/api/platform/v1/security/policies/ } ``` ---- +{{< /details >}} ## Create a security log profile {#create-security-log-profile} @@ -526,25 +478,20 @@ To upload a new security log profile, send a `POST` request to the Security Log You must encode the log profile in `base64` before sending it. If you send plain JSON, the request will fail. -{{}} - | Method | Endpoint | |--------|-----------------------------------------| | POST | `/api/platform/v1/security/logprofiles` | -{{}} - - Example: ```shell curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/logprofiles \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @default-log-example.json ``` -
      -JSON Request +{{< details summary="JSON request" >}} ```json { @@ -555,10 +502,9 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/logprofiles \ } ``` -
      +{{< /details >}} -
      -JSON Response +{{< details summary="JSON response" >}} ```json { @@ -576,7 +522,7 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/logprofiles \ } ``` ---- +{{< /details >}} ## Update a security log profile {#update-security-log-profile} @@ -585,21 +531,17 @@ To update a security log profile, you can either: - Use `POST` with the `isNewRevision=true` parameter to add a new version. - Use `PUT` with the log profile UID to overwrite the existing version. -{{}} - | Method | Endpoint | |--------|--------------------------------------------------------------------| | POST | `/api/platform/v1/security/logprofiles?isNewRevision=true` | | PUT | `/api/platform/v1/security/logprofiles/{security-log-profile-uid}` | -{{}} - - To create a new revision: ```shell curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/logprofiles?isNewRevision=true \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @update-default-log.json ``` @@ -610,7 +552,7 @@ To overwrite an existing security log profile: ```shell curl -X PUT https://{{NIM_FQDN}}/api/platform/v1/security/logprofiles/ \ -H "Authorization: Bearer " \ - --Content-Type application/json \ + -H "Content-Type application/json" \ -d @update-log-profile.json ``` @@ -619,27 +561,20 @@ To overwrite an existing security log profile: ```shell curl -X PUT https://{{NIM_FQDN}}/api/platform/v1/security/logprofiles/ \ -H "Authorization: Bearer " \ - --Content-Type application/json \ + -H "Content-Type: application/json" \ -d @update-log-profile.json ``` After updating the security log profile, you can [publish it](#publish-policy) to specific instances or instance groups. ---- - ## Delete a security log profile {#delete-security-log-profile} To delete a security log profile, send a `DELETE` request to the Security Log Profiles API using the profile’s UID. - -{{}} - | Method | Endpoint | |--------|--------------------------------------------------------------------| | DELETE | `/api/platform/v1/security/logprofiles/{security-log-profile-uid}` | -{{}} - 1. Retrieve the UID: @@ -655,24 +590,16 @@ To delete a security log profile, send a `DELETE` request to the Security Log Pr -H "Authorization: Bearer " ``` ---- - ## Publish updates to instances {#publish-policy} Use the Publish API to push security policies, log profiles, attack signatures, and threat campaigns to NGINX instances or instance groups. Call this endpoint *after* you've created or updated the resources you want to deploy. - -{{}} - | Method | Endpoint | |--------|-------------------------------------| | POST | `/api/platform/v1/security/publish` | -{{}} - - Include the following information in your request, depending on what you're publishing: - Instance and instance group UIDs @@ -686,11 +613,11 @@ Example: ```shell curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/publish \ -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ -d @publish-request.json ``` -
      -JSON Request +{{< details summary="JSON request" >}} ```json { @@ -723,10 +650,9 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/publish \ } ``` -
      +{{< /details >}} -
      -JSON Response +{{< details summary="JSON response" >}} ```json { @@ -742,9 +668,7 @@ curl -X POST https://{{NIM_FQDN}}/api/platform/v1/security/publish \ } ``` -
      - ---- +{{< /details >}} ## Check security policy and security log profile publication status {#check-publication-status} @@ -756,14 +680,10 @@ Use the following endpoints to verify whether the configuration updates were suc To view deployment status for a specific policy, send a `GET` request to the Security Deployments Associations API using the policy name. -{{}} - | Method | Endpoint | |--------|--------------------------------------------------------------------| | GET | `/api/platform/v1/security/deployments/associations/{policy-name}` | -{{}} - Example: ```shell @@ -776,14 +696,10 @@ In the response, look for the `lastDeploymentDetails` field under instance or `i ### Check publication status for a security log profile -{{}} - | Method | Endpoint | |--------|-------------------------------------------------------------------------------------| | GET | `/api/platform/v1/security/deployments/logprofiles/associations/{log-profile-name}` | -{{}} - Example: ```shell @@ -797,14 +713,10 @@ The response also contains `lastDeploymentDetails` for each instance or group. You can also view the deployment status for a specific instance by providing the system UID and instance UID. -{{}} - | Method | Endpoint | |--------|------------------------------------------------------------------| | GET | `/api/platform/v1/systems/{system-uid}/instances/{instance-uid}` | -{{}} - Example: ```shell @@ -818,14 +730,10 @@ In the response, look for the `lastDeploymentDetails` field, which shows the dep When you use the Publish API to [publish security content](#publish-policy), NGINX Instance Manager creates a deployment ID for the request. You can use this ID to check the result of the publication. -{{}} - | Method | Endpoint | |--------|------------------------------------------------------------------| | GET | `/api/platform/v1/systems/instances/deployments/{deployment-id}` | -{{}} - Example: ```shell diff --git a/content/nim/nginx-app-protect/security-monitoring/set-up-app-protect-instances.md b/content/nim/nginx-app-protect/security-monitoring/set-up-app-protect-instances.md index 935ea0fad..94299d3d9 100644 --- a/content/nim/nginx-app-protect/security-monitoring/set-up-app-protect-instances.md +++ b/content/nim/nginx-app-protect/security-monitoring/set-up-app-protect-instances.md @@ -2,8 +2,8 @@ title: Set up F5 WAF for NGINX instances for Security Monitoring weight: 100 toc: true -type: how-to -product: NIM +nd-content-type: how-to +nd-product: NIM nd-docs: DOCS-1107 --- @@ -14,22 +14,18 @@ F5 NGINX Security Monitoring supports two main use cases: - **Security Monitoring only**: Use only the Security Monitoring module to monitor data from F5 WAF for NGINX instances. You will be able to review the security dashboards to assess potential threats and identify opportunities to fine-tune your policies. Your F5 WAF for NGINX configurations are managed outside of the NGINX Instance Manager context. - **Security Monitoring and Instance Manager**: Use the Security Monitoring module with the NGINX Instance Manager. In addition to monitoring your application security, you will be able to manage your F5 WAF for NGINX configurations and security policies in a single location and push pre-compiled updates to an instance or instance group. ---- - ## Before you begin Complete the following prerequisites before proceeding with the steps in this guide. 1. 
If you are new to F5 WAF for NGINX, follow the instructions in the installation and configuration guides to get up and running: - - [Install F5 WAF for NGINX]({{< ref "/nap-waf/v5/admin-guide/install.md" >}}) on one or more data plane instances. Each data plane instance must have connectivity to the NGINX Instance Manager host. - - [Configure F5 WAF for NGINX]({{< ref "/nap-waf/v5/configuration-guide/configuration.md#policy-configuration-overview" >}}) according to your needs on each of the data plane instance. + - [Install F5 WAF for NGINX]({{< ref "/waf/install/" >}}) on one or more data plane instances. Each data plane instance must have connectivity to the NGINX Instance Manager host. + - [Configure F5 WAF for NGINX]({{< ref "/waf/policies/configuration.md" >}}) according to your needs on each of the data plane instance. 1. Determine your use case: **Security Monitoring only** or **Security Monitoring and Configuration Management**. 1. [Upload your license]({{< ref "/nim/admin-guide/add-license.md" >}}). ---- - ## Install NGINX Agent NGINX Agent is a companion daemon for NGINX Open Source or NGINX Plus instance that provides: @@ -136,9 +132,6 @@ sudo systemctl restart nginx-agent {{< /call-out >}} - ---- - ## Create instances for Security Monitoring only Complete the steps in this section if you are only using the Security Monitoring module to monitor your application security. In this use case, you are **not using Instance Manager** to manage your WAF security policies. @@ -193,8 +186,6 @@ Repeat the steps below on each F5 WAF for NGINX data plane instance. You should now be able to view data from your NGINX App Protect instances in the NGINX Security Monitoring dashboards. ---- - ## Create instances for Security Monitoring with Instance Manager Complete the steps in this section if you want to use the Security Monitoring module **and** Instance Manager. In this use case, you will use NGINX Instance Manager to monitor threats and to manage your F5 WAF for NGINX configurations and security policies. @@ -231,8 +222,6 @@ Take the steps below to update your F5 WAF for NGINX configurations by using Ins You should now be able to view data from your F5 WAF for NGINX instances in the Security Monitoring dashboard. ---- - ## See also - [Add user access to Security Monitoring dashboards]({{< ref "/nim/nginx-app-protect/security-monitoring/give-access-to-security-monitoring-dashboards.md" >}}) diff --git a/content/nim/nginx-app-protect/security-monitoring/update-signatures.md b/content/nim/nginx-app-protect/security-monitoring/update-signatures.md index c6ef4ee9f..a16cc0573 100644 --- a/content/nim/nginx-app-protect/security-monitoring/update-signatures.md +++ b/content/nim/nginx-app-protect/security-monitoring/update-signatures.md @@ -15,22 +15,18 @@ If the Signature Database is outdated and doesn’t match the version used in F5 Follow these steps to update the Security Monitoring module with the latest Attack Signature data, ensuring the dashboards display complete and accurate information. ---- - ## Before you begin Ensure the following prerequisites are met: -- NGINX App Protect is configured, and the Security Monitoring dashboard is collecting security violations. - ---- +- F5 WAF for NGINX is configured, and the Security Monitoring dashboard is collecting security violations. ## Update the Signature Database 1. Open an SSH connection to the data plane host and log in. -1. 
Generate a Signature Report file using the [Attack Signature Report Tool]({{< ref "/nap-waf/v4/configuration-guide/configuration.md#attack-signature-report-tool" >}}). Save the file as `signature-report.json`: +1. Generate a Signature Report file using the [Attack Signature Report Tool]({{< ref "/waf/policies/attack-signatures.md" >}}). Save the file as `signature-report.json`: - ```bash + ```shell sudo /opt/app_protect/bin/get-signatures -o ./signature-report.json ``` @@ -43,7 +39,7 @@ Ensure the following prerequisites are met: 1. Restart the NGINX Instance Manager services to apply the update: - ```bash + ```shell sudo systemctl restart nms-ingestion sudo systemctl restart nms-core ``` diff --git a/content/nim/nginx-app-protect/setup-waf-config-management.md b/content/nim/nginx-app-protect/setup-waf-config-management.md index 0eef3499d..3e6f73df3 100644 --- a/content/nim/nginx-app-protect/setup-waf-config-management.md +++ b/content/nim/nginx-app-protect/setup-waf-config-management.md @@ -1,10 +1,10 @@ --- title: Set up WAF configuration management -weight: 200 -toc: true description: Learn how to set up F5 NGINX Instance Manager to manage F5 WAF for NGINX configurations, including compiler installation, security policy onboarding, and threat update management. -type: how-to -product: NIM +toc: true +weight: 200 +nd-content-type: how-to +nd-product: NIM nd-docs: DOCS-996 --- @@ -16,7 +16,7 @@ F5 NGINX Instance Manager helps you manage your F5 WAF for NGINX configurations, Make sure you've completed the following prerequisites before you get started: -- You have one or more [F5 WAF for NGINX]({{< ref "/nap-waf/" >}}) instances running. For supported versions, see [Support for F5 WAF for NGINX]({{< ref "/nim/fundamentals/tech-specs.md#support-for-nginx-app-protect-waf" >}}). +- You have one or more [F5 WAF for NGINX]({{< ref "/waf/" >}}) instances running. For supported versions, see [Support for F5 WAF for NGINX]({{< ref "/nim/fundamentals/tech-specs.md#support-for-nginx-app-protect-waf" >}}). {{< call-out "note" >}}If you're using configuration management and Security Monitoring, follow the steps in the [setup guide]({{< ref "/nim/nginx-app-protect/security-monitoring/set-up-app-protect-instances.md" >}}) to set up your F5 WAF for NGINX instances first.{{< /call-out >}} @@ -28,11 +28,9 @@ Make sure you've completed the following prerequisites before you get started: NGINX Instance Manager doesn’t support the following F5 WAF for NGINX features: -- [Policies with external references]({{< ref "/nap-waf/v4/configuration-guide/configuration.md#external-references" >}}) +- [Policies with external references]({{< ref "/waf/policies/external-references.md" >}}) - Custom signatures ---- - ## Install the WAF compiler NGINX Instance Manager can use the WAF compiler to precompile security configurations before deploying them to F5 WAF for NGINX instances. Precompiling configurations improves performance and reduces the risk of runtime errors. 
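To illustrate what precompilation produces, here is a minimal sketch using the `apcompile` tool that ships with the compiler package. NGINX Instance Manager runs the compiler for you when it publishes security content, so you normally don't invoke it by hand; the install path below assumes compiler version v5.527.0, and the policy path and output name are placeholders.

```shell
# Compile a JSON security policy into a deployable bundle (.tgz).
sudo /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile \
  -p /path/to/policy.json \
  -o policy-bundle.tgz
```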
@@ -55,6 +53,7 @@ The table below shows which WAF compiler version to use for each version of F5 W | F5 WAF for NGINX version | WAF compiler version | |-------------------------------|----------------------------| +| 5.9.0 | nms-nap-compiler-v5.527.0 | | 5.8.0 | nms-nap-compiler-v5.498.0 | | 5.7.0 | nms-nap-compiler-v5.442.0 | | 5.6.0 | nms-nap-compiler-v5.342.0 | @@ -85,18 +84,22 @@ The table below shows which WAF compiler version to use for each version of F5 W {{}} +{{< call-out "note" >}} +Beginning with v5.9.0, both the installation packages for virtual machines and those for containers are categorized under the 5.x.x tag. For previous releases, packages designed for installation on virtual machines were released as 4.x.x versions (NAP 4.15.0, NAP 4.16.0, and so on.), while packages meant for installation on containers were provided as 5.x.x versions (NAP 5.7.0, NAP 5.8.0, and so on.). +{{< /call-out >}} + ### Debian or Ubuntu To install the WAF compiler on Debian or Ubuntu, run the following command: ```shell -sudo apt-get install nms-nap-compiler-v5.498.0 +sudo apt-get install nms-nap-compiler-v5.527.0 ``` If you want to install more than one version of the WAF compiler on the same system, append the `--force-overwrite` option to the install command after the first installation: ```shell -sudo apt-get install nms-nap-compiler-v5.498.0 -o Dpkg::Options::="--force-overwrite" +sudo apt-get install nms-nap-compiler-v5.527.0 -o Dpkg::Options::="--force-overwrite" ``` {{< include "nim/nap-waf/restart-nms-integrations.md" >}} @@ -120,7 +123,7 @@ To install the WAF compiler on RHEL 8.1 : 3. Install the WAF compiler: ```shell - sudo yum install nms-nap-compiler-v5.498.0 + sudo yum install nms-nap-compiler-v5.527.0 ``` ### RHEL 9 @@ -142,7 +145,7 @@ To install the WAF compiler on RHEL 9: 3. Install the WAF compiler: ```shell - sudo yum install nms-nap-compiler-v5.498.0 + sudo yum install nms-nap-compiler-v5.527.0 ``` 4. {{< include "nim/nap-waf/restart-nms-integrations.md" >}} @@ -166,12 +169,11 @@ To install the WAF compiler on Oracle Linux 8.1: 3. Install the WAF compiler: ```shell - sudo yum install nms-nap-compiler-v5.498.0 + sudo yum install nms-nap-compiler-v5.527.0 ``` 4. {{< include "nim/nap-waf/restart-nms-integrations.md" >}} - ### Download from MyF5 If you can’t access the public NGINX repository, you can manually download the WAF compiler from [MyF5](https://my.f5.com/). @@ -242,7 +244,6 @@ error when creating the nginx repo retriever - NGINX repo certificates not found If needed, you can also [install the WAF compiler manually](#install-the-waf-compiler). - ## Install or update the WAF compiler in a disconnected environment To install the WAF compiler on a system without internet access, complete these steps: @@ -250,21 +251,20 @@ To install the WAF compiler on a system without internet access, complete these - **Step 1:** Generate the WAF compiler package on a system that has internet access. - **Step 2:** Move the generated package to the offline target system and install it. - Note : Version of NAP compiler can be referred from the table at the top of this page. -Current latest version 5.498.0 at the point of writing this document is used in below commands. +Current latest version 5.527.0 at the point of writing this document is used in below commands. {{}} {{%tab name="Ubuntu"%}} - ### Install on Ubuntu 24.04, 22.04 #### Step 1: On a system with internet access Place your `nginx-repo.crt` and `nginx-repo.key` files on this system. 
-```bash + +```shell sudo apt-get update -y sudo mkdir -p /etc/ssl/nginx/ sudo mv nginx-repo.crt /etc/ssl/nginx/ @@ -282,10 +282,10 @@ sudo wget -q -O /etc/apt/apt.conf.d/90pkgs-nginx https://cs.nginx.com/static/fil mkdir -p compiler && cd compiler sudo apt-get update -sudo apt-get download nms-nap-compiler-v5.498.0 +sudo apt-get download nms-nap-compiler-v5.527.0 cd ../ mkdir -p compiler/compiler.deps -sudo apt-get install --download-only --reinstall --yes --print-uris nms-nap-compiler-v5.498.0 | grep ^\' | cut -d\' -f2 | xargs -n 1 wget -P ./compiler/compiler.deps +sudo apt-get install --download-only --reinstall --yes --print-uris nms-nap-compiler-v5.527.0 | grep ^\' | cut -d\' -f2 | xargs -n 1 wget -P ./compiler/compiler.deps tar -czvf compiler.tar.gz compiler/ ``` @@ -295,7 +295,7 @@ tar -czvf compiler.tar.gz compiler/ Before running the steps, make sure the OS libraries are up to date, especially `glibc`. Move the `compiler.tar.gz` file from Step 1 to this system. -```bash +```shell tar -xzvf compiler.tar.gz sudo dpkg -i ./compiler/compiler.deps/*.deb sudo dpkg -i ./compiler/*.deb @@ -310,7 +310,8 @@ sudo dpkg -i ./compiler/*.deb #### Step 1: On a system with internet access Place your `nginx-repo.crt` and `nginx-repo.key` files on this system. -```bash + +```shell sudo apt-get update -y sudo mkdir -p /etc/ssl/nginx/ sudo mv nginx-repo.crt /etc/ssl/nginx/ @@ -328,11 +329,11 @@ sudo wget -q -O /etc/apt/apt.conf.d/90pkgs-nginx https://cs.nginx.com/static/fil mkdir -p compiler && cd compiler sudo apt-get update -sudo apt-get download nms-nap-compiler-v5.498.0 +sudo apt-get download nms-nap-compiler-v5.527.0 cd ../ mkdir -p compiler/compiler.deps -sudo apt-get install --download-only --reinstall --yes --print-uris nms-nap-compiler-v5.498.0 | grep ^\' | cut -d\' -f2 | xargs -n 1 wget -P ./compiler/compiler.deps +sudo apt-get install --download-only --reinstall --yes --print-uris nms-nap-compiler-v5.527.0 | grep ^\' | cut -d\' -f2 | xargs -n 1 wget -P ./compiler/compiler.deps tar -czvf compiler.tar.gz compiler/ ``` @@ -341,7 +342,7 @@ tar -czvf compiler.tar.gz compiler/ Before running the steps, make sure the OS libraries are up to date, especially `glibc`. Move the `compiler.tar.gz` file from Step 1 to this system. -```bash +```shell tar -xzvf compiler.tar.gz sudo dpkg -i ./compiler/compiler.deps/*.deb sudo dpkg -i ./compiler/*.deb @@ -349,7 +350,6 @@ sudo dpkg -i ./compiler/*.deb {{%/tab%}} - {{%tab name="RHEL9, Oracle-9 "%}} ### Install on RHEL 9 or Oracle Linux 9 @@ -359,7 +359,8 @@ sudo dpkg -i ./compiler/*.deb > For RHEL 8, you can skip the `yum-config-manager` line. Place your `nginx-repo.crt` and `nginx-repo.key` files on this system. -```bash + +```shell sudo yum update -y sudo yum install yum-utils -y sudo mkdir -p /etc/ssl/nginx/ @@ -370,7 +371,7 @@ sudo yum-config-manager --disable rhel-9-appstream-rhui-rpms sudo yum update -y sudo mkdir -p nms-nap-compiler -sudo yumdownloader --resolve --destdir=nms-nap-compiler nms-nap-compiler-v5.498.0 +sudo yumdownloader --resolve --destdir=nms-nap-compiler nms-nap-compiler-v5.527.0 tar -czvf compiler.tar.gz nms-nap-compiler/ ``` @@ -379,7 +380,7 @@ tar -czvf compiler.tar.gz nms-nap-compiler/ Before running the steps, make sure the OS libraries are up to date, especially `glibc`. Move the `compiler.tar.gz` file from Step 1 to this system. 
-```bash +```shell tar -xzvf compiler.tar.gz cd nms-nap-compiler sudo dnf install *.rpm --disablerepo=* @@ -387,7 +388,6 @@ sudo dnf install *.rpm --disablerepo=* {{%/tab%}} - {{%tab name="Redhat-8, Oracle-8"%}} ### Install on RHEL-8 or Oracle Linux 8 @@ -395,7 +395,8 @@ sudo dnf install *.rpm --disablerepo=* #### Step 1: On a system with internet access Place your `nginx-repo.crt` and `nginx-repo.key` files on this system. -```bash + +```shell sudo yum update -y sudo yum install yum-utils tar -y sudo mkdir -p /etc/ssl/nginx/ @@ -414,7 +415,7 @@ EOF sudo yum update -y sudo mkdir -p nms-nap-compiler -sudo yumdownloader --resolve --destdir=nms-nap-compiler nms-nap-compiler-v5.498.0 +sudo yumdownloader --resolve --destdir=nms-nap-compiler nms-nap-compiler-v5.527.0 tar -czvf compiler.tar.gz nms-nap-compiler/ ``` @@ -423,27 +424,23 @@ tar -czvf compiler.tar.gz nms-nap-compiler/ Before running the steps, make sure the OS libraries are up to date, especially `glibc`. Move the `compiler.tar.gz` file from Step 1 to this system. -```bash +```shell sudo yum install tar -y tar -xzvf compiler.tar.gz sudo dnf install --disablerepo=* nms-nap-compiler/*.rpm ``` - {{%/tab%}} - {{}} ---- - ## Set up attack signatures and threat campaigns F5 WAF for NGINX protects your applications using predefined and regularly updated detection patterns: -- **Attack signatures**: Known threat patterns used to detect common vulnerabilities and exploits. These are included with F5 WAF for NGINX and updated frequently to reflect the latest security threats. See the [attack signatures documentation]({{< ref "nap-waf/v5/configuration-guide/configuration.md#attack-signatures-overview" >}}) for more information. +- **Attack signatures**: Known threat patterns used to detect common vulnerabilities and exploits. These are included with F5 WAF for NGINX and updated frequently to reflect the latest security threats. See the [attack signatures documentation]({{< ref "/waf/policies/attack-signatures.md" >}}) for more information. -- **Threat campaigns**: Context-aware threat intelligence based on attack campaigns observed by F5 Threat Labs. These are updated even more frequently than attack signatures and require installation to take effect. Learn more in the [threat campaigns documentation]({{< ref "nap-waf/v5/configuration-guide/configuration.md#threat-campaigns" >}}). +- **Threat campaigns**: Context-aware threat intelligence based on attack campaigns observed by F5 Threat Labs. These are updated even more frequently than attack signatures and require installation to take effect. Learn more in the [threat campaigns documentation]({{< ref "/waf/policies/threat-campaigns.md" >}}). To take advantage of the latest updates, you must upload the attack signature and threat campaign packages to NGINX Instance Manager. @@ -467,24 +464,23 @@ Follow these steps to get and upload the certificate and key: - `nginx-repo.key` (private key) 4. Create a JSON file that includes the contents of both files. Replace newlines (`\n`) in each file with literal `\n` characters so the certificate and key can be formatted correctly inside the JSON. -
      - Example request - - ```json - { - "name": "nginx-repo", - "nginxResourceType": "NginxRepo", - "certPEMDetails": { - "caCerts": [], - "password": "", - "privateKey": "-----BEGIN PRIVATE KEY-----\n[content snipped]\n-----END PRIVATE KEY-----\n", - "publicCert": "-----BEGIN CERTIFICATE-----\n[content snipped]\n-----END CERTIFICATE-----", - "type": "PEM" - } + {{< details summary="Example request" >}} + + ```json + { + "name": "nginx-repo", + "nginxResourceType": "NginxRepo", + "certPEMDetails": { + "caCerts": [], + "password": "", + "privateKey": "-----BEGIN PRIVATE KEY-----\n[content snipped]\n-----END PRIVATE KEY-----\n", + "publicCert": "-----BEGIN CERTIFICATE-----\n[content snipped]\n-----END CERTIFICATE-----", + "type": "PEM" } - ``` + } + ``` -
      + {{< /details >}} 5. Upload the file to NGINX Instance Manager using the REST API: @@ -497,48 +493,49 @@ Follow these steps to get and upload the certificate and key: 6. If successful, you should see a response similar to this: -
      - Example response + {{< details summary="Example response" >}} + + ```json + { + "certAssignmentDetails": [], + "certMetadata": [ + { + "authorityKeyIdentifier": "", + "commonName": "", + "expired": false, + "expiry": 59789838, + "issuer": "C=US, ST=Washington, L=Seattle, Inc., O=F5 Networks\\, OU=Certificate Authority, CN=F5 PRD Issuing Certificate Authority TEEM V1", + "publicKeyType": "RSA (2048 bit)", + "serialNumber": "", + "signatureAlgorithm": "SHA256-RSA", + "subject": "CN=", + "subjectAlternativeName": "", + "subjectKeyIdentifier": "", + "thumbprint": "", + "thumbprintAlgorithm": "SHA256-RSA", + "validFrom": "2021-12-21T16:57:55Z", + "validTo": "2024-12-20T00:00:00Z", + "version": 3 + } + ], + "certPEMDetails": { + "caCerts": [], + "password": "**********", + "privateKey": "**********", + "publicCert": "[content snipped]", + "type": "PEM" + }, + "created": "2023-01-27T23:42:41.587760092Z", + "modified": "2023-01-27T23:42:41.587760092Z", + "name": "nginx-repo", + "serialNumber": "", + "uid": "d08d9f54-58dd-447a-a71d-6fa5aa0d880c", + "validFrom": "2021-12-21T16:57:55Z", + "validTo": "2024-12-20T00:00:00Z" + } + ``` - ```json - { - "certAssignmentDetails": [], - "certMetadata": [ - { - "authorityKeyIdentifier": "", - "commonName": "", - "expired": false, - "expiry": 59789838, - "issuer": "C=US, ST=Washington, L=Seattle, Inc., O=F5 Networks\\, OU=Certificate Authority, CN=F5 PRD Issuing Certificate Authority TEEM V1", - "publicKeyType": "RSA (2048 bit)", - "serialNumber": "", - "signatureAlgorithm": "SHA256-RSA", - "subject": "CN=", - "subjectAlternativeName": "", - "subjectKeyIdentifier": "", - "thumbprint": "", - "thumbprintAlgorithm": "SHA256-RSA", - "validFrom": "2021-12-21T16:57:55Z", - "validTo": "2024-12-20T00:00:00Z", - "version": 3 - } - ], - "certPEMDetails": { - "caCerts": [], - "password": "**********", - "privateKey": "**********", - "publicCert": "[content snipped]", - "type": "PEM" - }, - "created": "2023-01-27T23:42:41.587760092Z", - "modified": "2023-01-27T23:42:41.587760092Z", - "name": "nginx-repo", - "serialNumber": "", - "uid": "d08d9f54-58dd-447a-a71d-6fa5aa0d880c", - "validFrom": "2021-12-21T16:57:55Z", - "validTo": "2024-12-20T00:00:00Z" - } - ``` + {{< /details >}} #### Enable automatic downloads @@ -599,35 +596,50 @@ If you prefer not to enable automatic updates, you can manually update the Attac 4. 
Download the `.deb` or `.rpm` packages from https://pkgs.nginx.com using your F5 WAF for NGINX cert and key: - For Attack Signatures: package starts with `app-protect-attack-signatures` - Format for `.deb` package: + ```text https://pkgs.nginx.com/app-protect-security-updates//pool/nginx-plus/a/app-protect-attack-signatures/app-protect-attack-signatures_-~_amd64.deb ``` + - Example for `.deb` download: + ```shell curl --key nginx-repo.key --cert nginx-repo.crt https://pkgs.nginx.com/app-protect-security-updates/ubuntu/pool/nginx-plus/a/app-protect-attack-signatures/app-protect-attack-signatures_2025.07.24-1~noble_amd64.deb --output app-protect-attack-signatures_2025.07.24-1~noble_amd64.deb ``` + - Format for `.rpm` package: + ```text https://pkgs.nginx.com/app-protect-security-updates/centos/<8 or 9>/x86_64/RPMS/app-protect-attack-signatures--.el<8 or 9>.ngx.x86_64.rpm ``` + - Example for `.rpm` download: + ```shell curl -v --key nginx-repo.key --cert nginx-repo.crt https://pkgs.nginx.com/app-protect-security-updates/centos/8/x86_64/RPMS/app-protect-attack-signatures-2025.07.24-1.el8.ngx.x86_64.rpm --output app-protect-attack-signatures-2025.07.24-1.el8.ngx.x86_64.rpm ``` + - For Threat Campaigns: package starts with `app-protect-threat-campaigns` - Format for `.deb` package: + ```text https://pkgs.nginx.com/app-protect-security-updates//pool/nginx-plus/a/app-protect-threat-campaigns/app-protect-threat-campaigns_-~_amd64.deb ``` + - Example for `.deb` download: + ```shell curl --key nginx-repo.key --cert nginx-repo.crt https://pkgs.nginx.com/app-protect-security-updates/ubuntu/pool/nginx-plus/a/app-protect-threat-campaigns/app-protect-threat-campaigns_2025.07.29-1~noble_amd64.deb --output app-protect-threat-campaigns_2025.07.29-1~noble_amd64.deb ``` + - Format for `.rpm` package: + ```text https://pkgs.nginx.com/app-protect-security-updates/centos/<8 or 9>/x86_64/RPMS/app-protect-threat-campaigns--.el<8 or 9>.ngx.x86_64.rpm ``` + - Example for `.rpm` download: + ```shell curl -v --key nginx-repo.key --cert nginx-repo.crt https://pkgs.nginx.com/app-protect-security-updates/centos/8/x86_64/RPMS/app-protect-threat-campaigns-2025.07.29-1.el8.ngx.x86_64.rpm --output app-protect-threat-campaigns-2025.07.29-1.el8.ngx.x86_64.rpm ``` @@ -677,8 +689,6 @@ To keep the dashboards accurate and up to date, you need to update the Security For instructions, see the [update signatures guide]({{< ref "/nim/nginx-app-protect/security-monitoring/update-signatures.md" >}}). ---- - ## Set up compiler resource pruning You can configure NGINX Instance Manager to automatically remove unused compiler resources: @@ -693,8 +703,8 @@ Only the compiled bundles are removed. NGINX Instance Manager does not delete th To enable compiler resource pruning: 1. Log in to the NGINX Instance Manager host using SSH. -2. Open the `/etc/nms/nms.conf` file in a text editor. -3. Update the `policy_manager` section under `integrations` with time-to-live (TTL) values for each resource type: +1. Open the `/etc/nms/nms.conf` file in a text editor. +1. Update the `policy_manager` section under `integrations` with time-to-live (TTL) values for each resource type: ```yaml integrations: @@ -727,8 +737,6 @@ To enable compiler resource pruning: NGINX Instance Manager runs the pruning process at startup and every 24 hours after the `nms-integrations` service starts. ---- - ## Onboard F5 WAF for NGINX instances To onboard your F5 WAF for NGINX instances to NGINX Instance Manager, install and configure the NGINX Agent on each instance. 
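As a quick sketch of what that looks like on a data plane host, assuming the agent install script is served from your NGINX Instance Manager host at `/install/nginx-agent` (this path is an assumption, and `nim.example.com` is a placeholder FQDN; the install include referenced below covers the exact commands):

```shell
# Download the NGINX Agent install script from the NGINX Instance Manager
# host and run it. Use -k only if NIM still uses a self-signed certificate.
curl -k https://nim.example.com/install/nginx-agent > install.sh
sudo sh install.sh

# Optionally, add the instance to an instance group at install time:
# sudo sh install.sh --instance-group my-waf-group
```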
@@ -743,7 +751,6 @@ To onboard your F5 WAF for NGINX instances to NGINX Instance Manager, install an {{< include "agent/installation/install-agent-api.md" >}} - ### Configure NGINX Agent 1. Edit the NGINX Agent configuration file to enable support for F5 WAF for NGINX: @@ -782,13 +789,10 @@ To onboard your F5 WAF for NGINX instances to NGINX Instance Manager, install an sudo systemctl restart nginx-agent ``` - - ### Verify installation After installing and configuring the NGINX Agent, verify that your F5 WAF for NGINX instances appear in NGINX Instance Manager. - {{}} {{%tab name="UI"%}} @@ -808,16 +812,11 @@ You should now be able to view your F5 WAF for NGINX instances in the Instance M Use the REST API to confirm the version and status of F5 WAF for NGINX: -{{}} - | Method | Endpoint | |--------|------------------------------| | GET | `/api/platform/v1/instances` | | GET | `/api/platform/v1/systems` | -{{}} - - - Send a `GET` request to `/api/platform/v1/systems` to check version info: **Example response:** @@ -862,7 +861,7 @@ Use the REST API to confirm the version and status of F5 WAF for NGINX: Before configuring Docker Compose, make sure you’ve completed the following steps: -- Installed F5 WAF for NGINX v5 using the [official installation guide]({{< ref "/nap-waf/v5/admin-guide/install.md" >}}). +- Installed F5 WAF for NGINX using the [official installation guide]({{< ref "/waf/install/docker.md" >}}). - Created a `docker-compose.yaml` file during the installation process. This section explains how to modify that file so F5 WAF for NGINX can work with NGINX Instance Manager. @@ -928,8 +927,6 @@ This section explains how to modify that file so F5 WAF for NGINX can work with docker compose restart ``` ---- - ## Onboard security policies {#onboard-security-policies} NGINX Instance Manager provides the same [default security policies](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-configuration) as F5 WAF for NGINX: @@ -988,8 +985,6 @@ To upload a policy, follow these steps: The response includes a list of all security policies managed by NGINX Instance Manager. ---- - ## Add WAF configuration to NGINX instances {#add-waf-config} The [F5 WAF for NGINX configuration guide](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-configuration-overview) shows where and how to add security directives to your NGINX configuration. NGINX Instance Manager includes the same default security policies as F5 WAF for NGINX: @@ -1055,7 +1050,7 @@ If you’re using F5 WAF for NGINX v5: - JSON policies and log profiles aren’t supported. You must precompile and publish them using NGINX Instance Manager. Make sure the precompiled_publication setting in the NGINX Agent configuration is set to true. - See the [F5 WAF for NGINX configuration guide]({{< ref "/nap-waf/v5/configuration-guide/configuration.md" >}}) for details. + See the [F5 WAF for NGINX configuration guide]({{< ref "/waf/policies/configuration.md" >}}) for details. {{}} {{%tab name="UI"%}} @@ -1076,15 +1071,11 @@ If you’re using F5 WAF for NGINX v5: You can use the NGINX Instance Manager REST API to deploy your F5 WAF for NGINX configuration. 
-{{}} - | Method | Endpoint | |--------|---------------------------------------------------------------------| | GET | `/api/platform/v1/systems/{systemUID}/instances` | | POST | `/api/platform/v1/security/{systemUID}/instances/{nginxUID}/config` | -{{}} - {{< call-out "important" >}}Before deploying a configuration to an instance group, make sure all instances in the group are running the same version of F5 WAF for NGINX. Otherwise, the deployment may fail.{{< /call-out >}} 1. Send a `GET` request to the `/api/platform/v1/systems/{systemUID}/instances` endpoint to list all instances. This response includes the unique identifier (UID) of the instance that you want to update. @@ -1158,8 +1149,6 @@ To confirm that the F5 WAF for NGINX configuration was applied: 4. Select the instance. Then, scroll to the **App Protect Details** section. 5. Confirm that the **F5 WAF for NGINX** status is **Active**, and the **Build** matches the version installed on the instance. ---- - ## Troubleshooting If you're having trouble with F5 WAF for NGINX, try the steps below. If these don't solve the issue, reach out to F5 NGINX Customer Support. @@ -1183,7 +1172,7 @@ F5 WAF for NGINX and the WAF compiler shouldn't run on the same host. To check: rpm -qi | grep app-protect ``` -If F5 WAF for NGINX is installed, follow the [uninstall instructions]({{< ref "/nap-waf/v4/admin-guide/install.md#uninstall-app-protect" >}}). +If F5 WAF for NGINX is installed, follow the [uninstall instructions]({{< ref "/waf/install/uninstall.md" >}}). ### Check that the WAF compiler version matches the F5 WAF for NGINX version @@ -1207,21 +1196,21 @@ sudo /opt/nms-nap-compiler/app_protect-/bin/apcompile -h **Example:** ```shell -sudo /opt/nms-nap-compiler/app_protect-5.498.0/bin/apcompile -h +sudo /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile -h ``` **Expected output:** ```text USAGE: - /opt/nms-nap-compiler/app_protect-5.498.0/bin/apcompile + /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile Examples: - /opt/nms-nap-compiler/app_protect-5.498.0/bin/apcompile -p /path/to/policy.json -o mypolicy.tgz - /opt/nms-nap-compiler/app_protect-5.498.0/bin/apcompile -p policyA.json -g myglobal.json -o /path/to/policyA_bundle.tgz - /opt/nms-nap-compiler/app_protect-5.498.0/bin/apcompile -g myglobalsettings.json --global-state-outfile /path/to/myglobalstate.tgz - /opt/nms-nap-compiler/app_protect-5.498.0/bin/apcompile -b /path/to/policy_bundle.tgz --dump - /opt/nms-nap-compiler/app_protect-5.498.0/bin/apcompile -l logprofA.json -o /path/to/logprofA_bundle.tgz + /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile -p /path/to/policy.json -o mypolicy.tgz + /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile -p policyA.json -g myglobal.json -o /path/to/policyA_bundle.tgz + /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile -g myglobalsettings.json --global-state-outfile /path/to/myglobalstate.tgz + /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile -b /path/to/policy_bundle.tgz --dump + /opt/nms-nap-compiler/app_protect-5.527.0/bin/apcompile -l logprofA.json -o /path/to/logprofA_bundle.tgz ``` ### Confirm NGINX Agent configuration on the F5 WAF for NGINX instance @@ -1281,9 +1270,7 @@ curl --key /etc/ssl/nginx/nginx-repo.key --cert /etc/ssl/nginx/nginx-repo.crt ht ... 
``` ---- - -## What's Next +## Next steps Now that configuration management is set up, you can use the NGINX Instance Manager REST API to: diff --git a/content/nim/nginx-instances/manage-instance-groups.md b/content/nim/nginx-instances/manage-instance-groups.md index 7fcc0e8fa..0c07ce2e2 100644 --- a/content/nim/nginx-instances/manage-instance-groups.md +++ b/content/nim/nginx-instances/manage-instance-groups.md @@ -62,11 +62,11 @@ You can assign NGINX instances to instance groups in the following ways: ### Specify Instance Group in Agent-Dynamic.Conf -You can easily add instances to a default instance group that you specify. To do so, [install the NGINX Agent on an instance]({{< ref "/nms/nginx-agent/install-nginx-agent.md" >}}), then edit the `/var/lib/nginx-agent/agent-dynamic.conf` file as described below. +You can easily add instances to a default instance group that you specify. To do so, [install the NGINX Agent on an instance]({{< ref "/nginx-one/agent/install-upgrade/" >}}), then edit the `/var/lib/nginx-agent/agent-dynamic.conf` file as described below. {{< call-out "note" >}}If you're running Instance Manager 2.10.1 or earlier or NGINX Agent 2.25.1 or earlier, the `agent-dynamic.conf` file is located in `/etc/nginx-agent/`.{{< /call-out >}} -{{< call-out "important" >}}If the specified instance group doesn't already exist, the NGINX Agent installer will create it, using the current instance's config file as the group's config file. This means that all instances added to the group later will use this config as well. If you're using a script to add instances, you should consider carefully which instance to run the script on first.{{< /call-out >}} +{{< call-out "important" "Important:" >}}If the specified instance group doesn't already exist, the NGINX Agent installer will create it, using the current instance's config file as the group's config file. This means that all instances added to the group later will use this config as well. If you're using a script to add instances, you should consider carefully which instance to run the script on first.{{< /call-out >}} 1. Open a secure shell (SSH) connection to the NGINX instance and log in. 2. Open the `/var/lib/nginx-agent/agent-dynamic.conf` for editing. @@ -134,7 +134,7 @@ To add an instance to an instance group when installing the NGINX Agent: sudo sh install.sh --instance-group nginx-01 ``` -{{< call-out "important" >}} +{{< call-out "important" "Important:" >}} If the specified instance group doesn't already exist, the NGINX Agent installer will create it, using the current instance's NGINX config as the group's config file. This means that all instances added to the group later will use this config as well. If you're using a script to add instances, you should consider carefully which instance to run the script on first. {{< /call-out >}} diff --git a/content/nim/nginx-instances/scan-instances.md b/content/nim/nginx-instances/scan-instances.md index dab74144e..64ce534eb 100644 --- a/content/nim/nginx-instances/scan-instances.md +++ b/content/nim/nginx-instances/scan-instances.md @@ -1,18 +1,13 @@ --- -description: Follow the steps in this guide to scan for and discover NGINX instances. -nd-docs: DOCS-828 title: Scan and discover NGINX instances +description: Follow the steps in this guide to scan for and discover NGINX instances. 
toc: true weight: 110 -type: -- tutorial +nd-content-type: how-to +nd-product: NIM +nd-docs: DOCS-828 --- - - {{< shortversions "2.0.0" "latest" "nimvers" >}} ## Prerequisites {#prerequisites} @@ -48,8 +43,6 @@ To scan a single address, use the netmask of `/32` after the IP. This is the equ There's a CVE that's not reported for NGINX that involves [unfiltered logging](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4487). This CVE won't be fixed, has a severity of "None," and is excluded from our scans' CVE list. {{< /call-out >}} ---- - ## Scan using the API {#scan-api} To start a scan using the Instance Manager API, send a POST request similar to the following example to the Scan endpoint, `https:///api/platform/v1/servers/scan`. @@ -92,9 +85,7 @@ curl -X GET "https:///api/v1/servers" -H "accept: The result looks similar to the following: -
      - - Scan JSON response +{{< details summary="Scan JSON response" >}} ```json { @@ -271,9 +262,7 @@ The result looks similar to the following: } ``` -
      - ---- +{{< /details >}} ## Troubleshooting diff --git a/content/nim/releases/release-notes.md b/content/nim/releases/release-notes.md index cca9a67c4..f1ea5f93e 100644 --- a/content/nim/releases/release-notes.md +++ b/content/nim/releases/release-notes.md @@ -2,22 +2,18 @@ title: Release notes weight: 100 toc: true -type: reference -product: NIM +nd-content-type: reference +nd-product: NIM nd-docs: DOCS-938 --- The release notes for F5 NGINX Instance Manager highlight the latest features, improvements, and bug fixes in each release. This document helps you stay up to date with the changes and enhancements introduced to improve stability, performance, and usability. For each version, you’ll find details about new features, known issues, and resolved problems, ensuring you get the most out of your NGINX instance management experience. -
-  <summary>Support for F5 WAF for NGINX</summary> +{{< details summary="Support for F5 WAF for NGINX" >}} {{< include "nim/tech-specs/nim-app-protect-support.md" >}} -</details>
      - - ---- +{{< /details >}} ## 2.20.0 @@ -32,6 +28,7 @@ NGINX Instance Manager 2.20.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-20-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Added support to report on multiple NGINX One subscriptions using a single instance of NGINX Instance Manager** @@ -54,8 +51,8 @@ This release includes the following updates: NGINX Instance Manager now sends improved web analytics to F5. This helps F5 understand common use cases of NGINX Instance Manager and improve functionality. - ### Changes in Default Behavior{#2-20-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Breadcrumbs added for Overview, Manage, and Config templates** @@ -74,26 +71,20 @@ This release has the following changes in default behavior: - {{% icon-feature %}} **Automatic feature enablement in disconnected instances on license upload** - NGINX Instance Manager now enables all features by default when you upload a license to a disconnected instance. This update ensures customers can begin using the full capabilities of the system without additional configuration. Customers have a 90-day window to complete the full license process. If they don’t complete it within this period, the instance is automatically deactivated. - - ### Resolved Issues{#2-20-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. - {{% icon-resolved %}} The certificate stats are not displayed correctly in the Certificates and Keys page as well as the Dashboard page. (45991) - ### Known Issues{#2-20-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.19.2 May 06, 2025 @@ -107,20 +98,17 @@ NGINX Instance Manager 2.19.2 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-19-2-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Stability and performance improvements** This release includes stability and performance improvements for a more reliable experience. - - ### Known Issues{#2-19-2-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.19.1 March 27, 2025 @@ -134,13 +122,13 @@ NGINX Instance Manager 2.19.1 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-19-1-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Stability and performance improvements** This release includes stability and performance improvements for a more reliable experience. - ### Resolved Issues{#2-19-1-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. 
Use your browser's search function to find the issue ID in the page. @@ -148,13 +136,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Publishing the NAP policy fails with the error “The attack signatures with the given version was not found” (45845) - {{% icon-resolved %}} Automatic downloading of NAP compiler versions 5.210.0 and 5.264.0 fails on Ubuntu 24.04 (45846) - ### Known Issues{#2-19-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.19.0 February 06, 2025 @@ -174,6 +159,7 @@ NGINX Instance Manager 2.19 is the first iteration of NGINX Instance Manager as Instance Manager 2.19 will not be compatible or supported with EoS API Connectivity Manager. API Connectivity Manager users get support of Instance Manager up to 2.18 and upgrades to Instance Manager 2.19 will not succeed if API Connectivity Manager is installed. ### What's New{#2-19-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **NGINX Instance Manager is now a standalone product** @@ -206,8 +192,8 @@ This release includes the following updates: We have added a new option to export templates using the NGINX Instance Manager web interface. - ### Changes in Default Behavior{#2-19-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Prompt to specify an FQDN for NIM when generating SSL certificates during installation** @@ -224,7 +210,6 @@ This release has the following changes in default behavior: Starting in 2.19.0, remote certificates that are expired are removed from the web interface after 30 days. - ### Resolved Issues{#2-19-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -235,13 +220,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} NGINX configuration error messages overlap outside the error window (45570) - {{% icon-resolved %}} Syntax errors while saving template configuration (45573) - ### Known Issues{#2-19-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.18.0 November 08, 2024 @@ -255,6 +237,7 @@ NGINX Instance Manager 2.18.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-18-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Resilient Docker Compose NGINX Instance Manager deployment** @@ -273,7 +256,7 @@ This release includes the following updates: - **Send the report to F5**: Submit the report to F5 for verification from a location with internet access. - **Upload the acknowledgment**: After verification, upload the acknowledgment from F5 to NGINX Instance Manager. 
-- {{% icon-feature %}} **Ridiculously easy NGINX Instance Manager installation script (Bash)** +- {{% icon-feature %}} **Ridiculously easy NGINX Instance Manager installation script (shell)** Reduce the number of steps to deploy all NGINX Instance Manager components, including prerequisites, using a single [installation script]({{< ref "nim/deploy/vm-bare-metal/install.md" >}}). The script supports every OS that NGINX Instance Manager supports in the [technical specifications]({{< ref "nim/fundamentals/tech-specs.md" >}}). @@ -283,19 +266,18 @@ This release includes the following updates: - {{% icon-feature %}} **Adds support for F5 WAF for NGINX v5.3 and v4.11** - NGINX Instance Manager 2.18.0 adds support for [F5 WAF for NGINX v5.3 and v4.11]({{< relref "nap-waf/v5/admin-guide/overview.md" >}}). + NGINX Instance Manager 2.18.0 adds support for [F5 WAF for NGINX v5.3 and v4.11]({{< ref "/waf/changelog" >}}). F5 WAF for NGINX v5, designed for both NGINX Open Source and NGINX Plus environments, includes a dynamic NGINX module and containerized WAF services. It provides robust security and scalability. - ### Changes in Default Behavior{#2-18-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **The NGINX Usage page now only shows instances configured with the NGINX Plus R33 mgmt block.** The “NGINX Usage” page previously displayed instances connected to NGINX Instance Manager through multiple methods, including the NGINX Agent, health checks, and the `mgmt` block in NGINX Plus R31-R32. With the introduction of native reporting in NGINX Plus R33, only instances using this feature appear on the page, preventing duplicates. For more information on R33 usage reporting, see [About subscription licenses]({{< ref "solutions/about-subscription-licenses.md" >}}). - ### Resolved Issues{#2-18-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -304,13 +286,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Failure to notify user when template configuration publish fails (44975) - {{% icon-resolved %}} Mismatch in date formats in custom date selection on NGINX usage graph (45512) - ### Known Issues{#2-18-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.17.4 November 06, 2024 @@ -324,20 +303,17 @@ NGINX Instance Manager 2.17.4 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-17-4-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Stability and performance improvements** This release includes stability and performance improvements. - - ### Known Issues{#2-17-4-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.17.3 September 13, 2024 @@ -351,14 +327,13 @@ NGINX Instance Manager 2.17.3 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. 
### What's New{#2-17-3-whats-new} + This release includes the following updates: - {{% icon-feature %}} **GPG key update for NGINX Agent packages** Previous releases of NGINX Instance Manager included NGINX Agent packages signed with an expired GPG key. This release of NGINX Instance Manager includes updated keys, allowing users to successfully download the NGINX Agent from NGINX Instance Manager. - - ### Known Issues{#2-17-3-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. @@ -378,20 +353,17 @@ NGINX Instance Manager 2.17.2 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-17-2-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Stability and performance improvements** This release includes stability and performance improvements for a more reliable experience. - - ### Known Issues{#2-17-2-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.17.1 July 24, 2024 @@ -405,20 +377,17 @@ NGINX Instance Manager 2.17.1 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-17-1-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Stability and performance improvements** This release includes stability and performance improvements for a more reliable experience. - - ### Known Issues{#2-17-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.17.0 July 10, 2024 @@ -432,6 +401,7 @@ NGINX Instance Manager 2.17.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-17-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Adds support for F5 WAF for NGINX v5** @@ -460,8 +430,8 @@ This release includes the following updates: This release includes access to a single Docker image for running NGINX Instance Manager as a container. This allows customers to deploy Instance Manager locally with a single "docker run" command. For more details, see [Deploy NGINX Instance Manager in a Single Docker Container]({{< ref "/nim/deploy/docker/deploy-nginx-instance-manager-docker-compose.md" >}}). - ### Changes in Default Behavior{#2-17-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Web Analytics** @@ -482,7 +452,6 @@ This release has the following changes in default behavior: Please upgrade your environment to one of the [supported distributions]({{< ref "/nim/fundamentals/tech-specs.md#supported-distributions" >}}) to continue using NGINX Instance Manager. - ### Resolved Issues{#2-17-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -492,13 +461,10 @@ This release fixes the following issues. 
Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Editing template submissions uses the latest versions, may cause "malformed" errors (44961) - {{% icon-resolved %}} Editing template submissions now allows for using most recent template version (44971) - ### Known Issues{#2-17-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.16.0 April 18, 2024 @@ -512,6 +478,7 @@ NGINX Instance Manager 2.16.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-16-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Introducing configuration templates for simplifying NGINX configurations and self-service workflows** @@ -524,28 +491,24 @@ This release includes the following updates: This release enhances system stability and performance. - ### Changes in Default Behavior{#2-16-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Change in NGINX Agent upgrade behavior** Starting from version v2.31.0, the NGINX Agent will automatically restart itself during an upgrade. - ### Resolved Issues{#2-16-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. - {{% icon-resolved %}} Upgrading to 2.12 disables telemetry (43606) - ### Known Issues{#2-16-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.15.1 February 14, 2024 @@ -559,13 +522,13 @@ NGINX Instance Manager 2.15.1 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-15-1-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Stability and performance improvements** This release includes stability and performance improvements. - ### Resolved Issues{#2-15-1-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -573,13 +536,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Helm chart backup and restore is broken in NIM 2.15.0 (44758) - {{% icon-resolved %}} Unable to use NMS Predefined Log Profiles for NAP 4.7 (44759) - ### Known Issues{#2-15-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.15.0 December 12, 2023 @@ -593,13 +553,13 @@ NGINX Instance Manager 2.15.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. 
### What's New{#2-15-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Support for CA Certificates added** Instance Manager now allows for managing CA Certificates to fully support NGINX directives such as _proxy_ssl_trusted_ and _proxy_ssl_verify_. The main difference after this change is that you no longer need a corresponding key to upload a certificate to Instance Manager. - ### Resolved Issues{#2-15-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -610,13 +570,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Missing Data when ClickHouse services are not running (44586) - {{% icon-resolved %}} NGINX App Protect Attack Signature, Threat Campaign and Compiler fail to download (44603) - ### Known Issues{#2-15-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.14.1 October 19, 2023 @@ -630,20 +587,17 @@ NGINX Instance Manager 2.14.1 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-14-1-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Stability and performance improvements** This release includes stability and performance improvements. - - ### Known Issues{#2-14-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.14.0 October 16, 2023 @@ -657,6 +611,7 @@ NGINX Instance Manager 2.14.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-14-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Instance Manager Dashboard** @@ -672,15 +627,14 @@ This release includes the following updates: This release of Instance Manager has been tested and is compatible with Clickhouse LTS versions 22.3.15.33 to 23.8. - ### Changes in Default Behavior{#2-14-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Inactive NGINX instances are automatically removed over time** If an NGINX instance has been inactive (NGINX Agent not reporting to NGINX Management Suite) for a fixed amount of time, it is now automatically removed from the instances list. Instances deployed in a virtual machine or hardware are removed after 72 hours of inactivity, and those deployed in a container are removed after 12 hours. - ### Resolved Issues{#2-14-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -691,13 +645,10 @@ This release fixes the following issues. 
Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} NGINX Agent does not report NGINX App Protect status (44531) - {{% icon-resolved %}} Issues sorting HTTP errors in the dashboard (44536) - ### Known Issues{#2-14-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.13.1 September 05, 2023 @@ -717,13 +668,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Validation errors in Resource Groups for certificates uploaded before 2.13 upgrade (44254) - {{% icon-resolved %}} Access levels cannot be assigned to certain RBAC features (44277) - ### Known Issues{#2-13-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.13.0 August 28, 2023 @@ -737,6 +685,7 @@ NGINX Instance Manager 2.13.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-13-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Easily manage access to specific objects with Resource Groups** @@ -773,7 +722,6 @@ This release includes the following updates: In the log output, extra whitespace has been removed, and brackets have been removed from the log `level` field. This results in clean, parsable log output, particularly when using JSON log encoding. - ### Resolved Issues{#2-13-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -782,13 +730,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Error: "Failed to create secret" when reinstalling or upgrading NGINX Management Suite in Kubernetes (42967) - {{% icon-resolved %}} An "unregistered clickhouse-adapter" failure is logged every few seconds if logging is set to debug. (43438) - ### Known Issues{#2-13-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.12.0 July 20, 2023 @@ -802,13 +747,13 @@ NGINX Instance Manager 2.12.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-12-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **New support for license tokens for automatic entitlement updates, renewals, and Flexible Consumption Reporting** NGINX Management Suite now supports license tokens formatted as a JSON Web Token (JWT). With JWT licensing, you can automatically update entitlements during subscription renewals or amendments, and you can automate reporting for the Flexible Consumption Program (FCP). For more information, see the [Add a License]({{< ref "/nim/admin-guide/add-license.md" >}}) topic. - ### Resolved Issues{#2-12-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. 
@@ -821,13 +766,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} A JWT license for an expired subscription cannot be terminated from the web interface (43580) - {{% icon-resolved %}} On Kubernetes, uploading a JWT license for NGINX Management Suite results in the error "secret not found" (43655) - ### Known Issues{#2-12-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.11.0 June 12, 2023 @@ -841,6 +783,7 @@ NGINX Instance Manager 2.11.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-11-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **The config editor now lets you see auxiliary files** @@ -867,8 +810,8 @@ This release includes the following updates: In order to enhance product development and support the success of our users with NGINX Management Suite, we offer the option to send limited telemetry data to F5 NGINX. This data provides valuable insights into software usage and adoption. By default, telemetry is enabled, but you have the flexibility to disable it through the web interface or API. For detailed information about the transmitted data, please refer to our documentation. - ### Changes in Default Behavior{#2-11-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **The location of agent-dynamic.conf has changed** @@ -895,7 +838,6 @@ This release has the following changes in default behavior: These changes aim to improve the overall security of the system by restricting access to sensitive configuration files while maintaining necessary privileges for authorized users. - ### Resolved Issues{#2-11-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -907,13 +849,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Certificate file is not updated automatically under certain conditions (42425) - {{% icon-resolved %}} Certificate updates allow for multiples certs to share the same serial number (42429) - ### Known Issues{#2-11-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.10.1 May 22, 2023 @@ -932,13 +871,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Valid licenses incorrectly identified as invalid (42598) - ### Known Issues{#2-10-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.10.0 April 26, 2023 @@ -952,6 +888,7 @@ NGINX Instance Manager 2.10.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. 
### What's New{#2-10-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **New "Category" Filter in the Events web interface** @@ -987,8 +924,8 @@ This release includes the following updates: The NGINX Agent installation script now has a flag to enable the default configuration required for the Security Monitoring module. - ### Changes in Default Behavior{#2-10-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Improvements to Role Based Access Control for SSL Certificate and Key management** @@ -999,7 +936,6 @@ This release has the following changes in default behavior: When NGINX Management Suite is installed using a Helm Chart, it now defaults to a ClusterIP without an external IP address. - ### Resolved Issues{#2-10-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -1008,13 +944,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Configuration changes for NGINX Agent take longer than expected. (41257) - {{% icon-resolved %}} SELinux errors encountered when starting NGINX Management Suite on RHEL9 with the SELinux policy installed (41327) - ### Known Issues{#2-10-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.9.1 April 06, 2023 @@ -1033,7 +966,6 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} NGINX configurations with special characters may not be editable from the web interface after upgrading Instance Manager (41557) - ### Known Issues{#2-9-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. @@ -1053,6 +985,7 @@ NGINX Instance Manager 2.9.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-9-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **New webpages for viewing Attack Signature and Threat Campaigns** @@ -1081,7 +1014,6 @@ This release includes the following updates: - [Publish Configs with Hash Versioning to Instances]({{< ref "/nim/nginx-configs/publish-configs.md#publish-configs-instances-hash-versioning" >}}) - [Publish Configs with Hash Versioning to Instance Groups]({{< ref "/nim/nginx-configs/publish-configs.md#publish-configs-instance-groups-hash-versioning" >}}) - ### Security Updates{#2-9-0-security-updates} {{< call-out "important" >}} @@ -1100,7 +1032,7 @@ This release includes the following security updates: #### Mitigation - - Avoid configuring trace-level logging in the NGINX Agent configuration file. For more information, refer to the [Configuring the NGINX Agent]({{< ref "/nms/nginx-agent/install-nginx-agent.md#configuring-the-nginx-agent ">}}) section of NGINX Management Suite documentation. If trace-level logging is required, ensure only trusted users have access to the log files. + - Avoid configuring trace-level logging in the NGINX Agent configuration file. 
For more information, refer to the [Configuring the NGINX Agent]({{< ref "/nginx-one/agent/configure-instances/configuration-overview/">}}) section of the documentation. If trace-level logging is required, ensure only trusted users have access to the log files. #### Fixed in @@ -1109,8 +1041,8 @@ This release includes the following security updates: For more information, refer to the MyF5 article [K000133135](https://my.f5.com/manage/s/article/K000133135). - ### Changes in Default Behavior{#2-9-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **SSL Certificates can be associated with Instance Groups** @@ -1143,18 +1075,16 @@ This release has the following changes in default behavior: 1. Save the file. 1. Restart NGINX Management Suite: - ```bash + ```shell sudo systemctl restart nms ``` 1. Restart the NGINX web server: - ```bash + ```shell sudo systemctl restart nginx ``` -
      - #### Option 2 1. Before upgrading Instance Manager, edit the following files with your desired OIDC configuration settings: @@ -1168,17 +1098,16 @@ This release has the following changes in default behavior: 1. After the upgrade finishes replace `etc/nms/nginx/oidc/openid_connect.js` with `openid_connect.js.dpkg-dist`. 1. Restart NGINX Management Suite: - ```bash + ```shell sudo systemctl restart nms ``` 1. Restart the NGINX web server: - ```bash + ```shell sudo systemctl restart nginx ``` - ### Resolved Issues{#2-9-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -1193,13 +1122,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Precompiled Publication setting is reverted to false after error publishing NGINX App Protect policy (40484) - {{% icon-resolved %}} Upgrading NGINX Management Suite may remove the OIDC configuration for the platform (41328) - ### Known Issues{#2-9-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.8.0 January 30, 2023 @@ -1213,6 +1139,7 @@ NGINX Instance Manager 2.8.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-8-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Enhanced details page for SSL Certificates** @@ -1227,8 +1154,8 @@ This release includes the following updates: The messaging around [security policy compilation errors]({{< ref "/nim/nginx-app-protect/manage-waf-security-policies.md#check-for-compilation-errors" >}}) has been improved by providing more detailed information and alerting users if the required compiler version is missing. - ### Changes in Default Behavior{#2-8-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Switching between storing secrets on disk and using Vault migrates secrets** @@ -1252,7 +1179,6 @@ This release has the following changes in default behavior: precompiled_publication: true ``` - ### Resolved Issues{#2-8-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -1272,13 +1198,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Automatic downloads of attack signatures and threat campaigns are not supported on CentOS 7, RHEL 7, or Amazon Linux 2 (40396) - {{% icon-resolved %}} The API Connectivity Manager module won't load if the Security Monitoring module is enabled (44433) - ### Known Issues{#2-8-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.7.0 December 20, 2022 @@ -1292,6 +1215,7 @@ NGINX Instance Manager 2.7.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. 
### Changes in Default Behavior{#2-7-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **NGINX App Protect upgrades are supported** @@ -1306,7 +1230,6 @@ This release has the following changes in default behavior: When upgrading NGINX Agent, the existing NGINX Agent configuration is maintained during the upgrade. If the Agent configuration is not present in `/etc/nginx-agent/nginx-agent.conf`, a default configuration is provided after NGINX Agent installation. - ### Resolved Issues{#2-7-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -1320,13 +1243,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} When upgrading a multi-node NMS deployment with helm charts the ingestion pod may report a "Mismatched migration version" error (38880) - {{% icon-resolved %}} After a version upgrade of NGINX Instance Manager, NMS Data Plane Manager crashes if you publish NGINX configuration with App Protect enablement directive (app_protect_enable) set to ON (38904) - ### Known Issues{#2-7-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.6.0 November 17, 2022 @@ -1340,6 +1260,7 @@ NGINX Instance Manager 2.6.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-6-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Manage and deploy configurations to F5 WAF for NGINX Instances** @@ -1367,15 +1288,14 @@ This release includes the following updates: Oracle 8 is now [a supported distribution]({{< ref "/nim/fundamentals/tech-specs#distributions" >}}) starting with Instance Manager 2.6. You can use the RedHat/CentOS distro to install the Oracle 8 package. - ### Changes in Default Behavior{#2-6-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **GET Roles API responses now include user and group associations** `GET /roles` and `GET/roles/{roleName}` API responses include any user(s) or group(s) associated with a role now. - ### Resolved Issues{#2-6-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -1383,13 +1303,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Password error "option unknown" occurs when installing NGINX Instance Manager on Ubuntu with OpenSSL v1.1.0 (33055) - {{% icon-resolved %}} Instance Manager reports the F5 WAF for NGINX build number as the version (37510) - ### Known Issues{#2-6-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.5.1 October 11, 2022 @@ -1408,13 +1325,10 @@ This release fixes the following issues. 
Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Extended NGINX metrics aren't reported for NGINX Plus R26 and earlier (37738) - ### Known Issues{#2-5-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.5.0 October 04, 2022 @@ -1428,6 +1342,7 @@ NGINX Instance Manager 2.5.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-5-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Track NGINX Plus usage over time** @@ -1438,7 +1353,6 @@ This release includes the following updates: Each release of Instance Manager now includes a helm chart, which you can use to easily [install Instance Manager on Kubernetes]({{< ref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}). You can download the helm charts from [MyF5](https://my.f5.com/manage/s/downloads). - ### Resolved Issues{#2-5-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -1447,13 +1361,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Managed certificates may be overwritten if they have the same name on different datapath certificates (36240) - {{% icon-resolved %}} Scan overview page doesn't scroll to show the full list of instances (36514) - ### Known Issues{#2-5-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.4.0 August 16, 2022 @@ -1467,6 +1378,7 @@ NGINX Instance Manager 2.4.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-4-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Get notified about critical events** @@ -1477,8 +1389,8 @@ This release includes the following updates: Now, when you [view your NGINX Plus inventory]({{< ref "/nim/admin-guide/report-usage-connected-deployment.md" >}}), you can see which instances have [NGINX App Protect](https://www.nginx.com/products/nginx-app-protect/) installed. NGINX App Protect is a modern app‑security solution that works seamlessly in DevOps environments as a robust WAF or app‑level DoS defense, helping you deliver secure apps from code to customer. - ### Changes in Default Behavior{#2-4-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **You no longer need to associate a certificate with an instance using the web interface** @@ -1489,20 +1401,16 @@ This release has the following changes in default behavior: This release adds a new service called `nms-integerations`. This service is for future integrations; no user management or configuration is needed at this time. - ### Resolved Issues{#2-4-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. 
- {{% icon-resolved %}} Unable to publish config changes to a custom nginx.conf location (35276) - ### Known Issues{#2-4-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.3.1 July 21, 2022 @@ -1531,14 +1439,10 @@ This release includes the following security updates: For more information, refer to the AskF5 article [K37080719](https://support.f5.com/csp/article/K37080719). - - ### Known Issues{#2-3-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.3.0 June 30, 2022 @@ -1552,6 +1456,7 @@ NGINX Instance Manager 2.3.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-3-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Instance Manager provides information about your F5 WAF for NGINX installations** @@ -1585,28 +1490,24 @@ This release includes the following updates: Refer to the [Technical Specifications Guide]({{< ref "/nim/fundamentals/tech-specs" >}}) for details. - ### Changes in Default Behavior{#2-3-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **New login screen** Sometimes it's the small things that count. Now, when logging in to NGINX Instance Manager, you're treated to an attractive-looking login screen instead of a bland system prompt. 🤩 - ### Resolved Issues{#2-3-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. - {{% icon-resolved %}} Post-install steps to load SELinux policy are in the wrong order (34276) - ### Known Issues{#2-3-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.2.0 May 25, 2022 @@ -1620,6 +1521,7 @@ NGINX Instance Manager 2.2.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-2-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **New events for NGINX processes and configuration rollbacks** @@ -1636,7 +1538,7 @@ This release includes the following updates: - {{% icon-feature %}} **Modules field added to Metrics and Dimensions catalogs** - A `modules` field was added to the [Metics]({{< ref "nms/reference/catalogs/metrics.md" >}}) and [Dimensions]({{< ref "nms/reference/catalogs/dimensions.md" >}}) catalogs. This field indicates which module or modules the metric or dimension belongs to. + A `modules` field was added to the [Metics]({{< ref "/nim/monitoring/catalogs/metrics.md" >}}) and [Dimensions]({{< ref "/nim/monitoring/catalogs/dimensions.md" >}}) catalogs. This field indicates which module or modules the metric or dimension belongs to. - {{% icon-feature %}} **Adds reporting for NGINX worker metrics (API only)** @@ -1647,20 +1549,16 @@ This release includes the following updates: - The count of NGINX workers - CPU, IO, and memory usage - ### Resolved Issues{#2-2-0-resolved-issues} This release fixes the following issues. 
Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. - {{% icon-resolved %}} Running Agent install script with sh returns “not found” error (33385) - ### Known Issues{#2-2-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.1.0 April 05, 2022 @@ -1674,6 +1572,7 @@ NGINX Instance Manager 2.1.0 supports upgrades from these previous versions: If your NGINX Instance Manager version is older, you may need to upgrade to an intermediate version before upgrading to the target version. ### What's New{#2-1-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **Adds Docker support for NGINX Agent** @@ -1716,8 +1615,8 @@ This release includes the following updates: For instructions, see [Install from a Helm Chart]({{< ref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}). - ### Changes in Default Behavior{#2-1-0-changes-in-behavior} + This release has the following changes in default behavior: - {{% icon-feature %}} **Tags are no longer enforced for RBAC or set when creating or updating a role** @@ -1749,7 +1648,6 @@ This release has the following changes in default behavior: } ``` - ### Resolved Issues{#2-1-0-resolved-issues} This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic for more information on the latest resolved issues. Use your browser's search function to find the issue ID in the page. @@ -1760,13 +1658,10 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} When using Instance Groups, tag-based access controls are not enforced (31267) - {{% icon-resolved %}} Bad Gateway (502) errors with Red Hat 7 (31277) - ### Known Issues{#2-1-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.0.1 January 27, 2022 @@ -1786,19 +1681,16 @@ This release fixes the following issues. Check the [Known Issues]({{< ref "/nim/ - {{% icon-resolved %}} Unable to access the NGINX Instance Manager web interface after loading SELinux policy (31583) - {{% icon-resolved %}} The `nms-dpm` service restarts when registering multiple NGINX Agents with the same identity (31612) - ### Known Issues{#2-0-1-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. ---- - ## 2.0.0 December 21, 2021 - ### What's New{#2-0-0-whats-new} + This release includes the following updates: - {{% icon-feature %}} **(Experimental) Share a configuration across multiple instances** @@ -1807,7 +1699,7 @@ This release includes the following updates: - {{% icon-feature %}} **More metrics and instance dashboards** - Instance Manager now collects additional metrics from the NGINX instances. We also added pre-configured dashboards to the web interface for each NGINX instance managed by Instance Manager. See the [Catalog Reference]({{< ref "/nms/reference/catalogs/_index.md" >}}) documentation for a complete list of metrics. + Instance Manager now collects additional metrics from the NGINX instances. We also added pre-configured dashboards to the web interface for each NGINX instance managed by Instance Manager. 
See the [Catalog Reference]({{< ref "/nim/monitoring/catalogs/" >}}) documentation for a complete list of metrics. - {{% icon-feature %}} **New architecture!** @@ -1817,8 +1709,6 @@ This release includes the following updates: Instance Manager 2.x. allows you to create user access controls with tags. Administrators can grant users read or write access to perform instance management tasks. And admins can grant or restrict access to the Settings options, such as managing licenses and creating users and roles. See the [Set up Authentication]({{< ref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md#rbac" >}}) guide for more details. - - ### Known Issues{#2-0-0-known-issues} You can find information about known issues in the [Known Issues]({{< ref "/nim/releases/known-issues.md" >}}) topic. diff --git a/content/nim/support/support-package.md b/content/nim/support/support-package.md index ed3678a21..72a104490 100644 --- a/content/nim/support/support-package.md +++ b/content/nim/support/support-package.md @@ -14,7 +14,7 @@ type: The support package script can be used to collect information about your system for troubleshooting and debugging issues. -The script collects system and service information and then packages the data into a tar archive, which you can share with [NGINX Customer Support]({{< ref "/nms/support/contact-support.md" >}}). +The script collects system and service information and then packages the data into a tar archive, which you can share with [NGINX Customer Support]({{< ref "/nim/support/contact-support.md" >}}). ## Usage diff --git a/content/nim/system-configuration/configure-forward-proxy.md b/content/nim/system-configuration/configure-forward-proxy.md index 54145a071..ba7528677 100644 --- a/content/nim/system-configuration/configure-forward-proxy.md +++ b/content/nim/system-configuration/configure-forward-proxy.md @@ -4,7 +4,6 @@ weight: 2 toc: true type: how-to product: NIM -docs: --- ## Overview diff --git a/content/nim/system-configuration/configure-high-availability.md b/content/nim/system-configuration/configure-high-availability.md index 5e679df7e..6bccae079 100644 --- a/content/nim/system-configuration/configure-high-availability.md +++ b/content/nim/system-configuration/configure-high-availability.md @@ -4,7 +4,6 @@ weight: 3 toc: true type: how-to product: NGINX Instance Manager -docs: --- ## Overview @@ -26,7 +25,7 @@ This guide shows how to configure HA for NGINX Instance Manager using `keepalive Before setting up high availability (HA) for NGINX Instance Manager, make sure you have: -- Two physical servers with NGINX Instance Manager installed +- Two servers with NGINX Instance Manager installed - A reserved virtual IP address (VIP) that always points to the active instance - An NFS share that both servers can access - Permissions to manage IP addresses at the operating system level @@ -259,4 +258,4 @@ If failover does not work as expected, check the following: ## Need help? -For additional support, visit the [F5 Support Portal](https://support.f5.com). \ No newline at end of file +For additional support, visit the [F5 Support Portal](https://support.f5.com). 
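The high-availability guide above relies on `keepalived` to move the reserved virtual IP (VIP) between the two NGINX Instance Manager servers. As a minimal sketch of how you might confirm that failover behaves as described — assuming `keepalived` runs as a systemd service and using `<VIP>` as a placeholder for your reserved virtual IP (neither detail comes from the guide itself):

```shell
# Run on each server to see which node currently owns the virtual IP.
ip addr show | grep "<VIP>"

# Simulate a failover: stop keepalived on the active node, confirm the VIP
# appears on the standby node, then restore the service.
sudo systemctl stop keepalived
sudo systemctl start keepalived
```

If the VIP never moves, the guide's "If failover does not work as expected" checklist is the place to start.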
diff --git a/content/nim/system-configuration/secure-traffic.md b/content/nim/system-configuration/secure-traffic.md index 4dbfd64e4..8612bd6dd 100644 --- a/content/nim/system-configuration/secure-traffic.md +++ b/content/nim/system-configuration/secure-traffic.md @@ -1,10 +1,10 @@ --- -nd-docs: DOCS-794 title: Secure client access and network traffic toc: true weight: 600 -type: -- tutorial +nd-content-type: how-to +nd-product: NIM +nd-docs: DOCS-794 --- {{< include "nim/decoupling/note-legacy-nms-references.md" >}} @@ -17,12 +17,10 @@ With NGINX Plus R33, telemetry data must be reported to a usage reporting endpoi {{< call-out "important" >}}Never expose your management server to the public internet. The settings in this guide reduce risk, but they can't replace keeping your server inaccessible to unauthorized users.{{< /call-out >}} -{{< call-out "tip" "See also:" "fa-solid fa-book" >}} +{{< call-out "tip" "See also:" >}} - To learn how to secure traffic for NGINX Agent, see [NGINX Agent TLS Settings](https://docs.nginx.com/nginx-agent/configuration/encrypt-communication/). - For details on NGINX Plus entitlement and usage reporting, see [About subscription licenses]({{< ref "solutions/about-subscription-licenses.md" >}}).{{< /call-out >}} ---- - ## NGINX Proxy SSL Termination SSL termination is the process where SSL-encrypted traffic is decrypted at the proxy, in this case, NGINX Instance Manager. Once decrypted, the traffic can be sent to its destination unencrypted or re-encrypted, depending on the configuration. @@ -33,8 +31,7 @@ Starting with NGINX Plus R33, you must also enable `ssl_verify` to verify the SS The example below shows how to set up SSL termination for NGINX Instance Manager: -
      - /etc/nginx/conf.d/nms-http.conf +{{< details summary="/etc/nginx/conf.d/nms-http.conf" >}} ```nginx # Main external HTTPS server, needs port 443 @@ -55,11 +52,7 @@ server { ssl_client_certificate /etc/nms/certs/ca.pem; ``` -
      - -
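With a server block like the one above in place and NGINX reloaded, you can sanity-check the SSL termination from a client machine. This is only a sketch — `<nim-fqdn>` is a placeholder for your NGINX Instance Manager hostname, and passing `/etc/nms/certs/ca.pem` as the CA bundle assumes the same CA issued the server certificate, which is how the mTLS section later in this guide sets things up:

```shell
# Connect to the HTTPS listener and print the certificate chain the proxy serves.
# "Verify return code: 0 (ok)" at the end of the output means the chain
# validates against the supplied CA bundle.
openssl s_client -connect <nim-fqdn>:443 -CAfile /etc/nms/certs/ca.pem </dev/null
```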
      - ---- +{{< /details >}} ## Mutual Client Certificate Authentication Setup (mTLS) @@ -86,15 +79,15 @@ Follow these steps to set up mTLS using a Public Key Infrastructure (PKI) system To generate the necessary certificates, follow these steps. You can modify these instructions to suit your specific environment. 1. **Install OpenSSL** (if it isn't installed already). -2. **Create the certificate generation script**: +1. **Create the certificate generation script**: - Use the following example script to generate the certificates for your CA, server, and client. Save the script as `make_certs.sh`. -
      - make_certs.sh + {{< details summary="make_certs.sh" >}} + - ```bash - #!/bin/bash + ```shell + #!/bin/bash set -e make_ca() { @@ -186,13 +179,12 @@ To generate the necessary certificates, follow these steps. You can modify these make_agent ``` -
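Once the script has run (step 4 below), a couple of standard `openssl` commands can confirm its output before you copy anything to your instances. A rough sketch, assuming the script leaves `ca.pem`, `server.crt`/`server.key`, and `agent.crt`/`agent.key` in the working directory — which is what the copy steps later in this guide expect:

```shell
# Confirm the expected files were generated.
ls -l ca.pem server.crt server.key agent.crt agent.key

# Check the server certificate's Subject Alternative Name entries; they must
# match the FQDN or IP address that NGINX Agent uses to reach NGINX Instance Manager.
openssl x509 -in server.crt -noout -text | grep -A1 "Subject Alternative Name"
```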

      + {{< /details >}} 3. **Place the configuration files**: - Put the following OpenSSL `.cnf` files in the same directory as the `make_certs.sh` script. These files are needed to configure the certificate authority and generate the appropriate certificates. -
      - ca.cnf + {{< details summary="ca.cnf" >}} {{}} {{}} @@ -218,10 +210,9 @@ To generate the necessary certificates, follow these steps. You can modify these subjectKeyIdentifier = hash ``` -
      + {{< /details >}} -
      - ca-intermediate.cnf + {{< details summary="intermediate.cnf" >}} ``` yaml [req] @@ -245,10 +236,9 @@ To generate the necessary certificates, follow these steps. You can modify these subjectKeyIdentifier = hash ``` -
      + {{< /details >}} -
      - server.cnf + {{< details summary="server.cnf" >}} ``` yaml [req] @@ -279,10 +269,9 @@ To generate the necessary certificates, follow these steps. You can modify these IP.1 = ``` -
      + {{< /details >}} -
      - agent.cnf + {{< details summary="agent.cnf" >}} ``` yaml [req] @@ -307,12 +296,12 @@ To generate the necessary certificates, follow these steps. You can modify these extendedKeyUsage = critical, clientAuth ``` -

      + {{< /details >}} 4. **Run the script**: - After saving the script, make it executable and run it to generate the certificates. - ```bash + ```shell sudo chmod +x ./make_certs.sh sudo ./make_certs.sh ``` @@ -320,7 +309,7 @@ To generate the necessary certificates, follow these steps. You can modify these 5. **Copy the certificates to the NGINX instance**: - Once generated, copy the ca.pem, agent.crt, and agent.key files to the NGINX instance where the NGINX Agent certificates will be installed. - ```bash + ```shell sudo mkdir -p /etc/nms/certs sudo cp ca.pem /etc/nms/certs/ sudo cp agent.crt /etc/nms/certs/ @@ -334,8 +323,7 @@ To generate the necessary certificates, follow these steps. You can modify these {{< call-out "note" >}}For additional information about TLS configurations for the NGINX Agent, refer to the [NGINX Agent TLS Settings](https://docs.nginx.com/nginx-agent/configuration/encrypt-communication/) topic. {{< /call-out>}} -
      - /etc/nginx-agent/nginx-agent.conf + {{< details summary="/etc/nginx-agent/nginx-agent.conf" >}} ```yaml {hl_lines=[8,22,23,24,25]} # @@ -383,7 +371,7 @@ To generate the necessary certificates, follow these steps. You can modify these bulk_size: 20 # specify metrics poll interval report_interval: 1m - collection_interval: 15s + collection_interval: 15s mode: aggregated # OSS NGINX default config path @@ -391,11 +379,11 @@ To generate the necessary certificates, follow these steps. You can modify these config_dirs: "/etc/nginx:/usr/local/etc/nginx" ``` -
      + {{< /details >}} 7. Copy `ca.pem`, `server.crt`, and `server.key` to NGINX Instance Manager. - ```bash + ```shell sudo cp ca.pem /etc/nms/certs/ sudo cp server.crt /etc/nms/certs/ sudo cp server.key /etc/nms/certs/ @@ -452,19 +440,17 @@ To generate the necessary certificates, follow these steps. You can modify these 9. **Reload NGINX proxy configuration**: - Apply the new settings by reloading NGINX proxy configuration. - ```bash + ```shell sudo nginx -s reload ``` 10. **Restart NGINX Agent**: - Start or restart NGINX Agent to apply the changes. - ```bash + ```shell sudo systemctl restart nginx-agent ``` ---- - ## Configure SSL verification for usage reporting with self-signed certificates {#configure-ssl-verify} {{}} @@ -509,8 +495,6 @@ mgmt { } ``` ---- - ## Troubleshooting If NGINX Agent and NGINX Instance Manager are having communication issues, follow these steps to troubleshoot: diff --git a/content/nms/CHANGELOG.txt b/content/nms/CHANGELOG.txt deleted file mode 100644 index 41109aefa..000000000 --- a/content/nms/CHANGELOG.txt +++ /dev/null @@ -1,1652 +0,0 @@ -# CHANGELOG - - - -## 0.211.0 -2023-02-23 - -### Features - -- Merge branch 'docs-1121-http-method' into 'staging' (f2066af6f103fe29f81057883dc27d7cf9427fcb) - -## 0.210.0 -2023-02-23 - -### Features - -- Merge branch 'fix-helm-param-names' into 'staging' (70c6a6990127cba315647efe571578cc6181f2b4) - -## 0.209.0 -2023-02-23 - -### Features - -- Merge branch 'agent-install-docker' into 'staging' (7e8c724ad972c595dcf795629ae00dc4b42871f3) - -## 0.208.0 -2023-02-23 - -### Features - -- Merge branch 'fix-warm-sync-scenario' into 'staging' (b905cc34ac9f53e685d3cad65bd8074d8beafff4) - -## 0.207.0 -2023-02-22 - -### Features - -- Merge branch 'nms-scaling' into 'staging' (fa3a6126ee4495234eeb9a7a5436209c1153433b) - -## 0.206.0 -2023-02-21 - -### Features - -- Merge branch 'DOCOPS-1520-cp' into 'main' (6db2dfdf0700d04d27586e2d34f3e35345499882) -- feat: add versions to Security Monitoring docs (7a6092a8fac9985c62c86f536e220004ee0c21af) - -## 0.205.0 -2023-02-21 - -### Features - -- Merge branch 'devportal-docker-versions' into 'staging' (a6616e5a174e797a542b5795cc14533a920a6dce) - -## 0.204.0 -2023-02-21 - -### Features - -- Merge branch 'remove-devportal-signing-key' into 'staging' (512bacc778f25b4372534acd3264f9a5ea3f76dc) - -## 0.203.0 -2023-02-20 - -### Features - -- feat: Merge branch 'devportal-docker-entrypoint' into 'main' (0212d0b23dffb3cce2d41ea4712be705a4fc74cc) -- Merge branch 'cherry-pick-510e40fc' into 'main' (a4fbd7bc5d87d40248412cea0f772b0c5bb891da) - -## 0.202.0 -2023-02-16 - -### Features - -- Merge branch 'NDD-220-fix-image-refs-dev-portal' into 'staging' (d52ece4f02af98004669147405a4144faf45f990) - -## 0.201.0 -2023-02-16 - -### Features - -- Merge branch 'docs-1120-correlation-id' into 'staging' (cafef48bb969f9f0e6895c495d0e7c40f819789a) - -## 0.200.0 -2023-02-16 - -### Features - -- Merge branch 'docs-1122-bodysizelimit' into 'staging' (9f34bded2496279a730e2714b94eefa5be4c61ee) - -## 0.199.0 -2023-02-16 - -### Features - -- Merge branch 'add-known-issue-for-nms-40484' into 'staging' (8b53821f73353746110d701dc50c72b0d01bdbb8) - -## 0.198.0 -2023-02-15 - -### Features - -- Merge branch 'alpha-sort-acm-policies' into 'staging' (98506c18d6bcb4efc0a0fc4afbb27f6dc321bf7e) - -## 0.197.0 -2023-02-15 - -### Features - -- feat: Merge branch 'helm-docs-image-name' into 'main' (2f3fa9e820ce44b94d814cd75e5a6ccbbabf2f7f) -- Merge branch 'cherry-pick-58de4bf8' into 'main' (c3d8941f14e33ac63e9300214fe0b2cf4d9b0a63) - -## 
0.196.0 -2023-02-15 - -### Features - -- feat: Merge branch 'nginx-typos' into 'staging' (5fd8907dc6af4510784051c58fb34c6c5d48688f) -- Merge branch 'cherry-pick-2d8dc9fd' into 'main' (63b144f3f5339b1c252a64dc8c65d1eaafa4f162) - -## 0.195.0 -2023-02-15 - -### Features - -- Merge branch 'proxy-request-headers-docs' into 'staging' (37c986e957438ccbd22a87714faf57ec908f75e6) - -## 0.194.0 -2023-02-14 - -### Features - -- Merge branch 'butmp-acm-policy-template' into 'staging' (843b8212a5bd9eb5cddb169dc609cb18041f5a09) - -## 0.193.0 -2023-02-14 - -### Features - -- Merge branch 'mrajagopal-docops-1603' into 'staging' (d870c0463c05716cef3c918cbddab63b7e1cd5cf) - -## 0.192.0 -2023-02-13 - -### Features - -- Merge branch 'move-log-format-param-table' into 'staging' (263af1498a3a2dbcce2e8eea52c6a77a30c211e6) - -## 0.191.0 -2023-02-13 - -### Features - -- Merge branch 'update-helm-doc' into 'staging' (ce0cbd824d5572d1aa9a21f9496c7630970d3743) - -## 0.190.0 -2023-02-13 - -### Features - -- Merge branch 'cherry-pick-afc2c783' into 'main' (3383025331f828fd27cb08359277001ca97fb6e8) -- feat: Merge branch 'RAM-HELM-DOC-UPDATE' into 'staging' (638e9e58910c0022c6468720dd29b87ecc81723b) - -## 0.189.0 -2023-02-13 - -### Features - -- Merge branch 'NMS-39889/log_format_policy' into 'staging' (f6eee3f794bea8a15c51879fbd08c8c8e9603dff) - -## 0.188.0 -2023-02-13 - -### Features - -- Merge branch 'NDD-212-acm-rhel9-oracle8-support' into 'staging' (21243df01c0ce54b00585b4b83981f8b77cb6b53) - -## 0.187.0 -2023-02-10 - -### Features - -- Merge branch 'cherry-pick-e69dc746' into 'main' (001cef835cca250483c2fb14570fd6304eb31732) -- feat: Merge branch 'dedgar-typos' into 'staging' (7cc884955e43598344d5d6304b3d2beb8d33c2e3) - -## 0.186.0 -2023-02-10 - -### Features - -- Merge branch 'contributing-docs-readme' into 'staging' (3a1a3ccb89e56fb6eae06b277b0a256d2cfe132b) - -## 0.185.0 -2023-02-09 - -### Features - -- Merge branch 'fetch-new-acm-policy-template' into 'staging' (c09bf4bd142fa7d4062be96bb7750c51e0a58743) - -## 0.184.0 -2023-02-08 - -### Features - -- Merge branch 'rjohnston-NMS-40253-nms-prefix-image-name' into 'staging' (8f8880e77da2166a9d0d6f0535d91681ade03739) - -## 0.183.0 -2023-02-08 - -### Features - -- Merge branch 'rjohnston-NMS-40038-fetch-ext-dep-os-list' into 'staging' (81c881c6dd059c125ca005b6754a587fadea666b) - -## 0.182.0 -2023-02-08 - -### Features - -- Merge branch 'fix-NMS-40241-acm-oidc-doc' into 'staging' (9f6a412fe49d247de224ff21694b20a361584ea3) - -## 0.181.0 -2023-02-07 - -### Features - -- feat: Feb 23 theme bump (2f922843f640e22939b1c83929b482c275f23509) -- Merge branch 'feb23-theme-bump-main' into 'main' (0ef82b3dd9b3910984372ba4fc819558ceb44a47) - -## 0.180.0 -2023-02-07 - -### Features - -- Merge branch 'revert-3c8750a3' into 'staging' (1fa309542ef5bcbb96470dd630a5a19670724341) - -## 0.179.0 -2023-02-03 - -### Features - -- Merge branch 'NDD-175-text-code-block' into 'staging' (1feb30b32d8947e46125d0e3e44dcad6e69a3f9d) - -## 0.178.0 -2023-02-03 - -### Features - -- Merge branch 'NDD-189-fix-nim-v1-eula-links' into 'staging' (64122781f88f37b844b59bdbc2b90a10ad36fe64) - -## 0.177.0 -2023-02-03 - -### Features - -- Merge branch 'NDD-125-manage-policies-ui' into 'staging' (ca15f384bab272c5c538619ab0f899dc0beb97f9) - -## 0.176.0 -2023-02-02 - -### Features - -- Merge branch 'rjohnston-NMS-37322-k8s-support-package' into 'staging' (5df7cf9536ccff991ab1eb2d96a75bd940be29d6) - -## 0.175.0 -2023-02-02 - -### Features - -- Merge branch 'revert-49f4eff7' into 'staging' 
(3dead72c258e37eec6439cad57e63da4b4dfe428) - -## 0.174.0 -2023-02-02 - -### Features - -- Merge branch 'NDD-184' into 'staging' (6744292a3d00f75470833da23b270cdbf460d516) - -## 0.173.0 -2023-02-02 - -### Features - -- feat: Merge branch 'acm-release-1.4.1' into 'main' (ab63896009b07ace3a34c5ac4abb71a3b966410f) -- Merge branch 'acm-release-1.4.1-cp' into 'main' (821cf9895b6be98008cd2c44bed1fcd23bab9c5f) - -## 0.172.0 -2023-02-01 - -### Features - -- Merge branch 'docs-1119-jwt-assertion' into 'staging' (449848a4782e3e850662d26d246807a000c049b7) - -## 0.171.0 -2023-02-01 - -### Features - -- Merge branch 'docs-1118-basic-authn' into 'staging' (192057054ba07841b11c02397838860a9674ff2b) - -## 0.170.0 -2023-02-01 - -### Features - -- Merge branch 'docs-1117-api-key-authn' into 'staging' (a68372506764340551f5bfefcabc1ea8c4630fef) - -## 0.169.0 -2023-01-31 - -### Features - -- Merge branch 'edgar37-staging-patch-25112' into 'staging' (aa2d399bdd8c3c339c79b9db17b4f211ae5d4d5b) - -## 0.168.0 -2023-01-31 - -### Features - -- Merge branch 'edits-to-RN-39943' into 'staging' (a177eba300fa43d562063984fb932ed244b0c074) - -## 0.167.0 -2023-01-30 - -### Features - -- Merge branch 'add-NMS-39431-troubleshooting-guide' into 'staging' (8dc29289ac391d64280719138301839078db4771) - -## 0.166.0 -2023-01-30 - -### Features - -- Merge branch 'sm-release-1.2.0' into 'staging' (e60ec043cbdc2dc6e0221c34fe573f74f6995cb9) - -## 0.165.0 -2023-01-30 - -### Features - -- Merge branch 'nim-release-2.8.0' into 'staging' (f4a472dd0aa46420ce820cef3ba375d3b159256e) - -## 0.164.0 -2023-01-27 - -### Features - -- Merge branch 'fix-DOCS-805' into 'staging' (2b684185cff57ec9de8fa1a25e0250eded2fdeb9) - -## 0.163.0 -2023-01-27 - -### Features - -- Merge branch 'jp-fix-watchdocs-IDs' into 'staging' (4c3923a9eb9b04975dd5c1dea5d8b86a80d81890) - -## 0.162.0 -2023-01-27 - -### Features - -- Merge branch 'NMS-40214-fix-Docker-file-names' into 'staging' (617efb5dad2cf8ed1159f2e061e8159007ab8421) - -## 0.161.0 -2023-01-26 - -### Features - -- Merge branch 'NMS-40142-helm-create-namespace' into 'staging' (3c983ff922c975d1a2a2d0f7c181951cbc16eff9) - -## 0.160.0 -2023-01-25 - -### Features - -- add missing link to acl consumer policy and fix names of policies. 
(a261b16115acd88f9839054c1a4222516520ccb7) - -## 0.159.0 -2023-01-25 - -### Features - -- fix: fixes broken links blocking publishing from main (015dd862d882c254476425119da32aaaa3229c05) -- Merge branch 'fix-broken-install-guide-links' into 'main' (6843d9bc3710cca9323a07c3c41a4c64ee0fdaed) - -## 0.158.0 -2023-01-25 - -### Features - -- Merge branch 'known-issue-NMS-40142' into 'staging' (f13200c43583c83715b55abb68f036d878aba371) - -## 0.157.0 -2023-01-24 - -### Features - -- Merge branch 'NMS-40045-b' into 'staging' (af7bd22c85f984598d8dce2f0d0a12a9cda90d81) - -## 0.156.0 -2023-01-24 - -### Features - -- Merge branch 'docs-db' into 'staging' (ce08f651b008e8eda1255ab73be5a4333ab2dd95) - -## 0.155.0 -2023-01-24 - -### Features - -- Merge branch 'NMS-40045-docs' into 'staging' (d5865f71440e716d9f10a6c3fdc30f70fc7f619f) - -## 0.154.0 -2023-01-24 - -### Features - -- Merge branch 'staging' into 'staging' (17d71e926220d7771fcc536a07a6bd5a0ecd7bb9) - -## 0.153.0 -2023-01-23 - -### Features - -- Merge branch 'acm-upgrade-paths' into 'staging' (3352951da2408a39e43a0ce8f4247ab23684b321) - -## 0.152.0 -2023-01-23 - -### Features - -- Merge branch 'acm-release-1.4.0' into 'staging' (74be80d14913fe1eb1a2474c4b38b00fc98242fc) - -## 0.151.0 -2023-01-23 - -### Features - -- Merge branch 'cherry-pick-446c714b' into 'staging' (864d98883c227985bc4fcfb7579a1561a2453986) - -## 0.150.0 -2023-01-23 - -### Features - -- Merge branch 'nina-tags' into 'staging' (5cfc9fa829c00255ea5302cff7f3e7dc8da997f6) - -## 0.149.0 -2023-01-20 - -### Features - -- Merge branch 'mrajagopal-docops-1570' into 'staging' (0ea2f4ec3fcc4cf65e3ebb1b6ff452a91eb27c1e) - -## 0.148.0 -2023-01-19 - -### Features - -- Merge branch 'fix-helm-links' into 'staging' (be80b507dc757ad47285535684f1eceb9e637b32) - -## 0.147.0 -2023-01-18 - -### Features - -- Merge branch 'NMS-39814/compilation-error-info' into 'staging' (927ff054e8c1a702e078c2c69138023ff17c91f8) - -## 0.146.0 -2023-01-18 - -### Features - -- Merge branch 'acm-known-issue-39943' into 'staging' (e970db31aca3d1cfa2977e67bf7dca723e2b1803) - -## 0.145.0 -2023-01-18 - -### Features - -- Merge branch 'acm-helm-docs' into 'staging' (0cea042fef03e27745d726d58b48be080e0ffd48) - -## 0.144.0 -2023-01-17 - -### Features - -- Merge branch 'NDD-115-acm-nginx-plus-matrix' into 'staging' (2aef9b2cc14179036d73b89bb46fcdad5f47dff5) - -## 0.143.0 -2023-01-17 - -### Features - -- Merge branch 'NMS-39785' into 'staging' (569602469371b97d143503baac1253d7fcb8e7be) - -## 0.142.0 -2023-01-17 - -### Features - -- Merge branch 'NMS-39786' into 'staging' (8b9139e5b962a8f0b2b6f530f759bb2c6e796f56) - -## 0.141.0 -2023-01-10 - -### Features - -- Merge branch 'docops-1565-nim-admin-password' into 'staging' (c4d93d37ba76a364b63c7029d0903847b3f87fe1) - -## 0.140.0 -2023-01-10 - -### Features - -- Merge branch 'DOCOPS-1562' into 'staging' (34634a7b812cc792cc039afe0091e64d6d989a00) - -## 0.139.0 -2023-01-09 - -### Features - -- Merge branch 'nim-app-sec-docs' into 'staging' (fda3d5529f1f69db2025fa8c6a3f036f6b18c3f6) - -## 0.138.0 -2023-01-06 - -### Features - -- Merge branch 'fix-clickhouse-alias' into 'staging' (a7cebae971e4d6dbbb10829e978db6d2712e549a) - -## 0.137.0 -2023-01-06 - -### Features - -- Merge branch 'add-clickhouse-alias' into 'staging' (2d3307063eebe0d0565b21f808c1ff97db77ead4) - -## 0.136.0 -2023-01-05 - -### Features - -- Merge branch 'NMS-39426' into 'staging' (35f30033968b5461bea86c6a81077975854dd291) - -## 0.135.0 -2023-01-04 - -### Features - -- Merge branch 'docops-1558-tech-spec-nginx-support' into 
'staging' (c2be5a645dad5b4bc1304fb6c1b8ec28c8caf48a) - -## 0.134.0 -2023-01-04 - -### Features - -- Merge branch 'clean-up-nim-kis' into 'staging' (d744db0b4e2d89cdc7acd2414bb6d383266883c4) - -## 0.133.0 -2022-12-21 - -### Features - -- Merge branch 'edit-to-nms-38876-rn' into 'staging' (ee8db3d23c4dd8516f9b1d5c7af86031d55d4dae) - -## 0.132.0 -2022-12-20 - -### Features - -- Merge branch 'nim-release-2.7.0' into 'staging' (df79c403e5e0a348982b579d0039db8973362a13) - -## 0.131.0 -2022-12-16 - -### Features - -- Merge branch 'acm-release-1.3.1' into 'staging' (ae9329221e969e2fc64b7f1a451ba1f172784273) - -## 0.130.0 -2022-12-15 - -### Features - -- Merge branch 'jputrino-patch-DOCS-1099' into 'staging' (4eacb6fe4ffec102bb74994a5ca7ca7c1516a83a) - -## 0.129.0 -2022-12-15 - -### Features - -- Merge branch 'DOCOPS-1544-rename-nms-repo' into 'staging' (5501756fedefdaff7f0247134df0d568be843ab2) - -## 0.128.0 -2022-12-13 - -### Features - -- Merge branch 'DOCOPS-1538-default-api-proxy-policy' into 'staging' (c0d097413ac85dbb1a316ed851fb2ba24a1cbf0e) - -## 0.127.0 -2022-12-13 - -### Features - -- Merge branch 'NMS-37244-staging' into 'staging' (6adb0a305d31c0ee5a779b6a6de49842e6f8520f) - -## 0.126.0 -2022-12-12 - -### Features - -- Merge branch 'acm-release-1.3.0' into 'staging' (f79bf1358cd0c0b4408dee01f792ba0e35d0e232) - -## 0.125.0 -2022-12-02 - -### Features - -- Merge branch 'NMS-39204-dupe-upgrade-command' into 'staging' (03aa8af7cd0da9e9661a6f49f2205f93f965a9a7) - -## 0.124.0 -2022-11-30 - -### Features - -- Merge branch 'add-containerized-NIM-toubleshooting-guide' into 'staging' (1dbf0045901a4449038f262383dc0269d3dd90b1) - -## 0.123.0 -2022-11-29 - -### Features - -- Merge branch 'NDD-65-NIM-2.6-RN-edits' into 'staging' (7d9b2dc4d65fdbd8e186c14f4abfd08936365588) - -## 0.122.0 -2022-11-29 - -### Features - -- Merge branch 'watchdocs-audit-cp' into 'staging' (4b700f3f58541b9d4ad2517d6c2e2936364b14eb) - -## 0.121.0 -2022-11-28 - -### Features - -- Merge branch 'NMS-38380-install-n-plus-metrics' into 'staging' (b4df885bdf8c14e612fa1161fd2f5ee8a55f9d89) - -## 0.120.0 -2022-11-28 - -### Features - -- Merge branch 'NMS-38589-fix-helm-upgrade-strategy' into 'staging' (c10db00eaeaf5395090490e96a41b56f459f00e8) - -## 0.119.0 -2022-11-23 - -### Features - -- Merge branch 'nim-2.6.0-rn-updates' into 'staging' (3f21f065384791a98d16d983afc6899f39a541e7) - -## 0.118.0 -2022-11-18 - -### Features - -- Merge branch 'update-nimvers' into 'staging' (52971771f16b93bbdcb1ae96ecd87758f36ae0f8) - -## 0.117.0 -2022-11-18 - -### Features - -- Merge branch 'nim-release-2.6.0' into 'staging' (5e0139d7b5559232063c6f7d94d10daaa23740ef) - -## 0.116.0 -2022-11-15 - -### Features - -- Merge branch 'NMS-38586-helm-upgrade' into 'staging' (1741140128ddc4a49f232dffa0849ada68174d39) - -## 0.115.0 -2022-11-03 - -### Features - -- Merge branch 'docops-1478-explain-nms-platform' into 'staging' (bc1f376cc77b16191226027c5099b8ece8418941) - -## 0.114.0 -2022-11-03 - -### Features - -- Merge branch 'allowed_directories_staging' into 'staging' (cfbffcb89dc37dd98d94643feca2f690abe253c9) - -## 0.113.0 -2022-10-27 - -### Features - -- Merge branch 'add-keycloak-acm-1.2-rns' into 'staging' (077ccada5bdd74c7ebb51bfe59d80edc300e3570) - -## 0.112.0 -2022-10-27 - -### Features - -- Merge branch 'acm-nms-37420-introspection-tutorial' into 'staging' (328a3f4116cbf4153e5861508a8771fef9ce61e0) - -## 0.111.0 -2022-10-25 - -### Features - -- Merge branch 'rprabhu-edits-2' into 'staging' (f363eaa8a6d4048c70218e88761364a68d1f2cde) - -## 0.110.0 
-2022-10-20 - -### Features - -- Merge branch 'fix-minor-issues-in-acm-docs' into 'staging' (1edea52ffcddfc6b11abaad226d1191d141da532) - -## 0.109.0 -2022-10-19 - -### Features - -- Merge branch 'acm-1.2.0-grpc-preview-not-alpha' into 'staging' (a3f5b72e0c0e0552a3832aa3fda0242d93520c64) - -## 0.108.0 -2022-10-19 - -### Features - -- Merge branch 'acm-grpc-proxy-edits' into 'staging' (0e0ce55c2773e96086d56c1700435895afebdeba) - -## 0.107.0 -2022-10-18 - -### Features - -- Merge branch 'fix-typo-grpc-method' into 'staging' (e0cd3e54c1f5e3f3e680d961cba286e7b9c252e5) - -## 0.106.0 -2022-10-18 - -### Features - -- Merge branch '1.2.0-release-notes-fix' into 'staging' (7249e0cbb6f69bfbe6d9c4e78c9858dd33d00d90) - -## 0.105.0 -2022-10-18 - -### Features - -- feat: publish ACM 1.2.0 docs - Cherrypick (1a97b08e6f006124ac7726721d91fce21c933121) -- Merge branch 'cherry-pick-3baee6f8' into 'main' (418da36e38c4978980189fabf65a91edce1093f8) - -## 0.104.0 -2022-10-18 - -### Features - -- Merge branch 'DOCOPS-1482-acm-troubleshooting' into 'staging' (b8d7f729570fd2597f121d3c36e1fc358b847c13) - -## 0.103.0 -2022-10-17 - -### Features - -- Merge branch 'fix-broken-example-json-link' into 'staging' (732de4c98db7cd2389688fc6ce96f22f50d21648) - -## 0.102.0 -2022-10-14 - -### Features - -- Merge branch 'tidy-up-include-files' into 'staging' (0218b3afeb1243db7c58edbfa9e10b1509222cb3) - -## 0.101.0 -2022-10-13 - -### Features - -- Merge branch 'count-instance-supported-distro-table' into 'staging' (eeb6e562993e1aa8399726d28e4c3ab511c469f0) - -## 0.100.0 -2022-10-13 - -### Features - -- Merge branch 'rprabhu-edits-2' into 'staging' (cf45136075b41009b08ab6252707e853a679ae53) - -## 0.99.0 -2022-10-12 - -### Features - -- Merge branch 'cherry-pick-063eca29' into 'main' (d8871aa10748ea70b4af08ce7b26324655cf56a3) -- feat: Merge branch 'nms-37940-uninstall-devportal' into 'main' (628cecf7bfd34e47e2c4f13591dfcbe0b185a65e) - -## 0.98.0 -2022-10-11 - -### Features - -- Merge branch 'DOCOPS-1480-fix-broken-links' into 'staging' (448b1d916df6137fe2df2c31b8691c5e3e3bb146) - -## 0.97.0 -2022-10-11 - -### Features - -- Merge branch 'nim-2.5.1-rns' into 'staging' (641a75f873b5e38fd1ea2f5786615d254ae5a8a5) - -## 0.96.0 -2022-10-07 - -### Features - -- Merge branch 'NMS-37749' into 'staging' (5d8e03219b6e485720bfa4eb069196106b2ec182) - -## 0.95.0 -2022-10-06 - -### Features - -- Merge branch 'combine-cert-key-copy-rename-commands' into 'staging' (74a2b678ae005ce2d7222e78c45dc3d2d72bddf8) - -## 0.94.0 -2022-10-06 - -### Features - -- Merge branch 'move-install-first-in-nav' into 'staging' (e0905e7ce1275fffe8ee16e2116473c5205197f8) - -## 0.93.0 -2022-10-06 - -### Features - -- docs: cherry-cick click house version bump to main (3e6e2f484d6e09b05d9139bb693670540660a782) -- Merge branch 'cp-clickhouse-version-bump' into 'main' (c7559728d876acd7d139648edd88a776b317f293) - -## 0.92.0 -2022-10-06 - -### Features - -- Merge branch 'cp-tabbed-install-guide' into 'main' (abeed70988247c90324e5f4d72d147c6891908ef) -- feat: cherry-pick tabbed install guide into main (aac40482020cf3dfd4a736f0a8474e2ada8f798d) - -## 0.91.0 -2022-10-04 - -### Features - -- fix: cherry-pick to fix typo in NIM 2.5 RNs (543355d0909a11ecd3c28dad373a440b68fadf02) -- Merge branch 'cherry-pick-2971834d' into 'main' (35ec817c38abf98d10728fbd5e78b90e34025f67) - -## 0.90.0 -2022-10-04 - -### Features - -- feat: From NMS-37230-configure helm chart to use nginx-plus as apigw (ad82a9f793d4d559333dec136e73edd14921e2e3) -- Merge branch 'NMS-37230-oidc-nms-helm' into 'main' 
(9171dcf7703ccadaeed584f2156e7fafc4a947d0) -- feat: F5-hugo theme September bump (5dd7955c93ef229e682dc08266f8ff100bcbaf64) -- Merge branch 'f5-hugo-september-bump' into 'main' (4da16ff02d2c5e2b8fbfb234ffd66c7a63db2a07) -- docs: NMS-37155 "Add how to enable create credentials" (dd95f560ef5946eb81734b20d329c495a4bf1bd0) -- Merge branch 'NMS-37155-Add-how-to-create-credentials' into 'main' (8dbeeabb3b56f81438b0c21def6019fe6762ef5f) -- Revert "feat: Merge branch 'NMS-37230-oidc-nms-helm' into 'main'" (c66098ad61f2d13a7f55715bccab492f1a334ea9) -- Merge branch 'revert-9171dcf7' into 'main' (477efb2912f866893ec035452ed25dbe2c09b8b3) -- fix: created includes for tech specs (6f092848c9f21534481fcc4d7db7fbad186ceb16) -- Merge branch 'DOCOPS-1427-break-up-tech-specs' into 'main' (7543e68aa5b9fd9490dde60f5b3f277411ef52c1) -- fix: added back steps for enabling/starting nms- services on RHEL (4a7d6bf99578e45d1359f9c7751a005992af7489) -- Merge branch 'docops-1428-start-nms-services' into 'main' (e145167c79bebf608ae59e855c7ca4c5cdb6454f) -- Merge branch 'fix-broken-link-rbac-doc' into 'main' (e801125bad7a3375d6848985ea72609625f6b963) -- fix: broken link in RBAC doc (0de90c4cd802159394d5af542d9eba642b4efbf9) -- Merge branch 'NMS-37346' into 'main' (8d25be8e0a4cbf035a408e0c720cae4c80c1e88d) -- fix: update information about export functionality in inventory (6ff6f042eb063acb9a68a521480e25a462703feb) -- Revert "feat: Merge branch 'NMS-37346' into 'main'" (0924689ab96aa83c688de17ab70f100a45882040) -- Merge branch 'revert-8d25be8e' into 'main' (a2c0f66e7f6c2e1872cc3ee564714f22c59abaff) -- docs: refactored preparing NMS platform docs (7083744af7c69f8959fcf379afc147b1d4a5382d) -- Merge branch 'DOCOPS-1430-refactor-ACM-getting-started' into 'main' (cfe0667e0a8f62e816c3b6d1193d44101086a045) -- feat: cherry-pick 'nms-36444-update-acm-definitions' into 'main' (8935484639854f7c9d7789303e2f4946c5cd6a13) -- Merge branch 'cherry-pick-e7d5db6c' into 'main' (ca4666fb8349fac014efdd9aa01a67381b02299c) -- chore: add codeowners file (3b2a75a2e446d0dba42897740bea891af88fcc90) -- Merge branch 'j-putrino-patch-codeowners' into 'main' (cbf3e5f61f2ac532394d26c165f2cb8026492d6d) -- fix: cherry-pick clickhouse version bump in fetch-external-dependencies script (f1f428a9a6ca1c6b46689e92710b203e2585be35) -- Merge branch 'cherry-pick-76685a6e' into 'main' (70661a039df96cdf20a37d81bdaf345188b40ab3) -- Merge branch 'cherry-pick-77f6556d' into 'main' (276c0b84b88b8628d3e6907b46701c3f094cc148) -- feat: CP NIM upgrade dependencies for ACM (3b7f34403c13a78b0e0928223af83650f7a0c181) -- feat: Merge NIM 2.5 docs into main (9101d694977650f7df6c4a35df1a5cab818fb581) -- Merge branch 'cherry-pick-da3937bf' into 'main' (15178883ddcf5e0373158e1151542af08225215d) - -## 0.89.1 -2022-09-07 - -### Fixes - -- DOCOPS-1402 - (88cada52bde3e0e583ecf1e80ba749311f014c2c) - -## 0.89.0 -2022-09-05 - -### Features - -- fix: add versions string to ACM docs (bff5c9888cb3df53ed152468dde2c6a97458cf8a) -- Merge branch 'DOCOPS-1293' into 'main' (383d7be07c1e7a1ca192adc8b5f16aea0560bbec) - -## 0.88.0 -2022-09-01 - -### Features - -- Clickhouse edits (535a9c878e472979da86392c0129de683b5f6d3e) -- Merge branch 'clickhouse-edits' into 'main' (aeeb27b3648ed872c1db58192e413752d69bfd9d) - -## 0.87.0 -2022-09-01 - -### Features - -- fix: edits to CH doc and added redirect from broken CLI link (5ca5113688053c8be75f4b1d02230ef4073553fa) -- Merge branch 'clickhouse-edits' into 'main' (1d8b974ecb79e826e7f3a929bbf7dcf02c1bc75d) - -## 0.86.0 -2022-09-01 - -### Features - -- fix: 
broken CH links (5517fba04c5529510f4f1da4bac40068f4fcc64a) -- Merge branch 'clickhouse-edits' into 'main' (2668142aab6d5ede583f650c4d4f8761d30559fb) - -## 0.85.0 -2022-09-01 - -### Features - -- fix: broken link to CH doc (4426489c32f12f102b7d9f7e6ba240006226791d) -- Merge branch 'clickhouse-edits' into 'main' (26013c18634a851b1eecb135ab8189462fe994f9) - -## 0.84.0 -2022-09-01 - -### Features - -- Merge branch 'clickhouse-installation-edits' into 'main' (cbe99730fc1b51b3ce28e2f240b0d7c7db964044) -- fix: added new topic on how to configure clickhouse (3961a8d5762b76811d498f918486c712edd22c96) - -## 0.83.0 -2022-09-01 - -### Features - -- fix: cleaned up the agent installation guide (508b24a9746115856fb51fc3e4d43795ac3e5e6b) -- Merge branch 'agent-install-edits' into 'main' (40f16c51ead01d1a3d2f1365a4c9daf01223aaec) - -## 0.82.0 -2022-09-01 - -### Features - -- fix: update Agent config, environment variables and CLI flags (e9aa5fef4da1d7f839aa7b2f2d1302719bbd9fe2) -- Merge branch 'nms-34779' into 'main' (549622552f12ae3683db9e01960dcbd500f64f84) - -## 0.81.0 -2022-09-01 - -### Features - -- Merge branch 'DOCOPS-1394b' into 'main' (1bd489944256d5c239cd43b1613d7b49dae1bdea) -- fix: exclude taxonomies from sitemap (c6419059a832a31ad413bcc030fd6f4c2c85d4ca) - -## 0.80.0 -2022-09-01 - -### Features - -- NMS-36992 devportal upgrade steps (bd963b2a646b0ca32a2cbc44a0d722b1bcc63660) -- Merge branch 'nms-36992-devp-upgrade' into 'main' (0ccd74c8124453a45c6258116944b6f8970c1a50) - -## 0.79.0 -2022-08-31 - -### Features - -- Tech spec edits (e85e4eff1efd030783ab1fe9b290b82443f628a0) -- Merge branch 'tech-spec-edits' into 'main' (652bbacf78e3941a622ae8617b4a7a67910621a3) - -## 0.78.0 -2022-08-31 - -### Features - -- Merge branch 'docops-1388-dev-portal-separate-host' into 'main' (5cb6379fa8f03455886ecce284d61337de6cb40f) -- fix: creatd shortcode for devportal dedicated host note (e19dbf392d6c75783a4ea8c58201c37004a77abc) - -## 0.77.0 -2022-08-31 - -### Features - -- Tech spec table hr (e253d7a2be6e3a56ba24545475cb5e3828389dfb) -- Merge branch 'tech-spec-table-hr' into 'main' (c87dbd06c9abf7ae25b9f73cf2bd3c86d33be231) - -## 0.76.0 -2022-08-31 - -### Features - -- fix: clean up categories (9d6e113a5e5065131402fb91acc4a17dcb9c9806) -- Merge branch 'DOCOPS-1394' into 'main' (9e02084f2ce38a11289ebafa01f503e4d72e746c) - -## 0.75.0 -2022-08-31 - -### Features - -- fix: correct tech specs for ACM (eb9a33ca4a49329ee47f3d3eed201bfa69afb796) -- Merge branch 'tech-specs' into 'main' (b999e288bdde9b6231902ca5223b3b87b2a2c0e1) - -## 0.74.0 -2022-08-31 - -### Features - -- Merge branch 'acm-release-1.1.1' into 'main' (5c94016f6ae38552e404b0c105fda2c074c4f928) -- ACM 1.1.1 docs release branch (1c71b73aae28faeb86f9dd61f564923faa661a66) - -## 0.73.0 -2022-08-30 - -### Features - -- Merge branch 'npi-ssm-aws' into 'main' (381a42d9dee357881ff6e61002ac5bd7aaa8de7b) -- docs: Adding Session Manager details if exposing SSH isn't allowed. 
(0602d5bfc0f11e41ef9a521c0ad1581dbf015478) - -## 0.72.0 -2022-08-30 - -### Features - -- Merge branch 'update-link-sso-devportal' into 'main' (a1554036893e7ff8e48822400003f7ac4317fda1) -- fix: renamed setup-oidc-devportal file (dd7b2a10e6523e20f78b06e1192c08e71dc4734d) - -## 0.71.0 -2022-08-30 - -### Features - -- Merge branch 'add-users-redirect' into 'main' (b9d4709583d19ff30190aded166acd41743aa32d) -- fix: fixed redirect for Add Users URL (b536f03ec624bc6c411fcd52eab18f4f432d6d6d) - -## 0.70.0 -2022-08-30 - -### Features - -- fix: clarified how to configure basic auth for new users (ad87f98d814043ffbd4524fa0728d6ab5a866dff) -- Merge branch 'docops-1369-add-user-basic-auth' into 'main' (c3fadbbefc290e95714821dabcfc8484cc0dcc0e) - -## 0.69.0 -2022-08-30 - -### Features - -- Merge branch 'acm-36957-update-dns-limitation' into 'main' (d1d9c57b5739947d3db46b01524f04e41b35c5a8) -- Acm 36957 update dns limitation (6a2f1bc727d8f9f21d38cd05f8c53709de6aa831) - -## 0.68.0 -2022-08-30 - -### Features - -- fix: update Agent CLI flags (0bbe6893e98226a6a3739c4c063ff68f4d80e770) -- Merge branch 'DOCOPS-1323' into 'main' (d3cedcdd6095503d6807bfb5d9c1375f8b940ece) - -## 0.67.0 -2022-08-30 - -### Features - -- Merge branch 'fix-ki-bug-icons' into 'main' (48713e64cfb9bae890db0ae8e26c303cf04fa039) -- Fix ki bug icons (875893ca33b2da3473ae1a7d41e2a35e4f8ddf83) - -## 0.66.0 -2022-08-30 - -### Features - -- Merge branch 'add-acm-known-issues' into 'main' (49949b2d6b7087561018ba16767668805a36df47) -- fix: adds ACM known issues retroactively (3ec689ebfe70b2d3af1c957b5ec9d3c3162e4331) - -## 0.65.0 -2022-08-29 - -### Features - -- feat: Add additional ACM pages to watchdocs (d9c9d0519a5d09d13436272f500f05d3fde01429) -- Merge branch 'watchdocs-acm-addendum' into 'main' (93e1d1dc201cb8a6ac491c29f0b945df65f7937e) - -## 0.64.0 -2022-08-29 - -### Features - -- docs: added nap reporting section (ccccf167f953bb9f9485de49de7dfd0745f5d34a) -- Merge branch 'NMS-36245' into 'main' (f088a4ca108f57511e2207a0e16cd9364e9a2e3d) - -## 0.63.0 -2022-08-26 - -### Features - -- fix: added RN for NMS-32145 (199c83dba9afd9f966536c8c5cff39cb1c0dfd4e) -- Merge branch 'NMS-32145-add-RN-NIM-2.3' into 'main' (4ac2e3e71a3f1f76cd5d31644d7581c55380bbda) - -## 0.62.0 -2022-08-26 - -### Features - -- Merge branch 'NMS-36933-clickhouse-link' into 'main' (3cea32bd32076e68495324925d3ff133ae7dc3de) -- fix: fixed ClickHouse link (16654087516d0bceea5a1abafd0f8e361f765522) - -## 0.61.0 -2022-08-26 - -### Features - -- feat: DOCOPS-1278 Add untagged NIM pages to watchdocs (232d54047872881408c9c20f52685f6f14b66039) -- Merge branch 'watchdocs-addendum' into 'main' (f2e80be2790b300c8e2c5d7a04b9cc6c6b2dabee) - -## 0.60.0 -2022-08-26 - -### Features - -- feat: DOCOPS-1278 Add remaining live ACM content to watchdocs (6cf3ba16141bfb63a8d54505bfc1951d3af74bff) -- Merge branch 'acm-watchdoc-amendments' into 'main' (737af7049c3029faedee3a044e34379274a9dc0e) - -## 0.59.0 -2022-08-25 - -### Features - -- feat: DOCOPS-1278 Add docswatch metadata to ACM files (0c27922831ca3c7dd72c7b287c0a5148353d0f12) -- Merge branch 'nms-acm-docswatch' into 'main' (5c0d665e8281e36b1b4309aa80824d9399e875a5) - -## 0.58.0 -2022-08-24 - -### Features - -- Merge branch 'DOCOPS-1381-using-metrics-api' into 'main' (8ffd63398994b3be0d7dcc73c2b1cda55da17158) -- fix: removed comma from metrics query (91999a753b25767d5d5294c80992b714afe3f860) - -## 0.57.0 -2022-08-24 - -### Features - -- Merge branch 'add_nms_watchdocs' into 'main' (6db21c0006c0e0c1d315f2d78693b2b82a8a658a) -- DOCOPS-1278 Add 
docswatch catalogue codes to admin-guides section (6f141f8de1a575c05934a48fede82d59aaace214) - -## 0.56.0 -2022-08-23 - -### Features - -- fix: updated location for placing certs (9abffa2bcf495b2390b20e9e8aa4ec692fc80cff) -- Merge branch 'docops-1368-secure-traffic' into 'main' (b3afa35192050355f1d504b91af9cf6c613481ec) - -## 0.55.0 -2022-08-23 - -### Features - -- Merge branch 'DOCOPS-1056' into 'main' (c256ce644e4ba5c9d5ce69d5ff0c8c0fb7749a85) -- feat: Add new NMS Overview document (b413a682b557880540430ec7893c20569485974a) - -## 0.54.0 -2022-08-23 - -### Features - -- fix: Adds RN and doc update for nms-integrations (f7457bc435ea440e4d0a0c619dafb1b84df5db79) -- Merge branch 'NMS-36731-nms-integrations' into 'main' (25acd7aea11c90961b21d80e9ec9258963d984c7) - -## 0.53.0 -2022-08-22 - -### Features - -- Merge branch 'install-password' into 'main' (607425af98de8b9807ed4036d80780a661d2a093) -- Add password note to NMS installation (ab19851481f956b514052e299a612fdec3b3b93f) - -## 0.52.0 -2022-08-22 - -### Features - -- Merge branch 'aws-edits' into 'main' (c028f2777876e0da36ec5cd18c929dad2a26ab92) -- fix: Removing example names in AWS, provided no value. Fixed tabel reference. (9b9c617083954d6752a84e8cfd19a13f898beb84) - -## 0.51.0 -2022-08-22 - -### Features - -- fix: delete unused doc (3bc29c6aa1dbeca53799acaaa0d4077fb3cdb413) -- Merge branch 'DOCS-896' into 'main' (d7018a7904d9ba4d2ae26a85508b39c734a59ab5) - -## 0.50.0 -2022-08-19 - -### Features - -- Merge branch 'npi-aws-edits' into 'main' (0ad6efccf71d94c236dea73894375361efdc929f) -- Npi aws edits (12c62249e01eebf5443bf462435d95eef1d2c40d) - -## 0.49.0 -2022-08-19 - -### Features - -- Merge branch 'enable-tutorial' into 'main' (0b201154f647b7f020977818e5050f996a2aec09) -- fix: Enable Tutorial Section and Word Fixes to adhere to F5 Style Guide (f5998dd2f0408c1f93fc1966d8b485cf1fcc394a) - -## 0.48.0 -2022-08-19 - -### Features - -- fix: reorged the NMS upgrade guide (a5d5546a4f361bd72d1e0e980a9bd623b97c75d8) -- Merge branch 'docops-1340-upgrade-reorg' into 'main' (103a144f8dae43683713741d28ce6a581b710af9) - -## 0.47.0 -2022-08-19 - -### Features - -- docs: Update content/tech-specs.md (96c1a37447e43760c904d7f0c33b2141d40f16f7) -- Merge branch 'docops-1365-acm-plat-compatibility' into 'main' (4b5466d7c14928805833ff508c6fbdbd9549b70d) - -## 0.46.0 -2022-08-18 - -### Features - -- Upgrade instructions acm release 1.x (54c57a0ffbb769a03f9c7fe7a7201ba1cdfd8cff) -- Merge branch 'upgrade-instructions-acm-release-1.x' into 'main' (15801081f06d13353a29855991c37a31744c593a) - -## 0.45.0 -2022-08-18 - -### Features - -- Merge branch 'npi-update-arch-diagram' into 'main' (f5c1c4ccf2f9594a820ea8183a1abbf04af3ada7) - -### Fixes - -- Updating architecture diagram updating Data Plane Host and API Gateway Proxy (333c6052c13ea0ec285a2376b438cbd9e269d298) -- Updating name of the controller host to also include ACM Module for clarity. 
(7f60b7a97a2ac2bdaa56c6f2a2695eef26dbec2e) - -## 0.44.0 -2022-08-18 - -### Features - -- Merge branch 'docops-1364' into 'main' (97835fa2af911fad2cfebff4e1c03b76d0e20b97) -- fix: DOCOPS-1364 remove tagging job from gitlab-ci (e76873e198387127c2507c1f97961f1980e1911b) - -## 0.43.0 -2022-08-18 - -### Features - -- Merge branch 'remove-draft-true-aws-deploy-doc' into 'main' (07824d659fb79b09c609a1a812f95d5d01f1567d) - -### Fixes - -- removed draft status (d97bff51f0d44e69932edb39986915ccd8abd6fb) - -## 0.42.0 -2022-08-18 - -### Features - -- Merge branch 'acm-release-1.1.0' into 'main' (8c45a5aa4bf5bb1e585496602bf9bd6d4976f78a) -- feat: ACM release 1.1.0 (61e23401223425be474033f502f0f7dd281d874e) - -## 0.41.0 -2022-08-18 - -### Features - -- Merge branch 'DOCS-909' into 'main' (5f1c397b6b15adb8038ea808b72d3453df02a8e5) -- fix: update DOCS-909 catalog entry code (6cdfa0e63a4f262381058e558ca760847890cb29) - -## 0.40.0 -2022-08-18 - -### Features - -- fix: remove dummy techspec pages for old redirect (51801bbe2e3c64690df572c86c13360f5c57ac2c) -- Merge branch 'DOCOPS-1362' into 'main' (95e85ca9752e2dd51da9b9a37d70173bbb1ac5fc) - -## 0.39.0 -2022-08-17 - -### Features - -- fix: update footer for NMS and revert NMS-36400 (efd0cc0780105fa7b1c8d6345683a13d323cbaca) -- Merge branch 'DOCOPS-1358' into 'main' (e07b4d3a6d8ba701e002d31c772cc14b29c9d05f) - -## 0.38.0 -2022-08-17 - -### Features - -- fix: resolves DOCOPS-1355, DOCOPS-1356 (abccc97d2c49491087f6b5b76e5395704e87e6d7) -- Merge branch 'uninstall-guide-updates' into 'main' (083a2265c872b028117623aabe52983f4b551e49) - -## 0.37.0 -2022-08-16 - -### Features - -- Merge branch 'nms-36599' into 'main' (414f3c2162e6adf883f6d1c2980b5447e2169efb) -- NMS-36599 - update agent onboarding instructions for ACM (13d498f633be8dd0ea42395ed5aba428f79deef9) - -## 0.36.0 -2022-08-16 - -### Features - -- docs: NIM 2.4 RNs and doc updates (aca22a0b42290f47682dab7372f2b9265c3c2a72) -- Merge branch 'nim-release-2.4' into 'main' (a90cac0f3cf369a19fa0d51b53b698633b270d3f) - -## 0.35.0 -2022-08-15 - -### Features - -- Merge branch 'min-nim-version-for-acm' into 'main' (9bb846422c4406570459df82bbe83b78e9a57239) -- fix: add NIM dependency table to ACM tech spec guide (a4ef93bd496a753e63d2a1967806d947805492b2) - -## 0.34.0 -2022-08-15 - -### Features - -- fix: DOCOPS-1350 Update resulting file path name (1cfcaf1d2ef7512e10163a821188a69987b7f34b) -- Merge branch 'DOCOPS-1350' into 'main' (0bd072df949626792b5ef3e8971fc2b685f7b226) - -## 0.33.0 -2022-08-12 - -### Features - -- Merge branch 'tsg-updates' into 'main' (a05272a718faa05ff2534e284e2ea3b5c215e302) -- fix: updated troublehsooting section for NIM (39ef10e2231bb539871a140ebede00f3a9043e67) - -## 0.32.0 -2022-08-12 - -### Features - -- fix: adds Known Issue 35339 to ACM 1.0 RNs (df6993cd046fceb894c090099eba63d18180d412) -- Merge branch 'add-acm-1.0-known-issue-35339' into 'main' (3cf3c5e5b5192fd7a15c956f455a30eb89d4129a) - -## 0.31.0 -2022-08-09 - -### Features - -- feat: how-to developer portals (01844f3eaf1bcfb1dd4d4dc30c93beeb1bf536cf) -- Merge branch 'NMS-34244' into 'main' (cf9b16fe827cae28b6bedcb9d2a225fe6c9116c9) - -## 0.30.0 -2022-08-09 - -### Features - -- fix: Add dummy sections for NIM/ACM redirects - Hide them from the sidebar (8264ddc255853cdd2c30bc4dbf39b4e2ace45804) -- Merge branch 'NMS-36400' into 'main' (20f716c6eb22dc8938c9eba27bf9b5805a31831e) - -## 0.29.0 -2022-08-08 - -### Features - -- Revert "feat: Merge branch 'NMS-36400' into 'main'" (a1c031d48f22f9f601d04766aef66904d30ec5b2) -- Merge branch 
'revert-361f81fd' into 'main' (86f317e3cf67ee1c0d179ff03a7f66bc39b8dee7) - -## 0.28.0 -2022-08-08 - -### Features - -- Merge branch 'NMS-36400' into 'main' (361f81fde3e631e7460ecc925776bc85aa97dc0e) -- fix: add redirect templates and dummy NIM sections (a1952a87dc09c833210d5b7bd76577217497d07b) - -## 0.27.0 -2022-08-06 - -### Features - -- fix: adds offline install steps for ACM and Dev Portal (74776a9643132a92fd9271b2b2e0637020ccf0b3) -- Merge branch 'docops-1273-acm-offline-install-guide' into 'main' (806b3dd32b4b4533de924402fafe78853dce9818) - -## 0.26.0 -2022-08-05 - -### Features - -- fix: adds CVE details for unembargoed NMS-34814 (3b1771e7364b9a9d11bd2f9e4156ed3ebb34070d) -- Merge branch 'NMS-34814-update-rn' into 'main' (606262a5dfd1585c6474182d940e0670b168f07f) -- Merge branch 'fix-link-oidc-devportal' into 'main' (b09c8aa5c0158d5dc5e63343768690becbb38a69) - -### Fixes - -- updated link to another repo (2f45faa3db574bb9461b222154024949e0eb3475) - -## 0.25.0 -2022-08-04 - -### Features - -- fix: NMS-32522-Setup-OIDC-Dev-Portals (4727426e72f8cac8ef349c6d466b6e0f9a236397) -- Merge branch 'NMS-32522-Setup-OIDC-Dev-Portals' into 'main' (42142743c440ec72ba7d1c92142c66eece3e68d9) - -## 0.24.0 -2022-08-03 - -### Features - -- docs: Add NGINX Signing Key on NMS Install Guide (6f6b6983902da2e0c58789876ed93a5a15755fb7) -- Merge branch 'add-deb-signing-keys' into 'main' (35a393be03b118fa635ad4862a6053e7b3d17cf6) - -## 0.23.0 -2022-08-02 - -### Features - -- Merge branch 'NMS-33344-Policies-Overview' into 'main' (46625e9af5a5c4fe2a27cc61d3646815ab6c0181) -- Resolve NMS-33344 "Policies overview" (d9d8fe7e46326e03d58dee9f9563ca89a183c4b1) - -## 0.22.0 -2022-08-02 - -### Features - -- Merge branch 'DOCOPS-1284-nim-manage-instance-groups' into 'main' (721a6485341076e8e96fe69dfe886a98791bd340) -- Resolve DOCOPS-1284 "Nim manage instance groups" (3094c807bbbe2d65c6ad2f14f12f276143f91c05) - -## 0.21.0 -2022-08-02 - -### Features - -- Merge branch 'DOCOPS-1314-Update-a-WebUI-step' into 'main' (20d4b41e30c88c0cdf54ae9c73c5dc9b34d31950) - -### Fixes - -- updated per AC (4bf4dbbe27ba68b88f30c367d05492b596a38a5a) -- updated UI step (c191ee7b99ce446863c6d124c7a92d011eee6450) - -## 0.20.0 -2022-07-29 - -### Features - -- fix: redirect for moved migration guide (f3a6398da2e05b35c3b53706c799837359d2899f) -- Merge branch 'fix-nim-install-guide-redirect' into 'main' (79123fc688c728f1951a2019c50eac00257d96a4) - -## 0.19.0 -2022-07-29 - -### Features - -- fix: reorged NIM 2 IA for migration topic (de4e272e4865109c964b86439879ee35241d32a9) -- Merge branch 'cp-53b07d53-move-nim-migration-guide' into 'main' (3dda6b785b96282b5a24dd98eee708d6da44bcef) - -## 0.18.0 -2022-07-28 - -### Features - -- Merge branch 'docops-1312-1313' into 'main' (ef4300ffccbaf6a8bfcc50ba78bcbca04e18edd6) - -### Fixes - -- DOCOPS-1312 & DOCOPS-1313 (aabe44277f43a3b0922e1b985a005d8489c9cd3e) - -## 0.17.0 -2022-07-28 - -### Features - -- Merge branch 'NMS-34693_oidc_update' into 'main' (0956399077d1b421e9f64a3b74fa2caa15b86e93) - -## 0.16.0 -2022-07-27 - -### Features - -- Merge branch 'docops-1309-nim-heml-install' into 'main' (d8722eeccd1356c155de2efd463bcc834b5b3a83) - -### Fixes - -- NIM helm install works only on NIM 2.1.0 (40261b0e296ff263c11c327bcdd10afa2d73492e) - -## 0.15.0 -2022-07-27 - -### Features - -- Merge branch 'update-readme-branching' into 'main' (5114cb88797e291cd02987ae08d780e10551761f) - -## 0.14.0 -2022-07-25 - -### Features - -- Merge branch 'docops-1294' into 'main' (ccbce8ec922e7dd70bc07a41ffa1324c17a63c5c) - -### 
Fixes - -- DOCOPS-1294 (9f8a8971503cb07c604019f16fd366ac58ddd928) - -## 0.13.0 -2022-07-25 - -### Features - -- Merge branch 'docops-1292' into 'main' (d1c51ac8626fb8022b697a4f8c7fd434d7a7f0e9) - -### Fixes - -- DOCOPS-1292 (07a625439448565409477a0c1cf04710bf1d8edb) - -## 0.12.0 -2022-07-25 - -### Features - -- Merge branch 'remove-nim-1.x-screenshots' into 'main' (ca5590fb691a1232ff76150dbbdef2b2b7d0c0ba) - -### Fixes - -- removes screenshots from NIM 1.x docs (b58c23cc7b245de7079b87c73d17410cc3783c09) - -## 0.11.0 -2022-07-25 - -### Features - -- Merge branch 'docops-1291-restore-nim-1.x-docs' into 'main' (62811b2522901deb29e41fd6cf245b57bccd7767) - -### Fixes - -- restores NIM 1.x docs (0e5aa34a234831a9ebdb70a5d966634985be13dc) - -## 0.10.0 -2022-07-21 - -### Features - -- Merge branch 'fix-broken-link-nim-known-issues' into 'main' (3e5155883c9f69a0df45bc1df5edcbe40778b06f) - -### Fixes - -- broken link to upgrade guide in NIM known issues (6f6154545bce7d72aaf165d12df6d585c22fff23) - -## 0.9.0 -2022-07-21 - -### Features - -- Merge branch 'NMS-36040-known-issue' into 'main' (22e1ad58599439b422cbf6d68fcf5171c787e2ab) - -### Fixes - -- added known issue for nms-36040 (0e558d6b8280a28c316d094c52801662465a71d2) - -## 0.8.0 -2022-07-21 - -### Features - -- Merge branch 'DOCOPS-1154-nim-2.3.1-RNs' into 'main' (4fde4f4ce8946e4aedfe2d866e76fb8304291c5e) - -### Fixes - -- add NIM 2.3.1 RNs (29e769c58401fdfd95d2f16c45b19a83ec5b4358) - -## 0.7.0 -2022-07-19 - -### Features - -- ACM GA v1.0.0 (c58ec5de2e9fb25a022934990b79387a2ccd36dd) -- Merge branch 'ACM-GA-1.0-Docs-Release-Branch' into 'main' (78941f06f66e4da4115743a8e2dbd5553c523da0) - -## 0.4.0 -2022-07-05 - -### Features - -- Merge branch 'NIM-2.3-doc-release-branch' into 'main' (6597f12b9013c16e8e7c81e108074bd4baf207f2) - -## 0.3.0 -2022-06-10 - -### Features - -- Merge branch 'bloviate-product-names' into 'main' (7499e8e8f37a842d5b0cf552aef314cb0128e389) - -### Fixes - -- added full product names to doc IA (60f7c694c021a36770e812f4620f5c7ae6ad05e4) - -## 0.2.0 -2022-06-06 - -### Features - -- Merge branch 'docops-857-port-over-platform-docs' into 'main' (686e852a7d0ceb79a1194bc1d2a4eb9516e0bd88) - -## 0.1.0 -2022-06-02 - -### Features - -- repo CI, Hugo, and Netlify setup (b0244a439205d89954070c78ed976b3c9aa1e2d3) -- Merge branch 'setup-gitlab-ci' into 'main' (1b37c69a9d997b8c2b8b944fc8bf7d1622883a12) - -### Fixes - -- initial commit (22bba50a14debf38dd8e607823bc333146cc3098) -- ACM Docs folder structure (acd7412074a5e1b50fc7d903cc572db9c85409cb) -- Updated the structure as per the IA (bd1c6d27e68dd2a0873e9cc444260bbec980b636) diff --git a/content/nms/CODEOWNERS b/content/nms/CODEOWNERS deleted file mode 100644 index dcd1d84ff..000000000 --- a/content/nms/CODEOWNERS +++ /dev/null @@ -1,27 +0,0 @@ -# All files -[Tech Writers][1] -/ @tmartin-nginx @j.cahilltorre @a.dooley @jputrino - -[Admin Guides][1] -content/admin-guides/ @j.barrios @forsyth1 @jclopper @ssharma13 @n.mcelwain @jputrino @noumba @i.ukpe @ch.adams @mvernik @tmartin-nginx - -[Tech Specs][1] -content/overview/tech-specs.md @j.barrios @forsyth1 @jclopper @n.mcelwain @jputrino @tmartin-nginx - -[NIM App Sec][1] -content/nim/nginx-app-protect/ @noumba @j.barrios @i.ukpe @jputrino @tmartin-nginx - -[NIM][1] -content/nim/ @forsyth1 @jclopper @j.barrios @ssharma13 @jputrino @tmartin-nginx - -[ACM][1] -content/acm/ @n.mcelwain @jputrino @tmartin-nginx - -[Security Monitoring][1] -content/security/ @noumba @j.barrios @i.ukpe @jputrino @tmartin-nginx - -[Agent][1] -content/nginx-agent/ 
@ch.adams @mvernik @jputrino @tmartin-nginx - -[Support][1] -content/support/ @j.barrios @forsyth1 @jclopper @ssharma13 @n.mcelwain @jputrino @tmartin-nginx diff --git a/content/nms/_index.md b/content/nms/_index.md deleted file mode 100644 index 8e28584da..000000000 --- a/content/nms/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: F5 NGINX Management Suite -nd-subtitle: A set of tools that enable enterprise scalability, security, observability, and governance. -url: /nginx-management-suite/ -cascade: - logo: "NGINX-Management-Suite-product-icon.svg" - noindex: true - nd-banner: - enabled: true - type: deprecation - md: _banners/eos-nms.md ---- diff --git a/content/nms/about.md b/content/nms/about.md deleted file mode 100644 index 0b76f378a..000000000 --- a/content/nms/about.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: About -description: F5 NGINX Management Suite brings together advanced features into a single - platform, making it easy for organizations to configure, monitor, and troubleshoot - NGINX instances; manage and govern APIs; optimize load balancing for apps; and enhance - overall security. -weight: 10 -toc: true -type: concept -nd-docs: DOCS-905 ---- - -Explore the topics below to find out what the F5 NGINX Management Suite modules have to offer. - ---- - -## Instance Manager {#instance-manager} - -[NGINX Instance Manager]({{< ref "/nim/">}}) allows you to configure, scale, and manage NGINX Open Source and NGINX Plus instances at scale. Instance Manager provides a [REST API]({{< ref "/nim/fundamentals/api-overview">}}) and web-based graphical user interface (GUI) for managing NGINX instances across multiple servers, making it easier to configure, monitor, and troubleshoot NGINX deployments. - -Instance Manager can be used to manage instances running on-premises, in the cloud, or in hybrid environments, and it supports the deployment of NGINX instances on a variety of operating systems and container platforms. - -Instance Manager also includes advanced features like health checks, rolling updates, and configuration backups, which help to ensure the reliability and security of NGINX deployments. - -### Instance Manager Key Features - -Instance Manager provides the following features: - -- [View metrics and information]({{< ref "/nim/monitoring/view-events-metrics">}}) about data plane host systems and NGINX instances -- [View, edit, and publish NGINX configurations]({{< ref "/nim/nginx-configs/publish-configs">}}) -- [Save NGINX configurations]({{< ref "/nim/nginx-configs/publish-configs#stage-config">}}) for future deployment -- [Analyze NGINX configurations]({{< ref "/nim/nginx-configs/publish-configs">}}) for syntactic errors before publishing them -- [Scan the network]({{< ref "/nim/nginx-instances/scan-instances#scan-ui">}}) to find unmanaged NGINX instances. -- [Manage certificates]({{< ref "/nim/nginx-instances/manage-certificates">}}) -- [Create users, roles, and role permissions]({{< ref "/nim/admin-guide/rbac/overview-rbac">}}) for role-based access control - ---- - -## Security Monitoring {#security-monitoring} - -Security Monitoring allows you to monitor F5 WAF for NGINX with analytics dashboards and security log details to get protection insights for analyzing possible threats or areas for tuning policies. 
- -### Security Monitoring Key Features - -The Security Monitoring module provides the following features: - -- Informative dashboards that provide valuable protection insights -- In-depth security log details to help with analyzing possible threats and making policy decisions - ---- - -## What's Next? - -- [Review the Technical Specifications]({{< ref "/nim/fundamentals/tech-specs.md">}}) -- [Install NGINX Management Suite]({{< ref "/nim/deploy/_index.md">}}) diff --git a/content/nms/acm/_index.md b/content/nms/acm/_index.md deleted file mode 100644 index 2acb5eb31..000000000 --- a/content/nms/acm/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: API Connectivity Manager -weight: 500 -url: /nginx-management-suite/acm/ -cascade: - noindex: true - nd-banner: - enabled: true - type: deprecation - md: _banners/eos-acm.md ---- - diff --git a/content/nms/acm/about/_index.md b/content/nms/acm/about/_index.md deleted file mode 100644 index 6bef60e8d..000000000 --- a/content/nms/acm/about/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -description: "Learn about API Connectivity Manager" -title: About -weight: 100 -url: /nginx-management-suite/acm/about/ ---- diff --git a/content/nms/acm/about/api-overview.md b/content/nms/acm/about/api-overview.md deleted file mode 100644 index 504340c24..000000000 --- a/content/nms/acm/about/api-overview.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -description: This topic gives an overview of the F5 NGINX Management Suite API Connectivity - Manager API. -nd-docs: DOCS-929 -title: API Overview -toc: true -weight: 300 ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Introduction - -API Connectivity Manager provides a [RESTful](https://en.wikipedia.org/wiki/Representational_state_transfer) API that uses standard authentication methods, HTTP response codes, and verbs. - -## Object Model - -You can use the API Connectivity Manager API to connect, secure, and govern your APIs. In addition, API Connectivity Manager lets you separate infrastructure lifecycle management from the API lifecycle, giving your IT/Ops teams and application developers the ability to work independently. - -The API Connectivity Manager API provides the following features: - -- Create and manage isolated Workspaces for business units, development teams, and so on, so each team can develop and deploy at its own pace without affecting other teams. -- Create and manage API infrastructure in isolated workspaces. -- Enforce uniform security policies across all workspaces by applying global policies. -- Create Developer Portals that align with your brand, with custom color themes, logos, and favicons. -- Onboard your APIs to an API Gateway and publish your API documentation to the Developer Portal. -- Let teams apply policies to their API proxies to provide custom quality of service for individual applications. -- Onboard API documentation by uploading an OpenAPI spec. -- Publish your API docs to a Dev Portal while keeping your API's backend service private. -- Let users issue API keys or basic authentication credentials for access to your API. -- Send API calls by using the Developer Portal's API Reference documentation. - -## API Reference Documentation - -You can view the API Connectivity Manager API Reference documentation in the F5 NGINX Management Suite user interface. -To access the API Docs, take the steps below: - -1. Log in to the NGINX Management Suite user interface. -2. From the Launchpad, select the **Docs** card. -3. 
Select **API Connectivity Manager** from the **Docs** list in the sidebar. The API Connectivity Manager API Reference documentation will then display. - -## Authentication - -API Connectivity Manager supports authentication by using basic authentication or a JSON Web Token (JWT). You can get a JWT by logging in with an OpenID Connect (OIDC) Identity Provider. - -For more information about the available authentication options for NGINX Management Suite, refer to [Set Up Authentication]({{< ref "/nim/admin-guide/authentication/basic-auth/set-up-basic-authentication.md" >}}). - -### Basic Authentication - -You can make API requests with basic auth by sending the base64-encoded credentials as a "Basic" token in the "Authorization" request header, as shown in the example below. - -```shell -curl -X GET "https:///api/acm//workspaces/infrastructure" -H "Authorization: Basic YWRtaW..." -``` - -{{< call-out "warning" >}}Even when encoded, basic authentication is not secure. The use of basic auth is not recommended for production environments.{{< /call-out >}} - -### JSON Web Token - -If your organization is using OIDC, you will be prompted to log in with your Identity Provider the first time you attempt to reach an API. After authenticating, you can request a JWT to use in subsequent API calls. - -{{< call-out "note" >}} -
      - -- The means of requesting a token varies according to the Identity Provider; if you're not sure which provider your organization uses, check with your system administrator or technical support team. -- Automated CI/CD workflows are not supported when using OIDC authentication. -{{< /call-out >}} - -The JWT should be sent as a "Bearer" token in the "Authorization" request header, as shown in the example below. - -```shell -curl -X GET "https:///api/acm//workspaces/infrastructure" -H "Authorization: Bearer " -``` - -## Usage - -### Command-line - -You can use tools such as `curl` or [Postman](https://www.postman.com) to interact with the API Connectivity Manager REST API. -The API URL follows the format `https:///api/acm/`. - -{{< call-out "note" >}}When making API calls by using `curl`, Postman, or any other tool, you will need to provide your authentication information with each call. {{< /call-out >}} - -### User Interface - -You can also use the "Try it Out" function in the API Reference docs to send a call to the API Connectivity Manager API. You do not have to provide a means of authentication when sending API calls via the API Documentation UI because you are already logged in to the NGINX Management Suite platform. - -To do so, take the steps below: - -1. Select the endpoint and action that you want to send. For example: `POST /infrastructure/workspaces`. -2. Select the **Try it Out** button. -3. If the endpoint accepts parameters, replace the placeholder examples in the request body with your desired values. -4. Select the **Execute** button to send the request. -5. When the request completes, the response appears in the UI. - -## Errors and Response Codes - -API Connectivity Manager uses standard HTTP response codes to indicate whether an API request succeeds or fails. Codes in the `2xx` range mean the request succeeded. Codes in the `400` range mean the request failed due to the reason(s) indicated in the response message. Common reasons for `4xx` responses are: - -- requests where required information is missing; -- lack of or incorrect authentication credentials; and -- requests that refer to resources that do not exist or are in use by other resources. - -**HTTP Status Codes** -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Response Code | Meaning | -|---------------|---------| -| 200 | Success: The request was received. | -| 201 | Success: Created the requested resource. | -| 202 | Success: The request was accepted and configuration is in process. | -| 204 | Success: Deleted the requested resource. | -| 400 | Bad Request: Required information is missing or incorrectly formatted. | -| 401 | Unauthorized: You are not logged in or do not have permission to access the requested resource. | -| 404 | Not found: The requested resource does not exist. | -| 409 | Conflict: The requested resource already exists or is referenced by another resource. | - -{{< /bootstrap-table >}} - -## Encoding - -All API Connectivity Manager API endpoints expect and return JSON-formatted data by default. -All JSON-formatted data is expected to be encoded using UTF-8. If you do not specify a media type when sending an API call, then the API defaults to `application/json`. - -## Pagination - -Top-level API Connectivity Manager API endpoints support fetching information about multiple resources ("lists"). 
Such requests may return large data sets (for example, `GET /services/workspaces/{workspaceName}/proxies` and `GET /services/workspaces/{workspaceName}/proxies/{proxyName}/jobs`). For these endpoints, you can define the size of the data set returned for each call and navigate amongst the pages of data when sending subsequent calls. - -### Parameters - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Name| Format| Type| Description | Default value| -|:---|---|---|--------|--------| -|`page`|integer|query| page number | `1`| -|`pageToken`|string|query|Transactional token used for pagination.
      The token ensures that query results remain consistent across requests for different pages of data. It provides a snapshot of the database contents at the time the query was received.
      If `pageToken` is not provided with a page request, a token is automatically generated and returned in the response metadata. Include this token in subsequent requests for additional pages of the query results.

      Sending a query without a `pageToken` refreshes the query results.|N/A | -|`pageSize`|integer|query|Defines the number of items returned per page.

      The maximum value is 100; larger values are automatically scaled down to 100.

      If `pageSize=0`, pagination is disabled and the full dataset will be returned in the response.
      The response size is limited to 10,000 results. If the number of results exceeds 10,000 a warning is returned.|`100`| - -{{< /bootstrap-table >}} - -## Versioning - -Each major version of the API Connectivity Manager API is backward-compatible with the previous releases in that version. -The introduction of backward-incompatible changes to the API Connectivity Manager API constitutes a major version change. -This will be represented in the `` section of the API URI. - -For example, to use a v2 API, you would send requests to `https:///api/acm/v2`. diff --git a/content/nms/acm/about/architecture.md b/content/nms/acm/about/architecture.md deleted file mode 100644 index 051dd124a..000000000 --- a/content/nms/acm/about/architecture.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -description: Learn about the F5 NGINX Management Suite API Connectivity Manager architecture. -nd-docs: DOCS-892 -title: Architecture Overview -toc: true -weight: 400 -type: -- concept ---- - -{{< shortversions "1.0.0" "latest" "acmvers" >}} - -## Overview - -This topic provides an overview of the API Connectivity Manager architecture and personas. - ---- - -## Terminology - -This document introduces the following concepts. - -### Topology - -{{}} - -|
      Term
      | Description | -|-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Management Plane | The management plane is an abstraction layer used to configure, monitor, and manage the layers of a network stack. API Connectivity Manager, a part of the management plane, establishes guardrails and configures rules for the data plane. | -| Data Plane | [F5 NGINX Plus](https://www.f5.com/products/nginx/nginx-plus) instances in the traffic path that act as load balancers, API gateways, firewalls, ingress controllers, and caching systems. | -| Proxy Cluster |

      A Proxy Cluster is a set of one or more NGINX Plus servers working together; NGINX is widely known as a reverse proxy. The cluster keeps configurations in sync across all instances and maintains data consistency by sharing runtime state.

      Examples:

      • **API Gateway Cluster**: A cluster of one or more NGINX Plus instances acting as a single proxy for API requests.
      • **Dev Portal Cluster**: A cluster of one or more NGINX Plus instances configured to act as Developer Portals. Developer portals provide a framework for hosting API documentation, provisioning access keys, and managing approval workflows. In addition, you can test your APIs with the "Try It Out" feature.
      | - -{{
      }} - -### Platform Services - -API Connectivity Manager uses [NATS](https://nats.io) to communicate with the NGINX Management Suite platform services. - -{{< include "nms/services/platform-services.md" >}} - ---- - -## Architecture - -The following diagram shows how API Connectivity Manager's components are organized and interact. - -{{< call-out "note" >}}API Connectivity Manager takes an API-first approach: commands issued using the web interface are processed using the API Connectivity Manager REST API. -{{< /call-out >}} - -{{API Connectivity Manager architecture}} - ---- - -## Personas - -### Infrastructure Admin - -Infrastructure Admins, interacting primarily with the management plane, manage the infrastructure for hosts. - -#### Routine tasks - -- Configure SSO -- Provision the infrastructure -- Configure domain names -- Manage data plane hosts -- Manage certificates -- Enforce global/enterprise policies - -### API Owner - -API Owners oversee the API lifecycle, which they can maintain using a CI/CD pipeline. - -The API Owner relies on the Infrastructure Admin to complete the initial configuration before beginning work. - -#### Routine tasks - -- Set up an API team -- On-board an API -- Configure policies to meet *Quality of Service (QoS)* commitments -- Select the API Gateway cluster for publishing an API -- Select the Dev Portal cluster for publishing API documentation - -### Application Owner - -Application Owners develop new digital experiences. - -#### Routine tasks - -- Learn about APIs and API contracts by reading the documentation on the Dev Portal. -- Test APIs using the "Try It Out" feature in the on-board documentation. diff --git a/content/nms/acm/about/images/HighLevelComponents.png b/content/nms/acm/about/images/HighLevelComponents.png deleted file mode 100644 index 0a93d42f8..000000000 Binary files a/content/nms/acm/about/images/HighLevelComponents.png and /dev/null differ diff --git a/content/nms/acm/about/introduction.md b/content/nms/acm/about/introduction.md deleted file mode 100644 index 519fc340d..000000000 --- a/content/nms/acm/about/introduction.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -nd-docs: DOCS-1385 -title: Introduction -weight: 100 ---- - -[API Connectivity Manager]({{< ref "/nms/acm/">}}) enables self-service and automation of API delivery. API Connectivity Manager allows you to deploy, configure, secure, monitor, and govern API gateways at scale. - -The API Connectivity Manager module provides a [REST API]({{< ref "/nms/acm/about/api-overview">}}) that uses standard authentication methods, HTTP response codes, and verbs. - -You can use the API Connectivity Manager API to connect, secure, and govern your APIs. In addition, API Connectivity Manager lets you separate infrastructure lifecycle management from the API lifecycle, allowing your IT/Ops teams and application developers to work independently. 
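For example, once you have credentials for the platform, a single request confirms that the module's REST API is reachable. The command below is a minimal sketch that lists infrastructure workspaces; the hostname, API version, and token are placeholders that you must replace with values from your own deployment, and the authoritative endpoint paths are documented in the API reference.

```shell
# Minimal reachability check -- all angle-bracketed values are placeholders.
curl -X GET "https://<NMS_FQDN>/api/acm/<API_VERSION>/infrastructure/workspaces" \
     -H "Accept: application/json" \
     -H "Authorization: Bearer <ACCESS_TOKEN>"
```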
- -## API Connectivity Manager Key Features - -The API Connectivity Manager module provides the following features: - -- [Create and manage isolated workspaces]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#create-a-workspace">}}) -- [Create and manage API infrastructure]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-an-environment">}}) in isolated workspaces -- [Enforce uniform security policies]({{< ref "/nms/acm/how-to/policies/tls-policies.md" >}}) across workspaces using global policies -- [Create Developer Portals]({{< ref "/nms/acm/how-to/infrastructure/publish-developer-portal.md" >}}) with custom color themes, logos, and favicons -- [Onboard APIs to an API Gateway]({{< ref "/nms/acm/how-to/infrastructure/publish-developer-portal.md#add-an-api-doc" >}}) and [publish API documentation]({{< ref "/nms/acm/how-to/infrastructure/publish-developer-portal.md#publish-the-api-documentation-and-api-proxy" >}}) to the Developer Portal -- [Apply policies to API proxies]({{< ref "/nms/acm/how-to/policies/manage-policies.md#configure-proxy-policies" >}}) to provide custom quality of service for individual applications -- [Issue API keys]({{< ref "/nms/acm/how-to/infrastructure/enable-sso-devportal" >}}) or basic authentication credentials for access to the API diff --git a/content/nms/acm/about/policies-overview.md b/content/nms/acm/about/policies-overview.md deleted file mode 100644 index 6ee79a117..000000000 --- a/content/nms/acm/about/policies-overview.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -description: Learn about the policies available for use in F5 NGINX Management Suite - API Connectivity Manager. -nd-docs: DOCS-932 -title: Available Policies -toc: true -weight: 500 -type: -- reference ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -This page gives an overview of the available policies in API Connectivity Manager. Policies allow you to protect and secure your services and their data. - ---- - -## Policy Types - -There are two types of policies for API Connectivity Manager: - -{{}} - -| Policy Type | Description | -|------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Global policies](#global-policies) | Global policies, typically managed by an Enterprise Security or Support team, are onboarded as a one-time task when onboarding an API. Global policies are enforced for all of the APIs in an environment. | -| [API proxy policies](#api-proxy-policies) | When onboarding APIs to API Connectivity Manager, API owners define API-level policies to enforce security and behavior characteristics for their APIs. | - -{{}} - -### Global Policies {#global-policies} - -Global policies are enforced for all of the APIs in an environment. Global policies are commonly prescribed by an Enterprise Security or Support team; the Security or Support team decides if API owners can edit the global policies. - -{{< include "acm/about/global-policies.md" >}} - -### API Proxy Policies {#api-proxy-policies} - -Apply API gateway proxy policies to enhance the experience of your APIs. 
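Proxy policies are attached when an API owner creates or updates an API proxy. The sketch below only illustrates where a policy object generally sits in such a request; the policy name, its fields, and the exact payload schema vary by policy and are covered in the reference content that follows, so treat every value here as a placeholder rather than a working configuration.

```shell
# Illustrative sketch only: every angle-bracketed value is a placeholder, and each
# policy type defines its own fields inside the objects in the policy array.
curl -X PUT "https://<NMS_FQDN>/api/acm/v1/services/workspaces/<proxyWorkspaceName>/proxies/<proxyName>" \
     -H "Content-Type: application/json" \
     -H "Authorization: Bearer <ACCESS_TOKEN>" \
     -d '{
           "name": "<proxyName>",
           "version": "v1",
           "proxyConfig": {
             "hostname": "<environmentHostname>",
             "ingress": { "basePath": "/v1" },
             "policies": {
               "<policy-name>": [ { } ]
             }
           }
         }'
```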
- -{{< include "acm/about/api-proxy-policies.md" >}} - diff --git a/content/nms/acm/about/rbac-roles.md b/content/nms/acm/about/rbac-roles.md deleted file mode 100644 index bd43740f4..000000000 --- a/content/nms/acm/about/rbac-roles.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "RBAC roles" -weight: 600 -nd-docs: "DOCS-1667" ---- - -## Built-In Roles - -### API Connectivity Manager - -API Connectivity Manager comes pre-configured with roles suitable for API Owners and Infrastructure Admins. - -- **API Owner**: The individuals or teams who are responsible for designing, creating, and maintaining APIs. -- **Infrastructure Admin**: Infrastructure Administrators ensure uniform governance across an organization’s infrastructure by setting policies at the infrastructure level, enabling teams to build APIs without interruption while adhering to the organization’s standards. - -#### ACM API Owner {#acm-api-owner} - -{{< include "acm/rbac/api-owner-role.md" >}} - -{{< call-out "note" >}}The tutorial [Set Up RBAC for API Owners]({{< ref "/nms/acm/tutorials/rbac-api-owners.md">}}) provides an example of how to configure RBAC for API owners.{{< /call-out>}} - -
      - -#### ACM Infra Admin {#acm-infra-admin} - -{{< include "acm/rbac/infra-admin-role.md" >}} - -{{< call-out "note" >}}The tutorial [Set Up RBAC for Infra Admins]({{< ref "/nms/acm/tutorials/rbac-infra-admins.md">}}) provides an example of how to configure RBAC for Infrastructure Administrators.{{< /call-out>}} diff --git a/content/nms/acm/about/technical-specifications.md b/content/nms/acm/about/technical-specifications.md deleted file mode 100644 index 1a3e43420..000000000 --- a/content/nms/acm/about/technical-specifications.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -nd-docs: DOCS-1470 -title: Technical Specifications -weight: 200 ---- - -### Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -{{< call-out "important" >}} If you're [installing API Connectivity Manager in an offline environment]({{< ref "/nim/disconnected/offline-install-guide.md#install-acm-offline" >}}) and the minimum required version of Instance Manager is not installed, the API Connectivity Manager installer will exit. You'll need to [install Instance Manager manually]({{< ref "/nim/disconnected/offline-install-guide.md#install-nim-offline" >}}) before installing API Connectivity Manager.{{< /call-out >}} - -### API Connectivity Manager Supported NGINX Versions {#acm-supported-nginx} - -{{< include "tech-specs/acm-supported-nginx.md" >}} - -### Developer Portal Supported Distributions {#dev-portal-supported-distributions} - -{{< include "tech-specs/acm-dev-portal-supported-distros.md" >}} - ---- - -## Supported Linux Distributions - -{{< call-out "note" "API Connectivity Manager" >}}Make sure you review the [supported distributions for the Developer Portal](#dev-portal-supported-distributions) host before installing the API Connectivity Manager module. There is a slight difference between the supported distributions in that list and this one. -{{< /call-out >}} - -API Connectivity Manager supports the following Linux distributions: - - -{{}} - -| Distribution | Version | Architecture | Instance Manager | API Connectivity Manager | -|---------------------------------------------|----------------------------------------------------------------------------------------------------------------|----------------------------|------------------------------------------------------|------------------------------------------------------| -| Amazon Linux | 2 LTS | x86_64 | Supported | Supported | -| CentOS | 7.4 and later in the 7.x family | x86_64 | Supported | Supported | -| Debian | 11
      12 | x86_64
      x86_64 | Supported
      Supported on 2.13+ | Supported
      Not supported | -| Oracle Linux | 7.4 and later in the 7.x family
      8.0 and later in the 8.0.x family | x86_64
      x86_64 | Supported
      Supported on 2.6.0+ | Supported
      Supported on 1.3.0+ | -| RHEL | 7.4 and later in the 7.x family
      8.x and later in the 8.x family
      9.x and later in the 9.x family | x86_64
      x86_64
      x86_64 | Supported
      Supported
      Supported on 2.6.0+ | Supported
      Supported
      Supported on 1.3.0+ | -| Ubuntu | 20.04
      22.04 | x86_64
      x86_64 | Supported
      Supported on 2.3.0+ | Supported
      Supported | - -{{
      }} - - - - -## Supported NGINX Versions - -{{< include "tech-specs/acm-supported-nginx.md" >}} diff --git a/content/nms/acm/getting-started/_index.md b/content/nms/acm/getting-started/_index.md deleted file mode 100644 index 769839119..000000000 --- a/content/nms/acm/getting-started/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Learn how to get up and running with F5 NGINX Management Suite API Connectivity Manager." -title: Getting Started Guides -weight: 400 -url: /nginx-management-suite/acm/getting-started/ ---- - diff --git a/content/nms/acm/getting-started/add-api-gateway.md b/content/nms/acm/getting-started/add-api-gateway.md deleted file mode 100644 index 0af2db5d9..000000000 --- a/content/nms/acm/getting-started/add-api-gateway.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - configure an API Gateway. -nd-docs: DOCS-921 -title: Set Up an API Gateway Environment -toc: true -weight: 100 ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -In API Connectivity Manager, an API Gateway is a proxy cluster that contains one or more NGINX data plane instances. -These clusters are managed under **Infrastructure Workspaces** and are part of **Environments**. - -### Before You Begin - -Before proceeding with this guide, you should familiarize yourself with the [API Overview]({{< ref "/nms/acm/about/api-overview" >}}) and the [Get Started]({{< ref "/nms/acm/getting-started/overview" >}}) section of this series. - -## Add an Infrastructure Workspace - -First, you'll need to create an Infrastructure Workspace. -This is a logical grouping that allows for separation between business units or teams. - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces`| - -{{}} - - -```json -{ - "name": "{{infraWorkspaceName}}", - "metadata": { - "description": "App Development Workspace" - }, - "contactDetails": { - "adminEmail": "I.M.Devs@example.com", - "adminName": "I.M. Devs", - "adminPhone": "555 321 1234" - } -} -``` - -## Add an Environment - -Next, add an Environment. - -Environments contain **API Gateways** and **Develper Portals**. -Use the appropriate example below to deploy an API Gateway with either HTTP, HTTP2, or HTTPS. - -### HTTP - -> {{< icon "lightbulb" >}} Use this example to get up and running quickly in a demo environment. - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| - -{{}} - - -```json -{ - "name": "{{environmentName}}", - "functions": [ - "API-GATEWAY" - ], - "proxies": [ - { - "proxyClusterName": "{{instanceGroupName}}", - "hostnames": [ - "{{environmentHostname}}" - ], - "runtime": "GATEWAY-PROXY" - } - ] -} -``` - -### HTTPS - -To deploy a cluster that uses HTTPS for secure inbound communication, you'll add the **TLS Inbound** policy. -Because this is done at the Infrastructure level, this is considered a "Global Policy". - -> {{< icon "lightbulb" >}} You need to provide a valid TLS server certificate and key in this API call. - -{{}} -Need to add requirements for sending this info? Base64 encoding required? 
-{{}} - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| - -{{}} - - -```json -{ - "name": "{{environmentName}}", - "proxies": [ - { - "proxyClusterName": "{{instanceGroupName}}", - "hostnames": [ - "{{environmentHostname}}" - ], - "policies": { - "tls-inbound": [ - { - "data": { - "serverCerts": [ - { - "key": "{{tls key}}", - "cert": "{{tls cert}}" - } - ] - } - } - ] - } - } - ] -} -``` - -### HTTP2 - -To deploy a cluster that uses HTTP2 for secure inbound communication, you'll add the **TLS Inbound** policy. -Because this is done at the Infrastructure level, this is considered a "Global Policy". - -> {{< icon "lightbulb" >}} You need to provide a valid TLS server certificate and key in this API call. - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| - -{{}} - - -```json -{ - "name": "{{environmentname}}", - "proxies": [ - { - "proxyClusterName": "{{instanceGroupName}}", - "listeners": [ - { - "transportProtocol": "HTTP2", - "port": 443, - "tlsEnabled": true - } - ], - "hostnames": [ - "{{environmentHostname}}" - ], - "policies": { - "tls-inbound": [ - { - "data": { - "serverCerts": [ - { - "key": "{{tls key}}", - "cert": "{{tls cert}}" - } - ] - } - } - ] - } - } - ] -} -``` - -## Onboard F5 NGINX Plus Instances into the Cluster - -Take the steps in this section to install the NGINX Agent on the data plane instances to onboard them into the proxy cluster that you created in the [previous step](#add-an-environment). - -To do so, you need to interact directly with the NGINX Plus data plane hosts. - -- SSH access to the hosts and `sudo` permissions are required. -- You can add up to three NGINX Plus data plane instances to the cluster. - -### Install NGINX Agent on the Data Plane Hosts {#onboard-nginx-plus} - -1. Use SSH to connect and log in to each of the NGINX Plus data plane hosts that you want to add to the API Gateway cluster. -1. Run the onboarding command as root using cURL to download, install, configure, and start the NGINX Agent package. - - - Replace `{{nms-fqdn}}` in the example command with the FQDN or IP address of your API Connectivity Manager management plane host. - - Make sure `-g {{clusterName}}` uses the name of your API Gateway cluster. - - ```bash - curl --insecure https://{{nms-fqdn}}/install/nginx-agent > install.sh && \ - sudo sh install.sh -g {{clusterName}} && sudo systemctl start nginx-agent - ``` - -### Verify the Settings - -Try sending traffic to the hostname you configured for the API Gateway. Send a PUT request to the endpoint shown below to update the Environment. - -1. Send a GET request to the endpoint shown below to verify that the instances were added to the Clusters. - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}`| -| GET | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}?includes=instances&includes=status`| - -{{}} diff --git a/content/nms/acm/getting-started/add-devportal.md b/content/nms/acm/getting-started/add-devportal.md deleted file mode 100644 index 78ffac5ae..000000000 --- a/content/nms/acm/getting-started/add-devportal.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - configure a Developer Portal. 
-nd-docs: DOCS-922 -title: Set Up a Developer Portal Environment -toc: true -weight: 200 ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -In API Connectivity Manager, a Developer Portal (or, "Dev Portal") is a cluster of F5 NGINX Plus data plane instances. -These clusters are managed under **Infrastructure Workspaces** and are part of **Environments**. - -### Before You Begin - -You should complete the following Quick Start Guide(s) before proceeding with the steps in this guide: - -- [Configure an API Gateway]({{< relref "./add-api-gateway" >}}) - -## Add a Developer Portal - -Complete the steps in this guide to add a Developer Portal to the Environment you created in the [previous guide]({{< relref "./add-api-gateway" >}}). - -When a Developer Portal environment is created, the API Connectivity Manager configures -a virtual server through which the developer portal service and API Connectivity Manager communicate. By default, the hostname for this server is the -hostname that you provided for the Developer Portal cluster, prefixed with `acm.`. For example: `acm.dev-portal.io`. This virtual server listens on port 81. - -You will need to update your DNS resolver settings to ensure this hostname is resolvable. -The hostname and port for this server can be updated by selecting the **Edit Portal <-> API Connectivity Manager Connectivity** from the **Actions** menu for your desired developer portal. - -{{< call-out "important" >}} - -- Be sure to provide the IP address or FQDN of the host where you installed the Dev Portal packages as the `{{portalClusterHostname}}`. -- The Dev Portal must run on a dedicated host with the [`njs`](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) module installed. -{{< /call-out >}} - -Use the appropriate example below to deploy an HTTP or HTTPS Developer Portal. - -### HTTP - -> {{< icon "lightbulb" >}} Use this example to get up and running quickly in a demo environment. - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| - -{{}} - - -```json -{ - "name": "{{environmentName}}", - "functions": [ - "DEVPORTAL" - ], - "proxies": [ - { - "proxyClusterName": "{{portalInstanceGroupName}}", - "hostnames": [ - "{{portalClusterHostname}}" - ], - "runtime": "PORTAL-PROXY" - } - ] -} -``` - -### HTTPS - -To deploy a cluster that uses HTTPS for secure inbound communication, you'll add the **TLS Inbound** policy. -Because this is done at the Infrastructure level, this is considered a "Global Policy". - -> {{< icon "lightbulb" >}} You need to provide your TLS server certificate and key as base64-encoded strings in this API call. - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| - -{{}} - - -```json -{ - "name": "{{environmentName}}", - "functions": [ - "DEVPORTAL" - ], - "proxies": [ - { - "proxyClusterName": "{{portalInstanceGroupName}}", - "hostnames": [ - "{{portalClusterHostname}}" - ], - "runtime": "PORTAL-PROXY", - "policies": { - "tls-inbound": [ - { - "data": { - "serverCerts": [ - { - "key": "{{tls key}}", - "cert": "{{tls cert}}" - } - ] - } - } - ] - } - } - ] -} -``` - -## Onboard an NGINX Plus Instance into the Cluster - -Take the steps in this section to install the NGINX Agent on the data plane instance where you installed the Developer Portal packages. 
-This onboards the host into the proxy cluster that you created in the [previous step](#add-an-environment). - -To do so, you'll need to interact directly with the host. -SSH access to the host and `sudo` permissions are required. - -### Install NGINX Agent on the Data Plane Host - -1. Use SSH to connect and log in to the Dev Portal host. -1. Run the onboarding command as root to download, install, configure, and start the NGINX Agent package. - - - Replace `{{nms-fqdn}}` in the example command with the FQDN or IP address of your Dev Portal host. - - Replace `{{clusterName}}` in the example command with the name of your Developer Portal cluster. - - ```bash - curl --insecure https://{{nms-fqdn}}/install/nginx-agent > install.sh && \ - sudo sh install.sh -g {{clusterName}} && sudo systemctl start nginx-agent - ``` - -### Update the DNS Record - -The NGINX Management Suite management plane host uses the Developer Portal's hostname to communicate with the Dev Portal. -You’ll need to update your DNS resolver settings with the Developer Portal's internal hostname. - -> {{< icon "lightbulb" >}} The internal hostname is the hostname that you provided for the Developer Portal, prefixed with `acm.`. -> For example: `acm.dev-portal.io` - -Next, open the Developer Portal in a browser window and make sure the portal loads. - -## Customize the Developer Portal - -In this step, you'll apply a set of customizations to the Developer Portal. -Because these settings are applied at the Infrastructure level, they are considered "global", meaning they apply to each Dev Portal Proxy that you associate with the cluster. - -{{< call-out "note" >}}Refer to [Customize the Developer Portal]({{< ref "/nms/acm/how-to/infrastructure/customize-devportal.md" >}}) to learn more about the available customization options and how to customize a Dev Portal via the API Connectivity Manager user interface.{{< /call-out>}} - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/devportals/{{devPortalName}}`| - -{{}} - - -Note that many fields in the example JSON payload -- including the logo image and Markdown documents -- are base64-encoded. - -**Example JSON payload**: {{< icon "download" >}} {{< link "/acm/customize-devportal.json" "customize-devportal.json" >}} - -Before you move on to the next guide, open the Dev Portal in your browser to view the changes. -You should see the default Dev Portal replaced by the custom settings. diff --git a/content/nms/acm/getting-started/overview.md b/content/nms/acm/getting-started/overview.md deleted file mode 100644 index f33ab2840..000000000 --- a/content/nms/acm/getting-started/overview.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -description: Learn how to get up and running with F5 NGINX Management Suite API Connectivity - Manager. -nd-docs: DOCS-939 -title: Get Started -toc: true -weight: 10 ---- - - - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Objectives - -By completing the guides in this Quick Start series, you can easily get up and running with API Connectivity Manager. - -This series covers the following topics: - -1. Setting up an environment with [API Gateway]({{< relref "./add-api-gateway" >}}) & [Developer Portal]({{< ref "add-devportal" >}}) clusters. -2. Onboarding F5 NGINX Plus instances onto the clusters. -3. [Publishing an API proxy]({{< ref "publish-api-proxy" >}}) with or without an OpenAPI spec. 
- ---- - -## Requirements - -To complete the instructions in this series, you must meet the following requirements: - -1. [Install API Connectivity Manager and Developer Portal]({{< ref "/nim/deploy/_index.md" >}}) on [separate hosts]({{< ref "/nim/fundamentals/tech-specs.md" >}}). -2. [Install a supported version of NGINX Plus]({{< ref "/nim/fundamentals/tech-specs.md" >}}) on one or more hosts to serve as the API Gateway. -3. [Install the `njs` module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) on your NGINX data plane and Dev Portal hosts. -4. You have SSH access and `sudo` permissions for the NGINX data plane host(s). -5. You have an API Connectivity Manager user account with permission to READ, CREATE, UPDATE, and DELETE on the following API Connectivity Manager features: - - - Infrastructure - - Services - ---- - -## How to Access the REST API - -{{< include "acm/how-to/access-acm-api" >}} - -For example: - -```shell -curl --location --request POST 'https://{{nms-fqdn}}/api/acm/v1/services/workspaces/{{workspaceName}}' \ ---header 'Accept: application/json' \ ---header 'Content-Type: application/json' \ ---header 'Authorization: Bearer ' \ ---data-raw '' -``` - ---- - -## Variables - -The examples provided in these guides use a set of common variables. -You should replace these variables -- or define a set of values for them in your API client -- with information relevant to your environment before trying to use them. -The variables used in the examples are described in the table below. - - -{{< bootstrap-table "table table-hover table-bordered" >}} -|Variable | Definition | -|---|-------| -| `nms-fqdn`| The fully-qualified domain name (FQDN) or IP address of the host running NGINX Management Suite.
      This is also referred to as the "management plane" host. | -| `backendIp` | The IP address or hostname of a backend server running an API service. | -| `nginxInstance` | The IP address or hostname of an NGINX data plane instance. | -| `devPortalIp` | The IP address or hostname of the instance hosting the Developer Portal. | -| `username` | Your account username. | -| `password` | Your account password. | -| `instanceGroupName` | The name of the API Gateway. This name is recorded as an Instance Group name by the NGINX Agent. | -| `infraWorkspaceName` | The name of the Infrastructure Workspace that you want to work in. | -| `proxyWorkspaceName` | The name of the Service Workspace that you want to work in. | -| `proxyName` | The name of the Proxy that you want to create, read, update, or delete. | -| `environmentName` | The name of the Environment that you want to work in. | -| `environmentHostname` | The hostname of the API Gateway. | -| `devPortalName` | The resource name of the Developer Portal Proxy. | -| `portalDocsName` | The resource name of the API Docs. | -| `portalInstanceGroupName` | The resource name of the Developer Portal. | -| `portalClusterHostname` | The hostname for the Developer Portal. | -| `clusterName` | The proxy cluster name for the Developer Portal or API Gateway. | - -{{< /bootstrap-table >}} - diff --git a/content/nms/acm/getting-started/publish-api-proxy.md b/content/nms/acm/getting-started/publish-api-proxy.md deleted file mode 100644 index 4f0cfef28..000000000 --- a/content/nms/acm/getting-started/publish-api-proxy.md +++ /dev/null @@ -1,496 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - publish an API Proxy. -nd-docs: DOCS-923 -title: Publish an API Proxy -toc: true -weight: 300 ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -In API Connectivity Manager, **Services** represent your Backend APIs. -**Proxies** represent the NGINX reverse proxy that routes traffic to your backend service and to the Developer Portal. -This guide provides instructions and examples for publishing an API and a Developer Portal by using the REST API. - -### Before You Begin - -You should complete the following Quick Start Guides before proceeding with the steps in this guide: - -1. [Set Up an API Gateway Environment]({{< relref "./add-api-gateway" >}}) -1. [Set Up a Developer Portal Environment]({{< ref "add-devportal" >}}) - -## Create a service workspace -Services workspaces is a logical grouping of APIs. A user can created multiple workspaces that match an organizational structure. - -{{}} - -{{%tab name="UI"%}} - -1. Select the **Services** option on the left hand menu. -1. Select the **Create Workspace** button. -1. Enter a name. -1. (Optional) Provide a description of the workspace. -1. (Optional) Select the **Contact Information** check box to provide contact details. -1. Select the **Create** button. - - -{{%/tab%}} -{{%tab name="API"%}} - - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/services/workspaces`| - -{{}} - - -```json -{ - "name": "{{proxyWorkspaceName}}", - "metadata": { - "description": "Petstore Team Workspace" - }, - "contactDetails": { - "adminEmail": "admin@example.com", - "adminName": "I.M. Admin", - "adminPhone": "555 123 1234" - } -} -``` - -{{%/tab%}} -{{}} - - - - - -## Publish API Proxy without OpenAPI Spec {#set-up-api-proxy} - -An API proxy connects the backend services to the API-Gateway. 
- -{{}} - -{{%tab name="UI"%}} - -After creating the workspace, you can select **Publish API Proxy** or open the previously created workspace. - -On the Publish API Proxy window: -### Backend Service - -1. Type a name for the backend service. -1. Type the **Service Target Hostname**; this can be an IP or FQDN. -1. For the **Service Target Transport Protocol**, if your backend service is using gRPC, then select gRPC. -1. Type the **Service Target Port**, or use the arrow buttons to increase or decrease the port number. - -### API Proxy - -1. Type a name for the API Proxy. -1. Select No in the **Use an OpenAPI spec** option. -1. Select the **Gateway Proxy Hostname from** the menu. -{{< call-out "note" >}}If this field is disabled, check the job status of your environment on the infrastructure workspace page.{{< /call-out >}} - -### Ingress - -1. Enter the Base Path that you wish to route traffic to. -1. Type the version of your API. -1. Select **Publish**. - -### Confirm Setup - -1. Open a terminal application. -1. Run the following command: - - ```curl - curl -k -X GET "https://gateway-proxy-hostname/version/basepath" - ``` - -1. If your proxy is set up correctly, you can send traffic. - -{{< call-out "note" >}}By default the ingress append rule is set to `PREFIX` so your request must be in the form of `version/basepath` {{< /call-out >}} - - - - - -{{%/tab%}} -{{%tab name="API"%}} -After creating the service workspace, you can select **Publish API Proxy**, or you can follow these steps: - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST | `/services/workspaces/{{proxyWorkspaceName}}/proxies`| - -{{}} - - - - -The basic configuration below creates an API Proxy to a backend service. - -```json -{ - "name": "{{proxyName}}", - "metadata": { - "description": "Swagger Petstore Proxy" - }, - "version": "v1", - "proxyConfig": { - "hostname": "{{environmentHostname}}", - "ingress": { - "basePath": "/v1" - }, - "backends": [ - { - "serviceName": "petstore-api-svc", - "serviceTargets": [ - { - "hostname": "petstore.example.com" - } - ] - } - ] - } -} -``` - -{{%/tab%}} -{{}} - - - -## Publish API Proxy with OpenAPI Spec {#publish-api-proxy-with-spec} -{{< include "acm/openapi-support" >}} - -### Uploading an OAS Schema -OAS Schemas can be uploaded to API Connectivity Manager and stored for use as references for *Proxy* deployments. -The routes contained in the OAS Schema will be used to create the routes for your *Proxy* - -### Creating a Proxy with an OAS -After you have uploaded your OAS Schema as an *API Doc*, you can then reference that *API Doc* in your *Proxy* deployments using the `specRef` parameter in the JSON payload. -Using the `specRef` will then associate that OAS Schema in API Connectivity Manager and allow API Connectivity Manager to create your routes from the information contained in the OAS Schema. - -### Extended support for OAS in API Connectivity Manager -API Connectivity Manager now allows you to set up an API gateway using Open API Specification by supporting the creation of *Backends* (upstream servers) from the supplied OAS using an API Connectivity Manager specific *x-* extension in your OAS document. -API Connectivity Manager now also supports server URL templating in the global URL(s). - -
      -Example JSON - -```json -"servers": [ - { - "url": "https://{server}.example.com/api/{version}", - "variables": { - "version": { - "default": "v1" - }, - "server": { - "default": "staging" - } - }, - "x-acm-append-rule": "NONE", - "x-acm-strip-basepath": false, - "x-acm-backends": [ - { - "serviceName": "pets-backend", - "serviceVersion": "pets-backend-v1", - "serviceLabel": "default", - "contextRoot": "/dev", - "upstreams": [ - { - "url": "https://gecho1.null.ie", - "maxFails": 10, - "maxConnections": 5, - "failTimeout": "5s", - "slowStart": "10s" - }, - { - "url": "https://gecho2.null.ie", - "maxFails": 5, - "maxConnections": 8, - "failTimeout": "15s", - "slowStart": "3s" - }, - { - "url": "https://gecho3.null.ie", - "maxFails": 7, - "maxConnections": 33, - "failTimeout": "35s", - "slowStart": "1s" - } - ] - } - ] - } -], -``` - -
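A specification that carries these extensions is uploaded like any other API Doc. The following sketch assumes a complete OpenAPI document containing a `servers` block like the one above, saved as a JSON file named `petstore-oas.json`; the file name, hostname, workspace name, and token are all placeholders, and the request targets the same `api-docs` endpoint used elsewhere in this guide.

```shell
# Upload the extended OAS document as an API Doc -- placeholder values throughout.
curl -X POST "https://<NMS_FQDN>/api/acm/v1/services/workspaces/<proxyWorkspaceName>/api-docs" \
     -H "Content-Type: application/json" \
     -H "Authorization: Bearer <ACCESS_TOKEN>" \
     --data-binary @petstore-oas.json
```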
      - -  - - -### Server URL Templating - -```json -"servers": [ - { - "url": "https://{server}.example.com/api/{version}", - "variables": { - "version": { - "default": "v1" - }, - "server": { - "default": "staging" - } - }, -``` - -In the above section, we can see how server URL templating will make substitutions with a matching value from the variables section of the server object in the specification. -Each placeholder in the URL *must* have a matching variable in the variables section or the validation will fail and return an error. - -### Creating Backends -This section explains how to create a backend target for our API Gateway configuration, a Backend is a collection of upstream servers bundled under one "Service label". -An API Gateway can have multiple *Backends* which can each contain multiple upstream servers. - -```json -"x-acm-backends": [ - { - "serviceName": "pets-backend", - "serviceVersion": "pets-backend-v1", - "serviceLabel": "default", - "contextRoot": "/dev", - "upstreams": [ - { - "url": "https://server.example.com", - "maxFails": 10, - "maxConnections": 5, - "failTimeout": "5s", - "slowStart": "10s" - }, -``` - -In the above example, we can see how to create a single *Backend* with a single upstream server. - -{{}} - -| Variable | Purpose | Required | Default | Context | -|----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------|----------| -| serviceName | provides a human-readable identifier to the Backend | true | none | Backend | -| serviceVersion | provides some version metadata should it be required | false | none | Backend | -| serviceLabel | provides a means to target this backend from this and other API Gateway deployments | true | default | Backend | -| contextRoot | sets the service root path for the upstream servers, i.e. /dev would mean that all requests proxied to /api/v1 would be proxied to /dev/api/v1 on the upstream servers. | false | / | Backend | -| upstreams | array of upstream servers, requires at least one server to be provided. | true | none | Backend | -| url | the URL of the upstream server, a port should be provided if using non-standard scheme -> port mappings, i.e. http:80, https:443 | true | none | Upstream | -| maxFails | sets the number of unsuccessful attempts to communicate with the server that should happen in the duration set by the `fail_timeout` parameter to consider the server unavailable for a duration also set by the `fail_timeout` parameter | false | 0 | Upstream | -| maxConnections | limits the maximum `_number_` of simultaneous active connections to the proxied server | false | 0 | Upstream | -| failTimeout | sets the time during which the specified number of unsuccessful attempts to communicate with the server should happen to consider the server unavailable and the period of time the server will be considered unavailable. | false | 10s | Upstream | -| slowStart | sets the `_time during which the server will recover its weight from zero to a nominal value, when an unhealthy server becomes healthy, or when the server becomes available after being unavailable. 
| false | none | Upstream | - -{{}} - -All values supplied in the OAS Specification are only modifiable through the OAS Specification and not through the API or UI, this means that the OAS Specification is the source of truth for all values supplied within it. -If values are omitted from the OAS Schema then they may be added or modified via the API or UI. - -### Proxy Basepath -It is possible to modify the basepath provided using two additional extensions: -`x-acm-append-rule` and `x-acm-strip-basepath`. - -`x-acm-append-rule` is a legacy configuration option that was used to either prepend or append the version field from the `info` section to your API basepath, going forward the basepath should be added explicitly to the global server URL section in exactly the manner in which it is to be used, for example: - -`x-acm-append-rule` defaults to `NONE` and the version field in the `info` section is only used as the document version metadata in favor of explicitly adding the version to the server URL. `x-acm-append-rule` should ONLY be used for legacy deployments that used a value other than `NONE` - -`x-acm-strip-basepath` is a boolean value that denotes whether to strip the basepath from the request URI before proxying the request to the backend servers. - -{{}} - -| Incoming URI | basePath | stripBasepath | Context Root | Proxied URI | -|-----------------------|----------|---------------|--------------|------------------------| -| /api/v1/customers | /api/v1 | false | / | /api/v1/customers | -| /api/v1/customers | /api/v1 | true | / | /customers | -| /api/v1/customers/123 | /api/v1 | true | / | /customers/123 | -| /api/v1/customers | /api/v1 | false | /prod | /prod/api/v1/customers | -| /api/v1/customers | /api/v1 | true | /prod | /prod/customers | - -{{}} - -- When you upload an OpenAPI spec, API Connectivity Manager automatically generates a name for the API Docs object using the following format: - -`"info.title"-"info.version"` - -- The string is "URL-ized", meaning any whitespace gets converted to dashes (`-`) and all letters are lowercase. -If we used the OpenAPI example [Petstore API](https://github.com/OAI/OpenAPI-Specification/blob/main/tests/v3.0/pass/petstore.yaml), the auto-generated name for the API Docs would be `petstore-v1`. - -{{}} - -{{%tab name="UI"%}} - -1. Enter a name for the backend service. -1. Type the **Service Target Hostname**; this can be an IP or FQDN. -1. In the **Service Target Transport Protocol** menu, select gRPC if your backend service uses gRPC. -1. Enter the *Service Target Port*, or use the arrow buttons to increase or decrease the port number. - -### API Proxy - -1. Enter a name for the API Proxy. -1. Select Yes in the **Use an OpenAPI spec** option. -1. Select the **Add API Spec** button. -1. Select the **Browse** button and select a YAML or JSON file. -1. After the file uploads you can either select or search for your API spec. -1. Select **Publish**. - -### Ingress -Populated from API Specification and are read-only - -### Confirm Setup - -1. Open a terminal application. - -1. Run the following command: - - ```curl - curl -k -X GET "https://gateway-proxy-hostname/version/basepath" - ``` - -{{< call-out "note" >}} By default the ingress append rule is set to `NONE` when using an OAS Schema so your request must match the `basepath` you have supplied as part of your Global Server URL. 
{{< /call-out >}} - -{{%/tab%}} -{{%tab name="API"%}} - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST | `/services/workspaces/{{proxyWorkspaceName}}/api-docs`| -| POST | `/services/workspaces/{{proxyWorkspaceName}}/proxies`| - -{{}} - - -Take the steps below to add an API Proxy with an OpenAPI spec using either version 3.0.x or 3.1. - -1. Send a POST request containing the OpenAPI spec for your API to the `api-docs` endpoint to upload it. - - ```json - { - "info": { - "version": "1.0.0", - "title": "Swagger Petstore", - "license:" { - "name": "MIT" - } - }, - "openapi": "3.0.0", - "paths": {...} - } - ``` - -1. Send a POST request to the `proxies` endpoint to create a new API Proxy. In this example, `specRef` contains the name that API Connectivity Manager assigned to the API Docs object: `petstore-1`. - - ```json - { - "name": "{{proxyName}}", - "metadata": { - "description": "Swagger Petstore API" - }, - "version": "v1", - "specRef": "petstore-1", - "proxyConfig": { - "hostname": "{{environmentHostname}}", - "ingress": { - "basePath": "/v1" - }, - "backends": [ - { - "serviceName": "petstore-api-svc", - "serviceTargets": [ - { - "hostname": "petstore.example.com" - } - ] - } - ] - } - } - ``` - -{{%/tab%}} -{{}} - -## Publish an API Doc to Developer Portal -Next, you can publish API Docs to your Developer Portal. - -API Connectivity Manager uses the `portalConfig.hostname` setting to connect your Dev Portal Proxy to the Developer Portal. -You should define this field using the hostname that you assigned to the Developer Portal in the [Set Up a Developer Portal]({{< ref "add-devportal" >}}) guide. - -{{}} - - -{{%tab name="UI"%}} - -Refer to [Publish API Proxy with OpenAPI Spec](#publish-api-proxy-with-spec). - -1. Select the **Also publish API to developer portal** option -1. Select the **Portal Proxy Hostname**. -1. (Optional) Enter a category if required. -1. Select **Publish** - -Open the Developer Portal and you should see the API doc is now displayed on the page. - -{{%/tab%}} -{{%tab name="API"%}} - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| PUT | `/services/workspaces/{{proxyWorkspaceName}}/proxies/{{proxyName}}`| - -{{}} - - - -The example below adds the Developer Portal to the same API Proxy that you created in the [previous section](#set-up-api-proxy). - -```json -{ - "name": "{{proxyName}}", - "specRef": "petstore-1", - "version": "v1", - "proxyConfig": { - "hostname": "{{environmentHostname}}", - "ingress": { - "basePath": "/v1" - }, - "backends": [ - { - "serviceName": "petstore-api-svc", - "serviceTargets": [ - { - "hostname": "petstore.example.com" - } - ] - } - ] - }, - "portalConfig": { - "hostname": "{{portalClusterHostname}}" - } -} -``` - -{{%/tab%}} -{{}} diff --git a/content/nms/acm/how-to/_index.md b/content/nms/acm/how-to/_index.md deleted file mode 100644 index e633dd278..000000000 --- a/content/nms/acm/how-to/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -description: "Task-oriented topics that focus on how to use F5 NGINX Management Suite API Connectivity Manager." 
-title: How-To Guides -weight: 500 -url: /nginx-management-suite/acm/how-to/ ---- \ No newline at end of file diff --git a/content/nms/acm/how-to/backup-recovery.md b/content/nms/acm/how-to/backup-recovery.md deleted file mode 100644 index b88b100b9..000000000 --- a/content/nms/acm/how-to/backup-recovery.md +++ /dev/null @@ -1,308 +0,0 @@ ---- -title: "Back up and recovery" -toc: true -nd-docs: "DOCS-1668" ---- - -## Overview - -F5 NGINX Management Suite includes several scripts for backing up and restoring the configuration files, secrets, and databases used by the platform. - -{{< call-out "important" >}}The backup and recovery scripts are provided for reference and may need to be changed for your deployment.{{< /call-out >}} - ---- - -## NGINX Management Suite and API Connectivity Manager deployed in a Virtual Machine or Bare Metal - -### Before you begin - -To complete the instructions in this guide, you need the following: - -- An installed version of Instance Manager -- An installed version of API Connectivity Manager -- Instance Manager versions older than 2.15.0 will require an installed version of SQLite. Refer to the [Install SQLite]({{< ref "/nim/admin-guide/maintenance/sqlite-installation.md" >}}) guide for installation instructions. -- The NGINX Management Suite services must be running: - - ```shell - sudo systemctl start nms - ``` - -### Make scripts executable - -To run the backup and restore scripts, you need to set their permissions to make them executable. - -1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. -1. Change to the directory where the scripts are located: - - ```shell - cd /etc/nms/scripts - ``` - -1. Run the following commands to make the scripts executable: - - ```shell - sudo chmod +x backup.sh - sudo chmod +x restore.sh - sudo chmod +x backup-acm.sh - sudo chmod +x restore-acm.sh - sudo chmod +x support-package.sh - ``` - -### Include module data - -By default, the data for API Connectivity Manager isn't included in the backup. - -To back up module data, follow these steps: - -1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. -1. Change to the directory where the scripts are located: - - ```shell - cd /etc/nms/scripts - ``` -1. Edit the `backup.sh` and `restore.sh` scripts and uncomment the commands in the relevant sections: - - In **backup.sh**, uncomment the following lines: - - ```shell - ## Back up API Connectivity Manager - # Uncomment the following line to back up API Connectivity Manager. - ACM_ACTIVE=$(systemctl is-active --quiet nms-acm) - IS_ACM_ACTIVE=$? - if [ $IS_ACM_ACTIVE -ne 0 ]; then - echo "You need to start the required NGINX Management Suite - services before running the backup script." - echo "Please ensure the following nms service is running:" - echo "nms-acm" - exit 1 - fi - ``` - - ```shell - ## Back up API Connectivity Manager - # Uncomment the following line to back up API Connectivity Manager. - ./backup-acm.sh - ``` - - In **restore.sh**, uncomment the following lines: - - ```shell - ## Back up API Connectivity Manager - # Uncomment the following line to back up API Connectivity Manager. - ACM_ACTIVE=$(systemctl is-active --quiet nms-acm) - IS_ACM_ACTIVE=$? - if [ $IS_ACM_ACTIVE -eq 0 ]; then - echo "You need to stop the required NGINX Management Suite - services before running the restore script." 
- echo "Please ensure the following nms service is stopped:" - echo "nms-acm" - exit 1 - fi - ``` - - ```shell - ## Restore the API Connectivity Manager database. - # Uncomment the following line to restore API Connectivity Manager. - ./restore-acm.sh - ``` - -### Back up and restore NGINX Management Suite and API Connectivity Manager - -To back up the NGINX Management Suite configuration files, secrets, and databases: - -1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. -1. To back up NGINX Management Suite, run the following commands: - - ```shell - cd /etc/nms/scripts - sudo ./backup.sh - ``` - - The backup is saved to a tarball file similar to the following example: `/tmp/nms-backup-.tgz` - -To restore NGINX Management Suite: - -1. Open a secure shell (SSH) connection to the NGINX Management Suite host and log in. -1. To restore NGINX Management Suite, run the following commands: - - ```shell - cd /etc/nms/scripts - sudo ./restore.sh /tmp/nms-backup-.tgz - ``` ---- - -## NGINX Management Suite and API Connectivity Manager deployed in a Kubernetes Cluster - -### Before you begin - -To complete the instructions in this guide, you need the following: - -- An installed version of NGINX Management Suite and Instance Manager -- An installed version of API Connectivity Manager -- Instance Manager versions older than 2.15.0 will require an installed version of SQLite. Refer to the [Install SQLite]({{< ref "/nim/admin-guide/maintenance/sqlite-installation.md" >}}) guide for installation instructions. - - - - -- Root Access - - To back up and restore the NGINX Management Suite on Kubernetes, run the scripts as a superuser with `sudo`. These scripts use the `kubectl` command to interact with the Kubernetes API. It is necessary to ensure the target Kubernetes cluster is accessible to the root user. - - To confirm that the root user has access to the Kubernetes API, run the following command: - - ```shell - sudo kubectl -n nms get pods - ``` - - If the result is error-free and the output is the list of currently running pods/nodes the root user has the required access. - - If the root user does not have the required access, you will need to configure the root user to have Kubernetes API access, or provide the script with the location of the Kubernetes configuration via the environment variable `KUBECONFIG`. For example: - - ```shell - KUBECONFIG=/etc/kubernetes/admin.conf - ``` - - In the example above, `/etc/kubernetes/admin.conf` is the default configuration location of a Kubernetes cluster. If the configuration location is different for the target Kubernetes cluster, update the location accordingly. - -- Utility pod - - To back up and restore NGINX Management Suite in a Kubernetes cluster, you need to install the `utility` pod in your Kubernetes cluster. For each module you want to back up and restore, you need to configure the `utility` pod accordingly: - - 1. Update your [Helm Deployment values.yaml file]({{< ref "/nim/deploy/kubernetes/deploy-using-helm.md#configure-chart" >}}), add the `utility: true` line under `global` to enable the utility pod, and the required sections under `nmsModules` to back up and restore API Connectivity Manager. Example below: - - ```yaml - global: - utility: true - nmsModules : - nms—acm: - enabled: true - addClaimsToUtility: true - ``` - - 1. [Upgrade your NGINX Management Suite deployment]({{< ref "/nim/deploy/kubernetes/deploy-using-helm#helm-upgrade-nim" >}}) to apply the changes. - - 1. 
Download the NGINX Management Suite Helm chart for your currently installed version of NGINX Management Suite: - - ```shell - helm repo add nginx-stable https://helm.nginx.com/stable - helm repo update - helm pull nginx-stable/nms - tar zxvf nms-.tgz - ``` - -### Back up NGINX Management Suite and API Connectivity Manager - -To back up NGINX Management Suite deployed in a Kubernetes cluster, follow these steps: - -1. Copy the backup script `k8s-backup.sh` extracted from `nms-.tgz` to your working directory: - - ```shell - cp nms-/charts/nms-hybrid/backup-restore/k8s-backup.sh . - ``` - -1. Make the scripts executable: - - ```shell - chmod +x k8s-backup.sh - ``` - -1. Run the backup script: - - ```shell - ./k8s-backup.sh - ``` - - {{< call-out "note" >}}The backup script does not need the `utility` pod or `sudo` permissions to create a backup.{{< /call-out >}} - -1. The command will ask for the NGINX Management Suite namespace. The script will create a backup archive in the same directory called `k8s-backup-.tar.gz`. - -### Full restoration to the same Kubernetes Cluster - -To restore NGINX Management Suite and the installed modules deployed in the same Kubernetes cluster, follow these steps: - -1. Copy the restore script `k8s-restore.sh` extracted from `nms-.tgz` to your working directory: - - - For NGINX Management Suite and API Connectivity Manager, copy `k8s-restore.sh` from the `nms-/charts/nms-hybrid/backup-restore/` directory. - - ```shell - cp nms-/nms/charts/nms-hybrid/backup-restore/k8s-restore.sh . - ``` - -1. Make the scripts executable: - - ```shell - chmod +x k8s-restore.sh - ``` - -1. Copy your `k8s-backup-.tar.gz` file to the same directory as the `k8s-restore.sh` script. - -1. Run the restore script: - - ```shell - sudo KUBECONFIG=/etc/kubernetes/admin.conf ./k8s-restore.sh -i k8s-backup-.tar.gz -r - ``` - - In the command above, `/etc/kubernetes/admin.conf` is the default configuration location of a Kubernetes cluster. If the configuration location is different for the target Kubernetes cluster, update the command accordingly. - - - {{< call-out "note" >}}The restore script [needs root access]({{< ref "/nms/acm/how-to/backup-recovery.md#root-access" >}}) to Kubernetes for the restore operation.{{< /call-out >}} - -1. The script will ask for the NGINX Management Suite namespace. Once the namespace has been provided, the script will use the specified backup archive. - - {{< call-out "note" >}}The script will use the `utility` pod to access all the mounted volumes to restore database directories and core secrets; and `kubectl` to restore the Kubernetes configmaps and secrets. Before starting the restoration, the script will stop all service pods and start the `utility` pod. After finishing the restore, it will stop the `utility` pod and start all service pods.{{< /call-out >}} - - -### Data-only restoration to a different Kubernetes Cluster - -To restore NGINX Management Suite and the installed modules into a different Kubernetes cluster, follow these steps: - -1. Copy the restore script `k8s-restore.sh` extracted from `nms-.tgz` to your working directory: - - - For NGINX Management Suite and API Connectivity Manager, copy `k8s-restore.sh` from the `nms-/charts/nms-hybrid/backup-restore/` directory. - - ```shell - cp nms-/nms/charts/nms-hybrid/backup-restore/k8s-restore.sh . - ``` - -1. Make the scripts executable: - - ```shell - chmod +x k8s-restore.sh - ``` - -1. Copy your `k8s-backup-.tar.gz` file to the same directory as the `k8s-restore.sh` script. - -1. 
Run the restore script: - - ```shell - sudo KUBECONFIG=/etc/kubernetes/admin.conf ./k8s-restore.sh -i k8s-backup-.tar.gz -r -d - ``` - - In the command above, `/etc/kubernetes/admin.conf` is the default configuration location of a Kubernetes cluster. If the configuration location is different for the target Kubernetes cluster, update the command accordingly. - - - {{< call-out "note" >}}The restore script [needs root access]({{< ref "/nms/acm/how-to/backup-recovery.md#root-access" >}}) to Kubernetes for the restore operation.{{< /call-out >}} - -1. The script will ask for the NGINX Management Suite namespace. Once the namespace has been provided, the script will use the specified backup archive. - -The restore script will only restore the databases and core secrets. If you want to restore the user passwords too, run the following commands on the extracted `k8s-backup-.tar.gz` file: - - ```shell - cd k8s-backup-/secrets - kubectl -n nms apply -f nms-auth.json - kubectl -n nms delete pod apigw- - ``` - ---- - -## ClickHouse - -ClickHouse supports backup and restore on versions greater than v22. - -For instructions on how to back up and restore the ClickHouse database, please refer to [ClickHouse's documentation](https://clickhouse.com/docs/en/operations/backup). - -To check your ClickHouse version, run the following command: - -```shell -clickhouse-server --version -``` diff --git a/content/nms/acm/how-to/deploy-api-connectivity-manager.md b/content/nms/acm/how-to/deploy-api-connectivity-manager.md deleted file mode 100644 index f8c1d8fe6..000000000 --- a/content/nms/acm/how-to/deploy-api-connectivity-manager.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -description: The guide provides step-by-step instructions to deploy F5 NGINX API Connectivity - Manager on Kubernetes using a Helm chart. -nd-docs: DOCS-1276 -title: Deploy API Connectivity Manager on Kubernetes -toc: true -weight: 20 -type: -- how-to ---- - -## Requirements - -Review the following requirements for API Connectivity Manager before continuing. - -### Install Instance Manager - -{{< call-out "important" >}}To install API Connectivity Manager, you must first install Instance Manager. This is because API Connectivity Manager relies on features that are included with Instance Manager.{{< /call-out >}} - -- [Deploy Instance Manager on Kubernetes]({{< ref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}) - -### Dependencies with Instance Manager - -Refer to the following table to see the module compatibility for each F5 NGINX Management Suite chart. - -{{< include "nim/kubernetes/nms-chart-supported-module-versions.md" >}} - - ---- - -## Download Docker Image {#download-docker-image} - -Follow these steps to download the Docker image for API Connectivity Manager: - -1. Go to the [MyF5 website](https://my.f5.com/manage/s/downloads), then select **Resources > Downloads**. -1. In the **Select Product Family** list, select **NGINX**. -1. In the **Product Line** list, select **NGINX API Connectivity Manager**. -1. Select the following download options: - - - **Product version** -- Select the version of API Connectivity Manager you want to install. Make sure this version is compatible with the version of Instance Manager you installed as a prerequisite. Refer to the [Dependencies with Instance Manager](#dependencies-with-instance-manager) section above. - - **Linux distribution** -- Select the Linux distribution you're deploying to. For example, **ubuntu**. - - **Distribution Version** -- Select the Linux distribution's version. 
For example, **20.04**. - - **Architecture** -- Select the architecture. For example, **amd64**. - -1. In the **Download Files** section, download the `nms-acm--img.tar.gz` file. - ---- - -## Load Docker Image {#load-docker-image} - -{{< call-out "note" >}} To complete the commands in this section, you need to have [Docker 20.10 or later](https://docs.docker.com/get-docker/) installed. {{< /call-out >}} - - -1. Change to the directory where you downloaded the Docker image: - - ``` shell - cd - ``` - -1. Load the Docker image from the `nms-acm--img.tar.gz` archive: - - ``` shell - docker load -i nms-acm--img.tar.gz - ``` - - The output looks similar to the following: - - ``` shell - $ docker load -i nms-acm--img.tar.gz - 1b5933fe4b5: Loading layer [==================================================>] 5.796MB/5.796MB - fbe0fc9bcf95: Loading layer [==================================================>] 17.86MB/17.86MB - ... - 112ae1f604e0: Loading layer [==================================================>] 67.8MB/67.8MB - 4b6a693b90f4: Loading layer [==================================================>] 3.072kB/3.072kB - Loaded image: nms-acm:1.5.0 - ``` - - {{< call-out "important" >}} - Take note of the loaded image's name and tag. You'll need to reference this information in the next section when pushing the image to your private registry. - - In the example output above, `nms-acm` is the image name and `1.5.0` is the tag. The image name or tag could be different depending on the product version you downloaded from MyF5. - {{< /call-out >}} - ---- - -## Push Image to Private Registry {#push-docker-image} - -{{< call-out "note" >}}To complete the steps in this section, you need an [externally-accessible private Docker registry](https://docs.docker.com/registry/deploying/) to push the container images to.{{< /call-out >}} - -To push the Docker images to your private registry, take the following steps: - -- Replace `` with your private Docker registry and port (if needed). - -- Replace `` with the tag you noted when [loading the Docker image](#load-acm-docker-image) above. - -1. Log in to your private registry: - - ```shell - docker login - ``` - -2. Tag the image with the image name and version you noted when [loading the Docker image](#load-acm-docker-image). - - ```shell - docker tag nms-acm: /nms-acm: - ``` - - For example: - - ```shell - docker tag nms-acm:1.5 myregistryhost:5000/nms-acm:1.5 - ``` - -3. Push the image to your private registry: - - ```shell - docker push /nms-acm: - ``` - - For example: - - ```shell - docker push nms-acm:1.5 myregistryhost:5000/nms-acm:1.5 - ``` - ---- - -## Enable API Connectivity Manager - -To enable the API Connectivity Manager Module, take the following steps: - -1. Open the `values.yaml` file for editing. -1. Add the following snippet to the `values.yaml` file: - - - Replace `` with your private Docker registry and port (if needed). - - Replace `` with the tag you noted when [loading the Docker image](#load-acm-docker-image) above. - - In the `imagePullSecrets` section, add the credentials for your private Docker registry. - - ```yaml - # values.yaml - global: - nmsModules: - nms-acm: - enabled: true - nms-acm: - imagePullSecrets: - - name: regcred - acm: - image: - repository: /nms-acm - tag: - ``` - -1. Close and save the `values.yaml` file. 
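Before running the upgrade in the next section, you can optionally confirm that the chart picks up your image reference. The following is a minimal sanity check, not a required step; it assumes the `nginx-stable` Helm repository is already added (as described in the Instance Manager deployment guide) and that the `values.yaml` file shown above is in your current directory:

```shell
# Render the chart locally and check that the nms-acm image repository and tag
# resolve to your private registry values
helm template nms nginx-stable/nms -f values.yaml | grep -B 1 -A 2 "nms-acm"
```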
- ---- - -## Upgrade NGINX Management Suite Deployment {#upgrade-nms} - -{{< call-out "note" >}} To complete the steps in this section, you need to have [OpenSSL 1.1.1](https://www.openssl.org/source/) or later installed. {{< /call-out >}} - -Run the following command to upgrade the NGINX instance deployment: - -- Replace `` with the path to the [values.yaml file you created]({{< ref "/nim/deploy/kubernetes/deploy-using-helm.md#configure-chart" >}}). -- Replace `YourPassword123#` with a secure password that contains a combination of uppercase and lowercase letters, numbers, and special characters. - - {{< call-out "important" >}}Make sure to copy and save the password for future reference. Only the encrypted password is stored in Kubernetes. There's no way to recover or reset a lost password.{{< /call-out >}} - -- (Optional) Replace `` with the desired version; see the table below for the available versions. Alternatively, you can omit this flag to install the latest version. - -```bash -helm upgrade -n nms --set nms-hybrid.adminPasswordHash=$(openssl passwd -6 'YourPassword123#') nms nginx-stable/nms -f [--version ] --wait -``` - -This command upgrades an existing Helm chart deployment named `nms` with a new version of the chart located in the `nginx-stable/nms` repository. It also sets the value of the `nms-hybrid.adminPasswordHash` to the hashed version of the provided password and uses a `values.yaml` file located at the provided path. - -### Upgrade Existing API Connectivity Manager Deployment {#upgrade-acm-helm} - -If you've already deployed API Connectivity Manager and would like to upgrade to a newer version, take the following steps: - -1. Repeat the steps above to: - - - [Download Newer Docker Image](#download-docker-image) - - [Load Docker Image](#load-docker-image) - - [Push Image to Private Docker Registry](#push-docker-image) - -2. Run the `helm upgrade` command above to [upgrade the NGINX Management Suite deployment](#upgrade-nms). - ---- - -## Access Web Interface - -{{< include "nim/kubernetes/access-webui-helm.md" >}} - ---- - -## Add License - -A valid license is required to make full use of all the features in API Connectivity Manager. - -Refer to the [Add a License]({{< ref "/nim/admin-guide/add-license.md" >}}) topic for instructions on how to download and apply a trial license, subscription license, or Flexible Consumption Program license. - ---- - -## Configurable Helm Settings - -The following table lists the configurable parameters and default values used by the API Connectivity Manager chart when installing from a Helm chart. - -To modify a configuration for an existing release, run the `helm upgrade` command and use `-f `, where `my-values-file` is a path to a values file with your desired configuration. - -{{}} - -| Parameter | Description | Default | -|:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------| -| `nms-acm.acm.logLevel` | Set the log level for the backend API service. The log level can be `fatal`, `error`, `warning`, `info`, or `debug` | `info` | -| `nms-acm.acm.image.repository` | Repository name and path for the `acm` image. | `acm` | -| `nms-acm.acm.image.tag` | Tag used for pulling images from registry. | `latest` | -| `nms-acm.acm.image.pullPolicy` | Image pull policy. | `IfNotPresent` | -| `nms-acm.acm.container.port.http` | TCP port for the pod to listen on. 
| `8037` | -| `nms-acm.acm.container.port.db` | Port to use for Dqlite. | `9300` | -| `nms-acm.acm.metrics.enabled` | Enable metrics. | `false` | -| `nms-acm.acm.service.httpPort` | TCP port for the service to listen on. | `8037` | -| `nms-acm.acm.resources.requests.cpu` | CPU resource limits to allow for the `acm` pods. | `500m` | -| `nms-acm.acm.resources.requests.memory` | Memory resource limits to allow for the `api` pods. | `512Mi` | -| `nms-acm.acm.persistence.enabled` | Optionally disable persistent storage, used for database data. | `true` | -| `nms-acm.acm.persistence.claims` | An array of persistent volume claims, can be modified to use an existing PVC. | See the [Dqlite](#acm-dqlite-configuration) configuration section below. | -| `nms-acm.acm.devportal.credentials.enabled` | Enables the [Create Credentials Endpoint on the Developer Portal]({{< ref "/nms/acm/how-to/infrastructure/enable-create-credentials.md" >}}) | `false` | -| `nms-acm.acm.devportal.credentials.ssl` | This should be set to true if mTLS has been configured between API Connectivity Manager and the Developer Portal, for more information see [Create Credentials Endpoint on the Developer Portal]({{< ref "/nms/acm/how-to/infrastructure/enable-create-credentials.md" >}}) | `false` | -| `nms-acm.acm.devportal.client.caSecret.name` | This should be set if an unknown Certificate Authority is needed for communication with the Developer Portal in order to provide a CA certificate. This should be set to the name of the secret in the release namespace that contains the CA certificate. | Blank | -| `nms-acm.acm.devportal.client.caSecret.key` | This should be set if an unknown Certificate Authority is needed for communication with the Developer Portal in order to provide a CA certificate. This should be set to the key of the secret in the release namespace that contains the CA certificate. | Blank | - -{{}} - -##### API Connectivity Manager Dqlite Storage Configuration {#acm-dqlite-configuration} - -```yaml - - name: dqlite - existingClaim: - size: 500Mi - accessMode: ReadWriteOnce -``` - - ---- - -## Troubleshooting - - - -For guidance on how to create a support package containing system and service details to share with NGINX Customer Support, refer to the guide [Create a Support Package from a Helm Installation]({{< ref "/nms/support/k8s-support-package.md" >}}). - ---- diff --git a/content/nms/acm/how-to/devportals/_index.md b/content/nms/acm/how-to/devportals/_index.md deleted file mode 100644 index 3c4192c60..000000000 --- a/content/nms/acm/how-to/devportals/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Developer Portals -weight: 300 -url: /nginx-management-suite/acm/how-to/devportals/ ---- \ No newline at end of file diff --git a/content/nms/acm/how-to/devportals/devportal-support-package.md b/content/nms/acm/how-to/devportals/devportal-support-package.md deleted file mode 100644 index 3dd53b1a4..000000000 --- a/content/nms/acm/how-to/devportals/devportal-support-package.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -description: This guide explains how to generate a support package for troubleshooting - error scenarios. -nd-docs: DOCS-1259 -title: Create an NGINX Developer Portal Support Package -toc: true -weight: 300 -type: -- reference -- how-to ---- - -{{< shortversions "1.0.0" "latest" "acmvers" >}} -## Overview - -The support package script can be used to collect information about your system for troubleshooting and debugging issues. 
- -The script collects system and service information and then packages the data into a tar archive, which you can share with [NGINX Customer Support]({{< ref "/nms/support/contact-support.md" >}}). - -## Usage - -The NGINX Developer Portal installer copies the `support-package.sh` script to the following location: `/etc/nginx-devportal/scripts/support-package.sh`. - -To create a support package: - -1. Run the support package script. The script requires root privileges to run. - - ``` bash - sudo bash /etc/nginx-devportal/scripts/support-package.sh - ``` - - The support package is saved in the same location from where the script was run. - - (Optional) If you use a different NGINX Developer Portal config file than the default `/etc/nginx-devportal/devportal.conf` file, run the support package script with the `-c` flag and specify the path to your config file: - - ```bash - sudo bash /etc/nginx-devportal/scripts/support-package.sh -c /your/config.conf - ``` - -2. To extract the package, use the `tar` command: - - ```bash - tar -xvf support-pkg-.tar.gz - ``` - -{{< call-out "note" >}} -The supported shell is `bash`. -{{< /call-out >}} - -### Arguments - -The following table lists the arguments you can use with the support package script. - -{{}} - -| Short | Long | Description | Example | Default | -| ----- | ------------------------- | ------------------------------------------------------------------- | ---------------------------------------- | ------------------------------------- | -| `-h` | `--help` | Prints information about the script arguments to stdout. | `--help` | N/A | -| `-o` | `--output_dir` | The output directory where the tar archive is saved. | `-o ~/output` | `$(pwd)` | -| `-c` | `--devportal_config_path` | The path to the NGINX Developer Portal config file. | `-c /etc/nginx-devportal/devportal.conf` | `/etc/nginx-devportal/devportal.conf` | -| `-m` | `--devportal_log_path` | The directory where the NGINX Developer Portal log file is located. | `-m /var/log/nginx-devportal.log` | `/var/log/nginx-devportal.log` | - -{{}} - -## Package Contents - -The support package includes several directories containing information about the system, service, and database state. - -The information included is based on the F5 NGINX products installed and configured. - -### devportal-logs - -The logs of the NGINX Developer Portal process. - -### service-information - -Information about the NGINX Developer Portal service running on the host. For the `nginx-devportal` process, the script collects: - -- `journalctl` (10000 most recent rows) -- `systemctl status` - -### system-information - -The status and state information of the host running NGINX Developer Portal, including the following: - -- System metrics (memory usage, CPU usage, etc.) -- File permissions of the Developer Portal -- Firewall or SELinux state -- Network interfaces -- Network information (hostname, iptables) -- Environment variables -- Disk usage of select directories -- Operating system version -- Installed Developer Portal version - -### database snapshot - -The support package script uses the `-c` flag ( or `--devportal_config_path`) to get the NGINX Developer Portal configuration. If the configuration file is not specified, the script uses the default value `/etc/nginx-devportal/devportal.conf`. - -As the NGINX Developer Portal supports both SQLite & PostreSQL database types, the support package script will determine the database settings from the `devportal.conf` configuration file. 
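If you're not sure which database backend your installation uses, you can inspect the same configuration file the script reads. This is a minimal check, assuming the default configuration location:

```shell
# List the database-related settings (for example, DB_TYPE and DB_PATH) that the script will use
grep '^DB_' /etc/nginx-devportal/devportal.conf
```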
- -{{< call-out "note" >}} -The NGINX Developer Portal support package script will try to utilize the relevant data backup tool for the database type used. For example, the `sqlite3` binary will be needed in your path to allow a SQLite data dump to occur. Similarly for PostgreSQL the `pg_dump` tool will be required. If the relevant data dump tool is not currently found in the systems `$PATH`, an error will be logged to the console. -{{< /call-out >}} diff --git a/content/nms/acm/how-to/devportals/installation/_index.md b/content/nms/acm/how-to/devportals/installation/_index.md deleted file mode 100644 index 3ae46efff..000000000 --- a/content/nms/acm/how-to/devportals/installation/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Install the Developer Portal -weight: 100 ---- \ No newline at end of file diff --git a/content/nms/acm/how-to/devportals/installation/configure-devportal-helm-options.md b/content/nms/acm/how-to/devportals/installation/configure-devportal-helm-options.md deleted file mode 100644 index 0fde20a0e..000000000 --- a/content/nms/acm/how-to/devportals/installation/configure-devportal-helm-options.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -description: 'This guide lists and describes the parameters you can set when deploying - the Developer Portal from a Helm chart. ' -nd-docs: DOCS-1171 -title: Deployment Options for Developer Portal Helm -toc: true -weight: 25 -type: -- how-to ---- - -{{< shortversions "1.3.0" "latest" "acmvers" >}} - -## Default Developer Portal Helm Settings {#default-devportal-helm-settings} - -This topic lists the default values that are used when [installing the Developer Portal from a Helm chart]({{< ref "/nim/deploy/kubernetes/deploy-using-helm.md" >}}). You can change these values to meet your specific needs. - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configuration-options.md" >}} - ---- - -## Common Deployment Configurations {#common-deployment-configs} - -### Deploy Developer Portal with an SQLite database - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-helm-devportal-sqlite.md" >}} - -### Deploy Developer Portal with an embedded PostgreSQL database - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-embedded-postgres.md" >}} - -### Deploy Developer Portal with an external PostgreSQL database - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-external-postgres.md" >}} - -### Deploy Developer Portal using TLS for the backend API service - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-api-mtls.md" >}} diff --git a/content/nms/acm/how-to/devportals/installation/devportal-helm-chart.md b/content/nms/acm/how-to/devportals/installation/devportal-helm-chart.md deleted file mode 100644 index cbe41c2cf..000000000 --- a/content/nms/acm/how-to/devportals/installation/devportal-helm-chart.md +++ /dev/null @@ -1,455 +0,0 @@ ---- -description: Follow the steps in the guide to deploy the API Connectivity Manager - Developer Portal to Kubernetes using a Helm chart. -nd-docs: DOCS-1110 -title: Deploy the Developer Portal from a Helm chart -toc: true -weight: 20 -type: -- tutorial ---- - -{{< shortversions "1.3.0" "latest" "acmvers" >}} - -## Overview - -Follow the steps in this section to install, upgrade, or uninstall the API Connectivity Manager Developer Portal on Kubernetes using Helm. 
- ---- - -## Before You Begin - -To complete the steps in this section, you need the following: - -- A working knowledge of Docker and how to build and extend containers -- An installed, licensed, and running version of API Connectivity manager -- A installed version of Helm v3.10.0 or newer -- An [externally-accessible private Docker registry](https://docs.docker.com/registry/deploying/) to push the container images to -- Your F5 NGINX Plus certificate and key files, which you can download from [MyF5](https://my.f5.com/manage/s/) - -{{< call-out "note" >}} - -- Take a few minutes to review the [Configurable Helm Settings](#configuration-options) at the end of this topic. You can change these settings to customize your installation to meet your needs. - -- Check out the [Deployment Patterns for Developer Portal]({{< ref "/nms/acm/how-to/infrastructure/configure-devportal-backend.md" >}}) topic if you're considering installing the Developer Portal on a single host or on a cluster for high availability. - -{{< /call-out>}} - ---- - -## Download the Developer Portal Container Images {#download-devportal-api-image} - -1. On the [MyF5 website](https://my.f5.com/manage/s/downloads), select **Resources > NGINX Downloads**. -2. In the NGINX products list, select **NGINX API Connectivity Manager**. -3. Select the following download options. Pick the version that you require; in this guide, we've chosen 1.3.0 as an example: - - **Product version:** 1.3.0 - **Linux distribution:** Ubuntu - **Distribution Version:** 20.04 - **Architecture:** amd64 - -4. Download the `nginx-devportal-api--img.tar.gz` file. -5. Download the `nginx-devportal-apigw--img.tar.gz` file. - {{< call-out "note" >}} - If you require a version of NGINX Plus other than what is provided, please see the optional section on [building the API Gateway Container Image](#build-apigw-docker-image). - {{< /call-out >}} - ---- - -## Load Docker Images {#load-docker-image} - -1. Change to the directory where you downloaded the Docker images: - - ``` shell - cd - ``` - -2. Load the Docker image: - - ``` shell - docker load -i nginx-devportal-api--img.tar.gz - docker load -i nginx-devportal-apigw--img.tar.gz - ``` - - The output looks similar to the following: - - ``` shell - $ docker load -i nginx-devportal-api--img.tar.gz - f4373956a745: Loading layer [==================================================>] 2.171MB/2.171MB - 95de16926adc: Loading layer [==================================================>] 15.62MB/15.62MB - Loaded image: nginx-devportal-api:1.5.0 - $ docker load -i nginx-devportal-apigw--img.tar.gz - 0e2737d1d5b7: Loading layer [==================================================>] 1.097MB/1.097MB - 2b64694bf95f: Loading layer [==================================================>] 83.19MB/83.19MB - 1e8cac41ce82: Loading layer [==================================================>] 2.56kB/2.56kB - Loaded image: nginx-devportal-apigw:1.5.0-r28 - ``` - - {{< call-out "important" >}} - Take note of the loaded image's name and tag. You'll need to reference this information in the next section when pushing the image to your private registry. - - In the example output above, `nginx-devportal-api` is the image name and `1.5.0` is the tag for the first image. For the second image `nginx-devportal-apigw` is the image name and `1.5.0-r28` is the tag (where `1.5.0` is the release version and `r28` is the NGINX Plus version). The image names or tags could be different depending on the product version you downloaded from MyF5. 
      - {{< /call-out >}} - -### (Optional) Build the API Gateway Container Image {#build-apigw-docker-image} - {{< call-out "note" >}} - This step is only required for versions of API Connectivity Manager Developer Portal prior to `1.5.0` or if you require a specific release of NGINX Plus that is not provided on MyF5. - {{< /call-out >}} -
      - Build the API Gateway Container Image - The Developer Portal Helm chart requires a container image that includes the NGINX Plus service and NGINX Agent in order to deploy the chart and have the API Gateway register with the API Connectivity Manager control plane. - -In this example, we use Ubuntu (focal), but other supported distributions can be used. - -
      - Supported Linux distributions - -{{< include "tech-specs/acm-dev-portal-supported-distros.md" >}} - -
      - -1. Create a Dockerfile similar to the following example: - -
      - Example Dockerfile - - {{< icon "download" >}} {{< link "/acm/containers/devportal/Dockerfile" "Download example Dockerfile" >}} - - ```Dockerfile - FROM ubuntu:focal - - # NGINX Plus release e.g 27 - ARG NGINX_PLUS_VERSION - - # DEVPORTAL release e.g 1.3.0 - ARG DEVPORTAL_UI_VERSION - - ARG CONTROL_PLANE_IP - - # Install NGINX Plus - RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ - --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ - set -ex \ - && apt-get update \ - && apt-get upgrade -y \ - && apt-get install --no-install-recommends --no-install-suggests -y \ - curl \ - gnupg \ - ca-certificates \ - apt-transport-https \ - lsb-release \ - procps \ - && \ - NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \ - for server in \ - hkp://keyserver.ubuntu.com:80 \ - pgp.mit.edu; do \ - echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ - gpg --keyserver "$server" \ - --recv-keys "$NGINX_GPGKEY" \ - && break; \ - done \ - # Configure APT repos - && gpg --export "$NGINX_GPGKEY" > /etc/apt/trusted.gpg.d/nginx.gpg \ - && printf "Acquire::https::pkgs.nginx.com::SslCert \"/etc/ssl/nginx/nginx-repo.crt\";\n" >> /etc/apt/apt.conf.d/90pkgs-nginx \ - && printf "Acquire::https::pkgs.nginx.com::SslKey \"/etc/ssl/nginx/nginx-repo.key\";\n" >> /etc/apt/apt.conf.d/90pkgs-nginx \ - && printf "deb https://pkgs.nginx.com/plus/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) nginx-plus\n" > /etc/apt/sources.list.d/nginx-plus.list \ - && printf "deb https://pkgs.nginx.com/nms/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) nginx-plus\n" > /etc/apt/sources.list.d/nms.list \ - && apt-get update \ - # Install NGINX Plus & agent\ - && apt-get install -y \ - nginx-plus=${NGINX_PLUS_VERSION}* \ - nginx-plus-module-njs=${NGINX_PLUS_VERSION}* \ - nginx-devportal-ui=${DEVPORTAL_UI_VERSION}* \ - && curl --insecure https://$CONTROL_PLANE_IP/install/nginx-agent | PACKAGE_HOST=${CONTROL_PLANE_IP} sh \ - # Forward request and error logs to docker log collector \ - && ln -sf /dev/stdout /var/log/nginx/access.log \ - && ln -sf /dev/stderr /var/log/nginx/error.log \ - # Cleanup \ - && apt-get autoremove --purge -y \ - curl \ - gnupg \ - apt-transport-https \ - lsb-release \ - && rm -rf /root/.gnupg \ - && rm -rf /etc/apt/sources.list.d/nginx-plus.list /etc/apt/sources.list.d/nms.list /etc/apt/apt.conf.d/90pkgs-nginx \ - && rm -rf /var/lib/apt/lists/* - - COPY /entrypoint.sh / - - STOPSIGNAL SIGTERM - - CMD bash /entrypoint.sh - ``` - -
      - -
      - -2. Add an `entrypoint.sh` file similar to the following example to the same directory where you added the Dockerfile: - -
      - Example entrypoint.sh - - {{< icon "download" >}} {{< link "/acm/containers/devportal/entrypoint.sh" "Download example entrypoint.sh file" >}} - - ```bash - #!/bin/bash - - set -euxo pipefail - - handle_term() - { - echo "received TERM signal" - echo "stopping nginx-agent ..." - kill -TERM "${agent_pid}" 2>/dev/null - echo "stopping nginx ..." - kill -TERM "${nginx_pid}" 2>/dev/null - } - - trap 'handle_term' TERM - - if [ -z "${CONTROL_PLANE_IP}" ]; then - echo "ERROR CONTROL_PLANE_IP environment variable needs to be set." - exit 1 - fi - - if [ -z "${INSTANCE_GROUP}" ]; then - echo "ERROR INSTANCE_GROUP environment variable needs to be set." - exit 1 - fi - - # Launch nginx - echo "starting nginx ..." - nginx -g "daemon off;" & - - nginx_pid=$! - - # start nginx-agent, pass args - echo "starting nginx-agent ..." - nginx-agent --instance-group "${INSTANCE_GROUP}" --server-host "${CONTROL_PLANE_IP}" & - - agent_pid=$! - - wait_term() - { - wait ${agent_pid} - trap - TERM - kill -QUIT "${nginx_pid}" 2>/dev/null - echo "waiting for nginx to stop..." - wait ${nginx_pid} - } - - wait_term - - echo "nginx-agent process has stopped, exiting." - ``` - -
      - -
      - -3. Add your NGINX Plus certificate and key files to the same directory as the Dockerfile. You can download these files from the [MyF5](https://my.f5.com/manage/s/) site. - -4. Build the Dockerfile and specify the following settings: - - - `CONTROL_PLANE_IP`: The IP address or hostname of your API Connectivity Manager control plane host - - `NGINX_PLUS_VERSION`: The version of NGINX Plus that you want to use; for example, `28` - - `DEVPORTAL_UI_VERSION`: The version of the Developer Portal UI that you want to use; for example, `1.5.0` - - ```bash - export CONTROL_PLANE_IP= - export NGINX_PLUS_VERSION= - export DEVPORTAL_UI_VERSION= - export DEVPORTAL_UI_TAG=${DEVPORTAL_UI_VERSION}-r${NGINX_PLUS_VERSION} - export DOCKER_BUILDKIT=1 - docker build \ - -t nginx-devportal-apigw:$DEVPORTAL_UI_TAG \ - --build-arg CONTROL_PLANE_IP \ - --build-arg NGINX_PLUS_VERSION \ - --build-arg DEVPORTAL_UI_VERSION \ - --secret id=nginx-crt,src=nginx-repo.crt \ - --secret id=nginx-key,src=nginx-repo.key \ - . - ``` - -
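Before pushing the image, you can optionally verify the build. This is a minimal smoke test, assuming the `DEVPORTAL_UI_TAG` variable exported in the previous step is still set in your shell:

```shell
# Confirm the image exists with the expected tag
docker image ls nginx-devportal-apigw

# Print the NGINX Plus version baked into the image
docker run --rm --entrypoint nginx nginx-devportal-apigw:$DEVPORTAL_UI_TAG -v
```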
      - ---- - -## Push Images to Private Registry {#push-images-private-registry} - -{{< call-out "note" >}} -To complete this step, you need an [externally-accessible private Docker registry](https://docs.docker.com/registry/deploying/) to push the container images to. -{{< / call-out >}} - -After building or loading the Docker images, you can now tag and push the images to your private Docker registry. Replace `` in the examples below with the path to your private Docker registry. - -1. Log in to your private registry: - - ```shell - docker login - ``` - -2. Tag the images with the values you noted when completing the [Load Docker Images](#load-docker-image) steps above. - - ```shell - docker tag nginx-devportal-apigw: /nginx-devportal-apigw: - docker tag nginx-devportal-api: /nginx-devportal-api: - ``` - -3. Push the images to your private registry: - - ```shell - docker push /nginx-devportal-apigw: - docker push /nginx-devportal-api: - ``` - ---- - -## Add Helm Repository {#add-helm-repository} - -Run the following commands to install the NGINX Management Suite chart from the Helm repository: - -```shell -helm repo add nginx-stable https://helm.nginx.com/stable -helm repo update -``` - -The first command, `helm repo add nginx-stable https://helm.nginx.com/stable`, adds the `nginx-stable` repository to your local Helm repository list. This repository contains the Helm charts for deploying NGINX Management Suite. - -The second command, `helm repo update`, updates the local Helm repository list with the newest versions of the charts from the `nginx-stable` repository. This command ensures you have the most up-to-date version of the charts available for installation. - ---- - -## Configure Chart to Pull from Private Docker Registry {#configure-chart} - -A Helm `values.yaml` file is a configuration file you can use to customize the installation of a Helm chart without actually editing the chart itself, allowing for faster and more efficient deployments. Values can be used to specify different image repositories and tags, set environment variables, configure resource requests and limits, and more. - -1. Create a `values.yaml` file similar to the following example. This file is used to customize the configuration of the NGINX Developer Portal chart located in the `nginx-stable` Helm repository that you [added above](#add-helm-repository). - - ```yaml - # values.yaml - imagePullSecrets: - - name: regcred - apigw: - acmService: - enabled: true - type: LoadBalancer - image: - repository: /nginx-devportal-apigw - tag: - controlPlane: - host: - instanceGroup: - service: - type: LoadBalancer - api: - image: - repository: /nginx-devportal-api - tag: - db: - type: - acm: - client: - caSecret: - name: acm-tls - key: ca.crt - - ``` - - - Replace `` with your private Docker registry. - - Replace `` with the tag you used when [pushing the images to your private registry](#push-images-private-registry). - - In the `imagePullSecrets` section, add the credentials for your private Docker registry. - - {{< call-out "note" >}}The contents of `api.acm.client.caSecret.key` can be obtained from the `/etc/nms/certs/apigw/ca.pem` on the control plane.{{< /call-out >}} - - This `values.yaml` file specifies the Docker images to be used for the NGINX Developer Portal `apigw` and `api` components, including the repository (``) and tag (`version`) of each image. It also specifies that a secret called `regcred` should be used for image pulls. 
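If the `regcred` secret does not exist yet, you can create it with `kubectl`. The values below are placeholders, not required names; create the secret in the namespace you plan to install into (this guide uses `devportal`):

```shell
# Create the namespace ahead of time (helm install --create-namespace also works)
kubectl create namespace devportal

# Create the image pull secret referenced by imagePullSecrets in values.yaml
kubectl create secret docker-registry regcred \
  --docker-server=<my-docker-registry> \
  --docker-username=<registry-username> \
  --docker-password=<registry-password> \
  --namespace devportal
```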
- - {{< call-out "note" >}}For instructions on creating a secret, see the Kubernetes topic [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).{{< /call-out>}} - -2. Save and close the `values.yaml` file. - -## Install the Chart - -The Developer Portal does not require (although it is recommended) a dedicated namespace for the data plane. You can create this namespace yourself, or you can allow Helm to create it for you by using the `--create-namespace` flag when installing. - -{{< call-out "note" >}} -If persistent storage is not configured in your cluster, set the `apigw.persistence.enabled` and `api.persistence.enabled` values to `false` either in the values file or using the `--set` helm commands. -{{< /call-out >}} - -To install the chart with the release name `devportal` and namespace `devportal`, run the following command: - -```bash -helm install devportal nginx-stable/nginx-devportal --namespace devportal --create-namespace -f [--version ] --wait -``` - ---- - -## Upgrade the Chart {#upgrade-the-chart} - -You can upgrade to the latest Helm chart from the version immediately before it. For example, you can upgrade from v1.3.0 to v1.3.1. - -### Upgrade the Release - -To upgrade the release `devportal` in the `devportal` namespace, run the following command: - -```bash -helm upgrade devportal nginx-stable/nginx-devportal --namespace devportal -f [--version ] --wait -``` - -### Change Configuration Options - -You can use the `helm upgrade` command to change or apply additional configurations to the release. - -To change a configuration, use `--set` commands or `-f `, where `my-values-file` is a path to a values file with your desired configuration. - ---- - -## Uninstall the Chart - -To uninstall and delete the release `devportal` in the `devportal` namespace, take the following step: - -```bash -helm uninstall devportal --namespace devportal -``` - -This command removes all of the Kubernetes components associated with the Developer Portal release. The namespace is not deleted. - ---- - -## Configurable Helm Settings {#configuration-options} - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configuration-options.md" >}} - -
      - -## Common Deployment Configurations - -Select from the following options to view some of the commonly used configurations for the Developer Portal. To apply these configurations, edit the `values.yaml` file as needed. - -### Deploy Developer Portal with an SQLite database - -{{< call-out "note" >}} -This configuration is recommended for proof of concept installations and not for production deployments. -{{< /call-out >}} - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-helm-devportal-sqlite.md" >}} - -### Deploy Developer Portal with an embedded PostgreSQL database - -{{< call-out "note" >}} -This configuration is recommended for proof of concept installations and not for production deployments. -{{< /call-out >}} - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-embedded-postgres.md" >}} - -### Deploy Developer Portal with an external PostgreSQL database - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-external-postgres.md" >}} - -### Deploy Developer Portal using TLS for the backend API service - -{{< include "installation/helm/acm/dev-portal-helm-configurations/configure-devportal-helm-api-mtls.md" >}} diff --git a/content/nms/acm/how-to/devportals/installation/install-dev-portal.md b/content/nms/acm/how-to/devportals/installation/install-dev-portal.md deleted file mode 100644 index f4eb76501..000000000 --- a/content/nms/acm/how-to/devportals/installation/install-dev-portal.md +++ /dev/null @@ -1,264 +0,0 @@ ---- -description: Follow the steps in this guide to install or upgrade the Developer Portal - for F5 NGINX Management Suite API Connectivity Manager. -nd-docs: DOCS-1214 -title: Install or Upgrade the Developer Portal -toc: true -weight: 10 -type: -- tutorial ---- - ---- - -## Platform Requirements {#acm-devportal-requirements} - -{{< call-out "important" >}}To run the Developer Portal, you need a **dedicated** Linux host specifically for this purpose. **Do not** install the Developer Portal on a host that is currently serving as a management or data plane.{{< /call-out >}} - -Complete the following steps to prepare the Developer Portal for use with API Connectivity Manager: - -1. [Install F5 NGINX Plus R24 or later](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) -2. [Install NGINX njs module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) - -
      - Supported Linux distributions - -{{< include "tech-specs/acm-dev-portal-supported-distros.md" >}} - -
      - -
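After installing NGINX Plus and the njs module, you can optionally confirm the host meets these requirements. A minimal check, assuming a package-based installation:

```shell
# NGINX Plus version (should be R24 or later)
nginx -v

# Confirm the njs dynamic module package is installed
dpkg -s nginx-plus-module-njs   # Debian/Ubuntu
rpm -q nginx-plus-module-njs    # RHEL/CentOS and other RPM-based systems
```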
      - ---- - -## Prerequisites - -### Add NGINX Management Suite Repository {#add-yum-apt} - -{{< include "installation/add-nms-repo.md" >}} - -### Install PostgreSQL or SQLite - -The Developer Portal requires a PostgreSQL or SQLite database to store configuration settings and analytics information. - -Select the tab for the database you want to use, then follow the installation instructions. - -{{}} - -{{%tab name="PostgreSQL"%}} - -To use PostgreSQL for the Developer Portal database, take the following steps: - -1. Install PostgreSQL: - - - CentOS, RHEL, RPM-based: - - ```bash - sudo yum install -y postgresql-server - sudo postgresql-setup initdb - ``` - - - Debian, Ubuntu, Deb-based: - - ```bash - sudo apt-get install -y postgresql - ``` - -2. Configure the PostgreSQL host-based authentication (HBA) file: - - - CentOS, RHEL, RPM-based: - - ``` bash - cat << EOF | sudo tee /var/lib/pgsql/data/pg_hba.conf - - # TYPE DATABASE USER ADDRESS METHOD - - local all postgres peer - local all all md5 - # IPv4 local connections: - host all all 127.0.0.1/32 md5 - # IPv6 local connections: - host all all ::1/128 md5 - EOF - ``` - - - Debian, Ubuntu, Deb-based: - - ``` bash - cat << EOF | sudo tee /etc/postgresql//main/pg_hba.conf - - # TYPE DATABASE USER ADDRESS METHOD - - local all postgres peer - local all all md5 - # IPv4 local connections: - host all all 127.0.0.1/32 md5 - # IPv6 local connections: - host all all ::1/128 md5 - EOF - ``` - -3. Restart PostgreSQL: - - ``` bash - sudo systemctl restart postgresql - ``` - -4. Create the `devportal` database, add the `nginxdm` user, and assign privileges: - - ```bash - sudo -u postgres createdb devportal - sudo -u postgres psql -c "CREATE USER nginxdm WITH LOGIN PASSWORD 'nginxdm';" - sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE devportal TO nginxdm;" - ``` - -{{%/tab%}} - -{{%tab name="SQLite"%}} - -To use SQLite for the Developer Portal database, run the following commands: - -```bash -echo 'DB_TYPE="sqlite"' | sudo tee -a /etc/nginx-devportal/devportal.conf -echo 'DB_PATH="/var/lib/nginx-devportal"' | sudo tee -a /etc/nginx-devportal/devportal.conf -``` - -
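To confirm the settings were appended, you can optionally view the last lines of the configuration file:

```shell
tail -n 2 /etc/nginx-devportal/devportal.conf
```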
      - -{{%/tab%}} - -{{
      }} - ---- - -## Install the Developer Portal - -{{}} -{{%tab name="CentOS, RHEL, RPM-Based"%}} - -1. To install the Developer Portal, run the following command: - - ```bash - sudo yum -y install nginx-devportal nginx-devportal-ui - ``` - -{{%/tab%}} - -{{%tab name="Debian, Ubuntu, Deb-Based"%}} - -1. To install the Developer Portal, run the following commands: - - ```bash - sudo apt-get update - sudo apt-get -y install nginx-devportal nginx-devportal-ui - ``` - -{{%/tab%}} -{{}} - -3. Enable the Developer Portal service: - - ```bash - sudo systemctl enable nginx-devportal.service - ``` - -2. Start the Developer Portal service: - - ```bash - sudo systemctl start nginx-devportal.service - ``` - - --- - -## Upgrade the Developer Portal - -{{}} -{{%tab name="CentOS, RHEL, RPM-Based"%}} - -1. To install the latest version of the Developer Portal, run the following command: - - ```bash - sudo yum update -y nginx-devportal nginx-devportal-ui - ``` - -{{%/tab%}} - -{{%tab name="Debian, Ubuntu, Deb-Based"%}} - -1. To install the latest version of the Developer Portal, run the following commands: - - ```bash - sudo apt-get update - sudo apt-get upgrade -y nginx-devportal nginx-devportal-ui - ``` - -{{%/tab%}} -{{}} - -2. Enable the Developer Portal service: - - ```bash - sudo systemctl enable nginx-devportal.service - ``` - -3. Restart the Developer Portal service: - - ```bash - sudo systemctl restart nginx-devportal.service - ``` - ---- - -## Secure Developer Portal API communication - -Depending on your [deployment pattern for the Developer Portal]({{< ref "/nms/acm/how-to/infrastructure/configure-devportal-backend.md" >}}), you may have either a single host installation(default) or a multi-host installation for high availability. We recommend using mTLS for the communication between the NGINX reverse proxy and the Developer Portal APIs to provide maximum security. - -1. On the Developer Portal Service host or hosts, edit the Dev Portal configuration file located at `/etc/nginx-devportal/devportal.conf` -1. Add the location of the server certificate and certificate key, as shown in the example below. - - ```yaml - CERT_FILE="/path/to/devportal-server.crt" - KEY_FILE="/path/to/devportal-server.key" - INSECURE_MODE=false - CA_FILE="/path/to/ca.pem" # If using mTLS - CLIENT_VERIFY=true # If using mTLS - ``` - -1. Adjust the permissions of each of the certificate and key files provided to ensure they are readable by the Dev Portal backend service. -1. Restart the developer portal backend service: - - ```shell - sudo systemctl restart nginx-devportal - ``` - -1. If mTLS is configured on your Developer Portal service, you must add a TLS Backend Policy to both; - - The Developer Portal Cluster (Used for communication from users to the Developer Portal API) - - The Developer Portal Internal Cluster (For communication from the API Connectivity Manager to your Devportal Portal API to publish and maintain information) -{{< call-out "note" >}} -To add a TLS Backend Policy to both clusters. Follow the [TLS Policies]({{< ref "/nms/acm/how-to/policies/tls-policies.md#add-tls-listener" >}}) documentation. -{{< /call-out >}} - ---- - -## Secure communication from the Developer Portal to NGINX Management Suite host with mTLS - -For complete Developer Portal functionality, such as the ability to create credentials from the Developer Portal, mTLS must be added for server-to-server communication. 
- -Follow the steps below to make sure NGINX Management Suite host can verify the client certificates provided by the Developer Portals backend service. - -1. Edit the NGINX Management Suite configuration file located at `/etc/nginx/conf.d/nms-http.conf`. -1. Add the location of the CA PEM file to the `ssl_client_certificate` directive, as shown in the example below: - - ```yaml - ssl_certificate /etc/nms/certs/manager-server.pem; - ssl_certificate_key /etc/nms/certs/manager-server.key; - ssl_client_certificate /etc/nms/certs/ca.pem; - ``` - -1. Reload the NGINX configuration: - - ```shell - sudo nginx -s reload - ``` - -1. Follow the steps in the [TLS Policies]({{< ref "/nms/acm/how-to/policies/tls-policies.md#/#tls-internal-cluster" >}}) documentation to add TLS policies that will enforce mTLS using these the correct client keys to connect to the NGINX Management Suite host. diff --git a/content/nms/acm/how-to/devportals/installation/install-devportal-offline.md b/content/nms/acm/how-to/devportals/installation/install-devportal-offline.md deleted file mode 100644 index da52cc892..000000000 --- a/content/nms/acm/how-to/devportals/installation/install-devportal-offline.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -description: Complete the steps in this guide to install the Developer Portal directly - from package files in environments without Internet access. -nd-docs: DOCS-1215 -title: Install the Developer Portal in an Offline Environment -toc: true -weight: 30 -type: -- tutorial ---- - -{{< dev-portal-dedicated-host >}} - - -## Prerequisites - -The Developer Portal requires [PostgreSQL](https://www.postgresql.org), [F5 NGINX Plus R24](https://docs.nginx.com/nginx/) or later, and [njs](https://nginx.org/en/docs/njs/). - -### PostgreSQL - -You can install the PostgreSQL package from your distribution’s repo at the same time you install the operating system. Refer to the the [PostgreSQL download guide](https://www.postgresql.org/download/) for instructions. - -### NGINX Plus and njs - -To install NGINX Plus and njs, take the following steps on the Developer Portal host: - -1. Log in to MyF5 and download your `nginx-repo.crt` and `nginx-repo.key` files. -2. Copy the `nginx-repo.crt` and `nginx-repo.key` files to the `/etc/ssl/nginx/` directory: - - ```bash - sudo cp nginx-repo.crt /etc/ssl/nginx/ - sudo cp nginx-repo.key /etc/ssl/nginx/ - ``` - -3. Select the following link to download the `fetch-external-acm-dataplane-dependencies.sh` script. This script downloads the necessary NGINX Plus and njs packages to a `tar.gz` archive. - - {{}} {{}} - -4. To download the NGINX Plus and njs dependencies, run the `fetch-external-acm-dataplane-dependencies.sh` script. As parameters, specify your Linux distribution and the location of your `nginx-repo.crt` and `nginx-repo.key` files. - - ```bash - sudo bash fetch-external-acm-dataplane-dependencies.sh /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key - ``` - - Supported Linux distributions: - - - `ubuntu18.04` - - `ubuntu20.04` - - `ubuntu22.04` - - `debian10` - - `debian11` - - `centos7` - - `rhel7` - - `rhel8` - - `rhel9` - - `amzn2` - - For example, to download external dependencies for Ubuntu 20.04: - - ```bash - sudo bash fetch-external-acm-dataplane-dependencies.sh ubuntu20.04 /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key - ``` - - In this example, the script creates an archive called `acm-dataplane-dependencies-ubuntu20.04.tar.gz` with the external dependencies. - -5. 
After you copy and extract the bundle onto your target machine, take the following steps to install the packages: - - {{< call-out "note" >}}The bundled NGINX Plus package may conflict with installed versions of NGINX Plus. Delete the package from the bundle if you want to keep the existing version.{{< /call-out >}} - - {{}} - {{%tab name="CentOS, RHEL, and RPM-Based"%}} - -```bash -tar -kzxvf acm-dataplane-dependencies-.tar.gz -sudo yum localinstall *.rpm -``` - - {{%/tab%}} - {{%tab name="Debian, Ubuntu, and Deb-Based"%}} - -```bash -tar -kzxvf acm-dataplane-dependencies-.tar.gz -sudo dpkg -i ./*.deb -``` - -{{%/tab%}} -{{}} - ---- - -## Install the Developer Portal - -{{}} -{{%tab name="CentOS, RHEL, and RPM-Based"%}} - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the NGINX Developer Portal package files. - -2. Install the NGINX Developer Portal packages: - - ```bash - sudo yum -y --nogpgcheck install /home/user/nginx-devportal-.x86_64.rpm - sudo yum -y --nogpgcheck install /home/user/nginx-devportal-ui-.x86_64.rpm - ``` - -{{%/tab%}} -{{%tab name="Debian, Ubuntu, and Deb-Based"%}} - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the NGINX Developer Portal package files. - -2. Install the NGINX Developer Portal package: - - ```bash - sudo apt-get -y install -f /home/user/nginx-devportal__amd64.deb - sudo apt-get -y install -f /home/user/nginx-devportal-ui__amd64.deb - ``` - -{{%/tab%}} -{{}} - -3. Enable the Developer Portal service: - - ```bash - sudo systemctl enable nginx-devportal.service - ``` - -4. Start the Developer Portal service: - - ```bash - sudo systemctl restart nginx-devportal.service - ``` - ---- - -## Upgrade the Developer Portal - -To upgrade the Developer Portal in an offline environment, take the following steps: - -{{}} -{{%tab name="CentOS, RHEL, and RPM-Based"%}} - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the Developer Portal package files. - -2. Upgrade the Developer Portal packages: - - ```bash - sudo yum -y --nogpgcheck update /home/user/nginx-devportal_.x86_64.rpm - sudo yum -y --nogpgcheck update /home/user/nginx-devportal-ui_.x86_64.rpm - ``` - -{{%/tab%}} -{{%tab name="Debian, Ubuntu, and Deb-Based"%}} - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the Developer Portal package files. - -2. Upgrade the Developer Portal packages: - - ```bash - sudo apt-get -y install -f /home/user/nginx-devportal__amd64.deb - sudo apt-get -y install -f /home/user/nginx-devportal-ui__amd64.deb - ``` - -{{%/tab%}} -{{}} - -3. Enable the following Developer Portal service: - - ```bash - sudo systemctl enable nginx-devportal.service - ``` - -4. Restart the Developer Portal service: - - ```bash - sudo systemctl restart nginx-devportal.service - ``` diff --git a/content/nms/acm/how-to/devportals/publish-to-devportal.md b/content/nms/acm/how-to/devportals/publish-to-devportal.md deleted file mode 100644 index 639cb8c99..000000000 --- a/content/nms/acm/how-to/devportals/publish-to-devportal.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -description: This document provides instructions on how to publish API documentation - and API proxies to a Developer Portal in order to make them available at a designated - hostname. 
-nd-docs: DOCS-1082 -title: Publish Docs to a Developer Portal -toc: true -weight: 200 -type: -- how-to ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - ---- - -## Overview - -This document will guide you through the process of publishing API documentation and API proxies to a Developer Portal. You will find instructions on how to add an API spec file, publish API documentation and the associated API proxy, or publish API documentation only. After completing these steps, you should be able to access your API and documentation at the designated hostname. - ---- - -## Before You Begin - -To complete the steps in this guide, you need the following: - -- [API Connectivity Manager is installed]({{< ref "/nim/deploy/_index.md" >}}) and running -- One or more environments with a [configured Developer Portal]({{< ref "/nms/acm/getting-started/add-devportal.md" >}}) -- (Optional) [Customize the Developer Portal]({{< ref "/nms/acm/how-to/infrastructure/customize-devportal.md" >}}) - ---- - -## How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - ---- - -## Add an API Doc - -1. On the sidebar, select **Services**. -1. Select your workspace. -1. Select **API Docs > Add API Doc**. -1. Browse your local filesystem and select the API Spec in YAML or JSON format that you'd like to upload. -1. Select **Save**. - -## Publish the API Documentation and API Proxy - -1. Select **Services** on the sidebar. -1. Select your workspace from the **Workspaces** list. -1. On the **API Proxies** section, select **Publish to Proxy**. -1. On the **Name** box, type the name for the backend service. `-svc` will be added to the name automatically. -1. On the **Service Target Hostname**, type the hostname for the Service Target. -1. On the **API Proxy** section, the **Name** box is automatically filled. -1. On the **API Spec** section, select your spec using the list. -1. Select your **Gateway Proxy Hostname** using the list. -1. Confirm the **Base Path** and **Version** on the **Ingress** section. Update the default values if needed. -1. Check the **Also publish API to developer portal** box on the **Developer Portal** section. -1. Select the **Portal Proxy Hostname** using the list. -1. Select **Publish** - -The API and documentation should now be available at the hostname provided for the Developer Portal proxy. - -## Publish the API Documentation Only - -Take the steps below to publish just the API documentation. - -1. Select **Services** on the sidebar. -1. Select **Publish API Doc** from the **Actions** menu. -1. In the **Name** box, type the name for your API Doc. -1. On the **API Spec** section, select your spec using the list. -1. Select the **Portal Proxy Hostname** using the list. -1. Confirm the **Base Path** and **Version** on the **Ingress** section. Update the default values if needed. -1. Select the **Enter an external Hostname** option. -1. On the **External Hostname** section, provide the hostname for your external Target Proxy. -1. Select **Save**. 
diff --git a/content/nms/acm/how-to/infrastructure/_index.md b/content/nms/acm/how-to/infrastructure/_index.md deleted file mode 100644 index f1345f39b..000000000 --- a/content/nms/acm/how-to/infrastructure/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Infrastructure -weight: 100 -url: /nginx-management-suite/acm/how-to/infrastructure/ ---- \ No newline at end of file diff --git a/content/nms/acm/how-to/infrastructure/configure-devportal-backend.md b/content/nms/acm/how-to/infrastructure/configure-devportal-backend.md deleted file mode 100644 index f45986532..000000000 --- a/content/nms/acm/how-to/infrastructure/configure-devportal-backend.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -description: Learn how to support various deployment patterns for Developer Portal. -nd-docs: DOCS-955 -title: Deployment Patterns for Developer Portal -toc: true -weight: 200 ---- - -{{< shortversions "1.2.0" "latest" "acmvers" >}} - -## Overview - -The Developer Portal application is a combination of a portal application (Developer Portal UI) and a backend API service (Developer Portal API service) to support the application. - -The following deployment patterns are supported: - -- Developer Portal UI and API service deployed on a single host (default). -- Load-balanced backend API using multiple IP addresses. Developer Portal UI and API service deployed on different hosts. -- Load-balanced backend API using a single hostname. Developer Portal UI and API service deployed on different hosts using a single hostname and frontend by a load balancer. - ---- - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with a [Developer Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) cluster. -- You have verified that you can access the Developer Portal using the configured hostname. - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - ---- - -## Single Host Installation - -With the localhost installation, which is the default setup, both the backend and UI Developer Portal binaries are installed on the same machine. The backend API service is on the localhost, port 8080, by default. - -
      -{{< img src="acm/deployment-patterns/LocalInstall.png" alt="local install" width="400" >}} -
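To confirm a single-host install is healthy, you can check that the Developer Portal service is running and that the backend API service is listening on its default port. This is a minimal sketch using standard Linux tooling; adjust the port if you changed the backend configuration.

```bash
# The Developer Portal service should report "active (running)".
sudo systemctl status nginx-devportal.service

# The backend API service listens on port 8080 by default.
sudo ss -ltnp | grep 8080
```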
      - ---- - -## Multi-Host Installation for High Availability - -The Developer Portal backend API service can be scaled for high availability by installing the backend binaries on multiple hosts. The Developer Portal front-end load balances the requests between multiple backend services using an IP address or an internal DNS name. - -
      -{{< img src="acm/deployment-patterns/MultipleIP.png" alt="multiple IPs" width="400" >}} -
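Before you add the backend hosts as service targets (described in the next section), it can help to confirm that each backend API host is reachable from the portal host. The IP addresses below are placeholders for your backend hosts, and 8080 is the default backend port.

```bash
# Any HTTP status code (even 404) shows the backend port is reachable;
# "000" usually means the connection failed.
curl -sS -o /dev/null -w "%{http_code}\n" http://10.0.0.11:8080/
curl -sS -o /dev/null -w "%{http_code}\n" http://10.0.0.12:8080/
```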
      - -### Configure Developer Portal Backend - -When creating a Developer Portal in an environment, you can set multiple `serviceTargets` to match any of the deployment patterns above. - -1. In the API Connectivity Manager user interface, go select **Workspaces > Environments > \**, where "your environment" is the Environment that contains the Developer Portal. -1. Select **Edit Advanced Config** from the **Actions** menu for the desired Developer Portal. -1. On the **Backend** tab, select the default backend service, then select **Edit Backend** from the **Actions** menu. -1. Add/Update desired service target. diff --git a/content/nms/acm/how-to/infrastructure/customize-devportal.md b/content/nms/acm/how-to/infrastructure/customize-devportal.md deleted file mode 100644 index 3a64e7aca..000000000 --- a/content/nms/acm/how-to/infrastructure/customize-devportal.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -description: Learn how to customize a Developer Portal and publish documentation using - F5 NGINX Management Suite API Connectivity Manager. -nd-docs: DOCS-900 -title: Customize a Developer Portal -toc: true -weight: 300 ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -You can use API Connectivity Manager to create and manage Developer Portals (or, "Dev Portals") to host your APIs and documentation. API Connectivity Manager Dev Portals provide a framework for customization that lets you match your Dev Portal to your brand's or business' requirements. -You can customize the Dev Portal website's landing page, All APIs page, and Docs page(s), as well as the site's header and footer. - -### Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with a [Developer Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) cluster. -- You have verified that you can access the Developer Portal using the configured hostname. - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -## Customize a Developer Portal {#create-dev-portal} - -API Connectivity Manager uses a Dev Portal framework to define the look and feel of Developer Portals. These settings are applied at the Cluster level and apply to all Developer Portals hosted by the Cluster. - -Take the steps below to customize your Dev Portal by defining a custom Dev Portal framework. - -1. In the API Connectivity Manager user interface, go to **Environments > \**, where "your environment" is the Environment that contains a Developer Portal. -1. Select **Edit Portal Theme** from the **Actions** menu for the desired Developer Portal. You can then edit any of the options provided. - - - [Brand](#brand-options) - - [Style](#style-options) - - [Website](#website-options) - -1. You can save your changes at any time by selecting the **Save and Publish** option. - -{{< call-out "note" >}} -
      - -- The Preview section to the right of the settings in each section will update automatically as you make changes. -- The changes will be applied immediately when you select **Save and Publish**; as such we recommend testing any changes in a "non-production" Environment first. -{{< /call-out >}} - -## Brand Options - -### Add Custom Logo - -1. Select **Upload Image**. -1. Browse your local filesystem and select the file you'd like to upload. -1. Select **Save and Publish** or select **Next** to continue making changes. - -### Add a Favicon - -1. Select **Upload Favicon**. -1. Browse your local filesystem and select the file you'd like to upload. -1. Select **Save and Publish** or select **Next** to continue making changes. - -## Style Options - -### Colors - -You can customize the colors of the following items: - -- page background, -- page text, -- theme (buttons, etc.), and -- callouts (information, success, error, and warning). - -To customize any of the above fields: - -1. Select the field that you want to customize. -1. Enter the hex code for the desired color, or drag the sliders to create and refine a custom color. -1. Select **Save and Publish** or select **Next** to continue making changes. - -### Fonts - -You can customize the font used in any of the following: - -- hero text, -- header and body text, -- code. - -To customize the font used in your Dev Portal(s): - -1. Select the field that you want to customize. -1. Choose a font from the list provided. -1. Select **Save and Publish** or select **Next** to continue making changes. - -## Website Options - -You can customize the following sections of your Dev Portal website: - -- [Header](#header-options) -- [Footer](#footer-options) -- [Homepage](#homepage-options) -- [Documentation](#add-documentation) - -### Header {#header-options} - -You can customize the header's background color, text color, and provide text to appear next to your logo image. - -To customize the background or text color: - -1. Select the field that you want to customize. -1. Enter the hex code for the desired color, or drag the sliders to create and refine a custom color. - -To customize the header text, enter the text you want to use in the **Complementary logo text** field. - -Then, select **Save and Publish** or select **Next** to continue making changes. - -### Footer {#footer-options} - -You can customize the footer's background color, text color, and provide links to appear in the footer. - -To customize the background or text color: - -1. Select the field that you want to customize. -1. Enter the hex code for the desired color, or drag the sliders to create and refine a custom color. - -To add links: - -1. Select **Add Links**. -1. Provide the display text and the target URL. - -To delete links: - -1. Select the "Delete" icon for any link that you want to remove. - -Then, select **Save and Publish** or select **Next** to continue making changes. - -### Homepage {#homepage-options} - -You can customize the following options for your Dev Portal homepage: - -- Hero image/banner text and color -- "About Us" cards: The homepage features three cards, which appear below the banner. -- Steps for getting started with your API. - -To edit the hero image/banner: - -1. Select the **Edit** icon. -1. Enter your desired text for the **Title** and **Secondary Title**. -1. Select the **Background** field and/or **Ink** field, then enter the hex code for the desired color or drag the sliders to create and refine a custom color. -1. Select **Save Changes**. 
- -To edit the "About Us" cards: - -1. Select the **Edit** icon. -1. Enter your desired **Title** (required), **Description** (required), icon, and **Alt Text** for each card. -1. Select **Save Changes**. - -To edit the **Get Started** steps: - -1. Select the **Edit** icon. -1. Enter your desired **Title** (required), **Description** (required), icon, and **Alt Text** for each of the four steps. -1. Select **Save Changes**. - -> {{< icon "fa-solid fa-lightbulb" >}} At this point, we recommend selecting **Save and Publish** to save any customizations you've made. -> Verify that the changes have been applied, then move on to adding your [**Documentation**](#add-documentation). - -### Documentation {#add-documentation} - -#### Configure Documentation Page - -You can edit the **Documentation Page** section of your Dev Portal website to add custom documentation for your APIs. -You can add or edit up to five Markdown documents. The following placeholder pages are included by default: - -- Get Started -- Authentication -- Responses -- Errors - -To customize the **Documentation** page of your Developer Portal, take the steps below: - -1. Select the **Documentation** option in the **Edit Developer Portal** sidebar. -1. To add a new Markdown document, select **Add Page**. This adds a new blank item to the Pages table, which you can then edit and preview as described below. - - 1. Select the **Edit** icon in the Pages table. - 1. Edit the sample text, or paste your text into the editor. - 1. Select **Preview** to view the Markdown rendered as HTML. - 1. Provide a new **Page Name**, if desired. - 1. Select **Save** to save your changes. - -1. To reorder your documents, select the up or down arrow next to the Page name. - -When ready, select **Save and Publish** to save all of your changes and publish your documentation. - -#### Configure All APIs Page - -The **All APIs** page is where all of your APIs will appear on your Dev Portal site. -To customize the **Configure All APIs** page of your Developer Portal, take the steps below: - -1. Select the **Documentation** option in the **Edit Developer Portal** sidebar. -1. Select the **Edit** icon for the **Configure All APIs** section. -1. Add your **Page Description** (required). -1. Select **Upload illustration** to add a new full-width image to the page. Then, browse your local filesystem and select the file you'd like to upload. -1. To change the image background color, select the **Illustration Background** field. Then, then enter the hex code for the desired color or drag the sliders to create and refine a custom color. -1. Add the desired **Alt Text** for the image to the field provided. -1. Select **Save Changes**. - -When ready, select **Save and Publish** to save and publish your **All APIs** page changes. diff --git a/content/nms/acm/how-to/infrastructure/enable-create-credentials.md b/content/nms/acm/how-to/infrastructure/enable-create-credentials.md deleted file mode 100644 index c57d11f97..000000000 --- a/content/nms/acm/how-to/infrastructure/enable-create-credentials.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -description: Follow the steps in this guide to allow users to create credentials as - a self-service workflow on the Developer Portal. -nd-docs: DOCS-947 -title: Enable Creating Credentials on the Developer Portal -toc: true -weight: 400 -type: -- how-to ---- - -{{< raw-html >}} - -{{< /raw-html >}} -## Overview - -API Connectivity manager supports public API workflows. 
Public APIs are open for anyone to consume by requesting resource credentials. Resource credentials can be managed on the Developer Portal for public APIs secured with APIKey or Basic Authentication. Consumers have to log in to the Developer Portal to create credentials. Once created, credentials can be used to access APIs. Users can also use the credentials to test APIs on the Developer Portal with the **Try It Out** feature. - -### Before You Begin - -To complete the steps in this guide, you need to the following: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more environments with [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) clusters. - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - ---- - -## Self-Managed Credentials Workflow - -On the Developer Portal, logged-in users can create credentials for public APIs. Since this workflow is available only for logged-in users, the OIDC policy must be applied on the Developer Portal to enable SSO with an IDP of choice. In addition, the API proxies should be secured with either Basic Authentication or APIKey authentication policy. - -The process for setting up end-to-end credentials is as follows: - -- Enable the Credentials endpoint on the API Connectivity Manager host -- Enable SSO on the Developer Portal with an OIDC policy -- Publish the API Proxy and secure it by adding an APIKey or Basic Authentication policy - -Afterward, the API consumer can create credentials on the Developer Portal by performing the following: - -- API consumer logs in to developer portal, creates org, app, and credentials for the API. -- Test the API with the **Try It Out** option and the newly created credentials. - -### Enable Create Credentials Endpoint - -As mTLS is not enabled by default, the Credentials endpoint is disabled initially. You must enable the Credentials endpoint on the API Connectivity Manager host to use the Developer Portal credentials workflow. - -{{< call-out "important" >}}mTLS is essential to secure communication between API Connectivity Manager and the Developer Portal.{{< /call-out >}} - -To enable the Credentials endpoint on the API Connectivity Manager host, take the following steps: - -1. Make sure mTLS server and client certificates have been configured for Devportal to F5 NGINX Management Suite by following these [instructions]({{< ref "/nms/acm/how-to/devportals/installation/install-dev-portal.md#secure-communication-from-the-developer-portal-to-nginx-management-suite-host-with-mtls" >}}) to add your server certs, CA file and enforce mTLS. - -1. Open an SSH connection into the API Connectivity Manager host and log in. - -1. Enable the Credentials endpoint: - - Open `/etc/nms/nginx/locations/nms-acm.conf` for editing and uncomment the location block. - - ``` yaml - # Deployment of resource credentials from the devportal - # Uncomment this block when using devportal. Authentication is disabled - # for this location. This location block will mutually - # verify the client trying to access the credentials API. - location = /api/v1/devportal/credentials { - # OIDC authentication (uncomment to disable) - #auth_jwt off; - auth_basic off; - error_page 401 /401_certs.json; - if ($ssl_client_verify != SUCCESS) { - return 401; - } - proxy_pass http://acm-api-service/api/acm/v1/devportal/credentials; - } - ``` - -1. Save the changes. - -1. 
Reload NGINX on the API Connectivity Manager host: - - ```bash - sudo nginx -s reload - ``` - -### Enable SSO on the Developer Portal - -1. Follow the instructions to [enable single sign-on (SSO) for the Developer Portal]({{< ref "/nms/acm/how-to/infrastructure/enable-sso-devportal.md" >}}) with an OIDC policy. - -### Publish and Secure the API Proxy - -A link to **Edit Advanced Configurations** is displayed upon publishing the API Proxy. If you want to add policies, this is where to do that. - -To add an APIKey Authentication policy: - -1. Select **Policies** in the advanced section of the menu. -2. Select **Add Policy** for the APIKey Authentication policy, then complete the required information in the form. -3. (Optional) To quickly test the setup, you can create a test credential. Add a credential by selecting **Add APIKey** and specifying **Client ID** and **APIKey**. -4. Select **Add Policy**. -5. Select **Save and Publish**. - -#### Add a CORS Policy - -Depending on the domain, you might need to add a CORS policy to the API proxy in order to use the **Try It Out** feature on the Developer Portal. - -To add a CORS policy: - -1. Select the **Policies** menu item in the advanced section of the menu. -2. Select **Add Policy** for the CORS policy. then complete the required information in the form. -3. Add the header used in the APIKey policy above to the **OPTIONS** request. -4. Select **Add Policy**. -5. Select **Save and Publish**. - -### Create Credentials - -Log in to the Developer Portal as an API Consumer. Use the **Create Credentials** option to create credentials for the API. - -{{< call-out "important" >}} - - To avoid misuse, the API Consumer may create only one APIKey per API. - -{{< /call-out >}} - -### Try It Out on the Developer Portal - -Once the credentials have been created and are available, you can use the **Try It Out** feature on the Developer Portal to test the API using the newly created credentials. diff --git a/content/nms/acm/how-to/infrastructure/enable-sso-devportal.md b/content/nms/acm/how-to/infrastructure/enable-sso-devportal.md deleted file mode 100644 index 2160a0367..000000000 --- a/content/nms/acm/how-to/infrastructure/enable-sso-devportal.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -description: Learn how to enable Single Sign-On for Developer Portal. -nd-docs: DOCS-928 -title: Enable Single Sign-On for Developer Portal -toc: true -weight: 400 -type: -- how-to ---- - -{{< raw-html >}} - -{{< /raw-html >}} - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -Single sign-on (SSO) can be enabled on the Developer Portal to secure access to the portal and to allow authenticated API consumers to manage resource credentials. Logged-in consumers can then self-manage resource credentials for the APIs. - -Single sign-on is enabled by applying an OpenID Connect (OIDC) policy on the Developer Portal. The OIDC policy sets up the portal proxy to act as a relying party to authenticate users with the OIDC provider. - -### Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. See [API Connectivity Manager Install Guide]({{< ref "/nim/deploy/_index.md" >}}). -- You have one or more Environments with [API Gateways]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) or [Developer Portals]({{< ref "/nms/acm/getting-started/add-devportal" >}}). 
- -### Terminology - -The following terminology is used in this topic: - -{{}} - -| Term | Description | -|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| AuthCode | Authorization Code. | -| IDP | Identity Provider stores and verifies a user's identity as a service. | -| IDP Authorization Server | The IDP Authorization Server authenticates and issues access tokens to users. | -| OAuth | OAuth is an open-standard authorization protocol. | -| OIDC | OpenID Connect is an authentication protocol that adds an identity verification layer. | -| PKCE | Proof Key for Code Exchange. When public clients request Access Tokens, some additional security concerns are posed that are not mitigated by the Authorization Code Flow alone. PKCE needs the clients app to provide proof to the authorization server, to verify that the authorization code belongs to the clients' app. | -| URI | Uniform Resource Indicator. It is a unique character sequence which distinguishes one resource from another. | - -{{}} - -### Supported OIDC Identity Providers - -API Connectivity Manager supports all of the same identity providers as F5 NGINX Plus. The following guides describe how to configure NGINX Plus for these identity providers, and outline where to find the information you'll need to configure them for OIDC. - -- [Auth0](/nginx/deployment-guides/single-sign-on/auth0/) -- [Amazon Cognito](/nginx/deployment-guides/single-sign-on/cognito) -- [Keycloak](/nginx/deployment-guides/single-sign-on/keycloak) -- [Microsoft Active Directory FS](/nginx/deployment-guides/single-sign-on/active-directory-federation-services) -- [Okta](/nginx/deployment-guides/single-sign-on/okta) -- [OneLogin](/nginx/deployment-guides/single-sign-on/onelogin) -- [Ping Identity](/nginx/deployment-guides/single-sign-on/ping-identity) - -## Set up OIDC Policy - -You can set up OIDC policy by using either the web interface or the REST API. - -### Updating OIDC Policy - -{{}} - {{%tab name="Web Interface"%}} - -1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments** and select the **Edit Advanced Config** from the **Actions** menu for the cluster you want to set up. -2. Select the **Global Policies** tab. -3. For **OpenID Connect Relying Party** select **Add Policy** from the policy's **Actions** menu. -4. Update **Application Settings**. - -{{< include "acm/how-to/update-application-settings.md" >}} - -5. Update **Authorization Server Settings** - -{{< include "acm/how-to/update-authorization-server-settings.md" >}} - -6. Update **General Settings** - -{{< include "acm/how-to/update-general-settings.md" >}} - -7. Update **Custom Error Handling**. - - You can customize how the proxy should handle the following error conditions: - - - when Client ID is not supplied - - when there is no match for the Client ID - - Specify the HTTP error code in the box next to the error condition. The specified error code will be displayed when the related error condition is true. - -8. Select **Add**. -9. Select **Save and Submit** your changes. - - {{%/tab%}} - {{%tab name="REST API"%}} - -1. Send a POST request to add the OIDC policy to the cluster. 
- - - {{}} - - | Method | Endpoint | - |-------------|----------| - | POST | `/api/v1/infrastructure/workspaces/{{proxyWorkspaceName}}/environments`| - - - - {{}} - - - ```json - { - "name": "test", - "type": "NON-PROD", - "functions": [ - "DEVPORTAL" - ], - "systemProperties": { - "acmHostName": "" - }, - "proxies": [...], - "policies": { - "oidc-authz": [ - { - "action": { - "config": { - "jwksURI": "https:///v1/keys", - "tokenEndpoint": "https:///v1/token", - "userInfoEndpoint": "https:///v1/userinfo", - "authorizationEndpoint": "https:///v1/authorize", - "logOffEndpoint": "https:///v1/logout", - "authFlowType": "PKCE" - } - }, - "data": [ - { - "appName": "Myapp", - "clientID": "", - "scopes": "apigw+openid+profile+email+offline_access" - } - ] - } - ] - } - } - ] - } - ``` - - {{%/tab%}} -{{}} - -Single sign-on (SSO) is enabled on the Developer Portal after configuring the OIDC policy. Application developers can log in through the configured centralized identity provider (IDP). After a successful login, they can create resource credentials for the available APIs. - -## Known Limitation with the policy - -The OIDC policy does not yet support custom DNS for resolution. Only external DNS resolution is supported. diff --git a/content/nms/acm/how-to/infrastructure/manage-api-infrastructure.md b/content/nms/acm/how-to/infrastructure/manage-api-infrastructure.md deleted file mode 100644 index 2671eb7e2..000000000 --- a/content/nms/acm/how-to/infrastructure/manage-api-infrastructure.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - manage your API infrastructure. -nd-docs: DOCS-924 -title: Manage API Infrastructure -toc: true -weight: 100 -type: -- how-to ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -API Connectivity Manager lets you manage your API infrastructure by using a set of hierarchical resources. The top-level resource, called a **Workspace**, provides a logical grouping for resources called **Environments**. Environments contain **Clusters** that allocate NGINX instances for use as API Gateways and Developer Portals. - -You can use Workspaces to create isolated work areas for business units or teams. You can use Environments to allocate infrastructure resources for use within a team's Workspace. - -This guide provides instructions for using API Connectivity Manager Workspaces and Environments to manage your API infrastructure. - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, running, and licensed. -- You have SSH access to the host where API Connectivity Manager is running and can use the `sudo` command. -- You have installed a [supported version]({{< ref "/nim/fundamentals/tech-specs.md" >}}) of F5 NGINX Plus on each host that you want to add to a Cluster. -- You know the IP address or FQDN for each host that you want to add to a cluster. -- You have SSH access to each of the hosts that you want to allocate to a cluster and can use the `sudo` command. -- You have installed the [`njs`](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) module on each host that you want to add to the cluster. - -## How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui.md" >}} - -## Create a Workspace {#create-workspace} - -{{}} - -{{%tab name="UI"%}} - -Take the steps below to create a new Workspace. - -1. 
From the API Connectivity Manager **Infrastructure** landing page, select **Create Workspace**. -1. In the **Create Workspace** drawer, provide a **Name** and **Description**. - - - **Name**: (required) A name can be any combination of lowercase letters, hyphens, numbers, or underscores. Spaces and capital letters are not allowed. - - **Description**: (optional; 150-character limit) The description should help others in your organization understand the nature or purpose of the Workspace. - -1. (Optional) Select the **Contact Information** box to designate someone as the Workspace's owner. Then, provide the following information: - - - **Contact Name** - - **Contact Email** - - **Slack**: The contact's Slack handle - -1. Select **Create** to save your changes. - -The **Create Workspace** drawer will display a confirmation when the Workspace has been created. From there, you can go on to [Add an Environment](#Add-an-environment) or go back to the Workspaces landing page. - -{{%/tab%}} -{{%tab name="API"%}} - - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces`| - -{{}} - - -```json -{ - "name": "{{infraWorkspaceName}}", - "metadata": { - "description": "Petstore Team Workspace" - }, - "contactDetails": { - "adminEmail": "admin@example.com", - "adminName": "I.M. Admin", - "adminPhone": "555 123 1234" - } -} -``` - -{{%/tab%}} -{{}} -## Add an Environment {#add-environment} -{{}} - -{{%tab name="UI"%}} - -After creating a Workspace, you must create at least one Environment. When creating an Environment, you will also create the Clusters where your API Gateway(s) and/or Developer Portal(s) will reside. - -{{< call-out "caution" >}} - -- Do not add the same host to both an API Gateway cluster and a Developer Portal cluster. -- The Developer Portal cluster requires at least one dedicated host. -{{< /call-out >}} - -Take the steps below to add an Environment. - -1. On the **Workspaces** landing page, select the ellipsis (`...`) icon for your desired Workspace. -1. Select **Add Environment**. -1. In the **Add Environment** drawer, provide the requested information: - - **Name** (required) - - **Description** (optional) - - **Type**: **Production** (**prod**) or **Non-Production** (**non-prod**) -1. In the **API Gateways** section, provide the **Name** and **Hostname** of at least one instance that you want to add to the cluster. - - This instance, or instance group, will host the API Gateway. -1. (Optional) In the **Developer Portals** section, provide the **Name** and **Hostname** of at least one instance that you want to add to the cluster. - - This instance, or instance group, will host the Developer Portal. - - {{< call-out "note" >}}The Dev Portal requires a separate, dedicated host. Do not install the Dev Portal on a host that is already running the management or data planes.{{< /call-out >}} -1. Select the **Create** button to create the Environment. The **Add Environment** drawer will display a confirmation when the Environment has been created. -1. Copy the `cURL` or `wget` command shown in the confirmation drawer and save it -- you will need to use this information to [add your NGINX instances to the cluster](#register-nginx-instance). 
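The onboarding command is generated for your specific Environment and cluster, so always use the exact command shown in the confirmation drawer. For illustration only, it generally follows the NGINX Agent install pattern sketched below; the hostname is a placeholder, and any additional flags in your generated command must be kept.

```bash
# Illustration only -- use the exact command shown in the confirmation drawer.
# It typically downloads and runs the NGINX Agent installer from your
# NGINX Management Suite host.
curl -k https://<NGINX-MANAGEMENT-SUITE-FQDN>/install/nginx-agent > install.sh
sudo sh install.sh
```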
- -{{%/tab%}} -{{%tab name="API"%}} -{{}} - -| Parameter | Description | -|:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `proxies.proxyClusterName` | The group of NGINX instances where configuration will be written | -| `proxies.hostnames` | An IP Address or fully qualified domain name (FQDN) used to identify the API-Gateway environment| - -{{}} - - - -{{}} - -| Method | Endpoint | -|-------------|----------| -| POST| `/infrastructure/workspaces/{{infraWorkspaceName}}/environments`| - -{{}} - - -```json -{ - "name": "{{environmentname}}", - "proxies": [ - { - "proxyClusterName": "{{proxyClusterName}}", - "hostnames": [ - "{{environmentHostname}}" - ] - } - ] -} -``` - -{{%/tab%}} -{{}} -## Onboard an NGINX Instance {#register-nginx-instance} - -[Install the NGINX Agent]({{< ref "/nms/nginx-agent/install-nginx-agent" >}}) on each host to register the instance with API Connectivity Manager as part of the cluster. - -Take the steps below to add an NGINX instance to an API Gateway. - -1. Use SSH to log in to the host machine. -1. Run the `cURL` or `wget` install command that was displayed in the **Environment Created** confirmation drawer. -1. When the installation is complete, the instance will appear in the **Instances** list for the cluster in the API Connectivity Manager user interface. -1. After running the `cURL` command you can check the environment job status on the environments page -{{Environment Onboarding Status.}} - - -## Environment Statuses -{{}} - - -| Status | Description | -|:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Configuring` | ACM have received the changes and will attempt to deploy to the instance group | -| `Pending` | Check that instance group contains instances, see Instance Groups Overview page | -| `Fail` | Deploying configurations have failed, review the Events page for more details | -| `Success` | Changes have been successfully deployed to the instance group | - -{{}} - -> {{< icon "fa-solid fa-circle-question" >}} **Lost your install command?** -> -> Don't worry! You can take the steps below to recover it: -> -> 1. In the API Connectivity Manager user interface, go to **Infrastructure > Environments > \**. -> 1. Click anywhere in the row of the Cluster that you want to add an instance to. -> 1. The **Onboarding Commands** will be shown in the cluster details drawer. diff --git a/content/nms/acm/how-to/infrastructure/publish-developer-portal.md b/content/nms/acm/how-to/infrastructure/publish-developer-portal.md deleted file mode 100644 index 1ddeacdb4..000000000 --- a/content/nms/acm/how-to/infrastructure/publish-developer-portal.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager - web interface to create, update, or delete a Developer Portal. -nd-docs: DOCS-901 -title: Publish a Developer Portal -toc: true -weight: 300 -type: -- how-to ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -You can use API Connectivity Manager to create and publish Developer Portals (or, "Dev Portals") to host your APIs and documentation. 
API Connectivity Manager Dev Portals provide a [framework for customization]({{< ref "/nms/acm/how-to/infrastructure/customize-devportal.md" >}}) that lets you match your Dev Portal to your brand's or business' requirements. - -You can also modify and delete your Developer Portals using API Connectivity Manager. - -### Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- Your [Infrastructure]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md" >}}) has one or more Environments with a [Developer Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) cluster. -- You have verified that you can access the Developer Portal using the configured hostname. - - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -## Create a Developer Portal - -### Create the Services Workspace - -1. Under **Modules**, select **API Connectivity Manager**. -1. On the sidebar, select **Services**. -1. On the **Services - Workspaces** section, select **Create Workspace**. -1. In the **Create Workspace** drawer, provide a **Name** and **Description**. - - **Name**: (required) A name can be any combination of lowercase letters, hyphens, numbers, or underscores. Spaces and capital letters are not allowed. - - **Description**: (optional; 150-character limit) The description should help others in your organization understand the nature or purpose of the Workspace. -1. (Optional) Select the **Contact Information** box to designate someone as the Workspace's owner. Then, provide the following information: - - - **Contact Name** - - **Contact Email** - - **Slack**: The contact's Slack handle - -1. Select **Create** to save your changes. - - -## Modify Developer Portal Resources - -### Edit Workspace Description and Contact Information - -1. On the sidebar, select **Services**. -1. Select the ellipsis button next to your workspace on the **Actions** column. -1. Select **Edit Workspace**. -1. Update the **Description** and **Workspace Contact Information** as needed. -1. Select **Save**. - -## Delete Developer Portal Resources - -### Remove a Developer Portal from an API Proxy - -1. On the sidebar, select **Services**. -1. Select your workspace from the list. -1. On the **API Proxies** section, select the ellipsis button next to your API Proxy in the **Actions** column. -1. Select **Edit Proxy**. -1. On the **Basic > Configuration** section, uncheck **Also publish API to developer portal**. -1. Select **Save & Publish**. - -### Delete API Docs - -1. On the sidebar, select **Services**. -1. Select your workspace from the list. -1. On the **API Docs** section, select the ellipsis button next to your API Doc in the **Actions** column. -1. Select **Delete API Doc**. -1. Select **Delete** to confirm the action. - -### Delete Services Workspaces - -{{< call-out "note" >}}To delete a Workspace, you must delete all the API Proxies and API Docs belonging to a Services Workspace.{{< /call-out >}} - -1. On the sidebar, select **Services**. -1. Select the ellipsis button next to your workspace in the **Actions** column. -1. Select **Delete workspace**. -1. Select **Delete** to confirm the action. 
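If you prefer to script cleanup, the same resources are exposed through the REST API. The sketch below is an assumption based on the `/services/workspaces/{workspace}/...` paths used elsewhere in this documentation; confirm the exact path and authentication method against your API Connectivity Manager API reference, and remember that all API Proxies and API Docs in the workspace must be deleted first.

```bash
# Sketch only -- confirm the path and auth method against the ACM API reference.
# The workspace must be empty (no API Proxies or API Docs) before deletion.
curl -X DELETE -u <user>:<password> \
  "https://<NMS-FQDN>/api/acm/v1/services/workspaces/<workspace-name>"
```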
diff --git a/content/nms/acm/how-to/install-acm-offline.md b/content/nms/acm/how-to/install-acm-offline.md deleted file mode 100644 index 41b90087b..000000000 --- a/content/nms/acm/how-to/install-acm-offline.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: "Offline Installation Guide" -nd-docs: "DOCS-1669" ---- - -## Install or Upgrade API Connectivity Manager {#install-or-upgrade-acm-offline} - -{{< eol-call-out "warning" "End of Sale Notice:" >}} -F5 NGINX is announcing the **End of Sale (EoS)** for NGINX Instance Manager API Connectivity Manager Module, **effective January 1, 2024**. - -F5 maintains generous lifecycle policies that allow customers to continue support and receive product updates. Existing API Connectivity Manager Module customers can continue to use the product past the EoS date. **License renewals are not available after September 30, 2024**. - -See our [End of Sale announcement](https://my.f5.com/manage/s/article/K000137989) for more details. -{{< /eol-call-out >}} - -### Dependencies with Instance Manager {#acm-nim-dependencies} - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -### Install API Connectivity Manager {#install-acm} - -{{< call-out "important" >}} -API Connectivity Manager requires Instance Manager to be installed first. - -Before you begin: - -1. Review the [Dependencies with Instance Manager](#acm-nim-dependencies) table above. -2. [Install a compatible version of Instance Manager](#install-nim-offline). -{{< /call-out >}} - -  - -{{}} -{{%tab name="CentOS, RHEL, and RPM-Based"%}} - -To install API Connectivity Manager, take the following steps: - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package files. - -2. Install the API Connectivity Manager package: - - ```bash - sudo rpm -ivh --nosignature /home//nms-api-connectivity-manager_.x86_64.rpm - ``` - -{{%/tab%}} -{{%tab name="Debian, Ubuntu, and Deb-Based"%}} - -To install API Connectivity Manager, take the following steps: - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package files. - -2. Install the API Connectivity Manager package: - - ```bash - sudo apt-get install -f /home//nms-api-connectivity-manager__amd64.deb - ``` - - -{{%/tab%}} -{{}} - -3. Enable and start the API Connectivity Manager service: - - ```bash - sudo systemctl enable nms-acm --now - ``` - - F5 NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. - -4. Restart the NGINX web server: - - ```bash - sudo systemctl restart nginx - ``` - -### Post-Installation Steps {#acm-post-install-steps} - -{{< include "installation/optional-installation-steps.md" >}} - -See these topics below for instructions on how to access the web interface and add your license: - -- [Access the web interface](#access-web-ui) -- [Add a license](#add-license) - -### Upgrade API Connectivity Manager {#upgrade-acm-offline} - -{{}} -{{%tab name="CentOS, RHEL, and RPM-Based"%}} - -To upgrade API Connectivity Manager to a newer version, take the following steps: - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package file. - -2. 
Upgrade the API Connectivity Manager package: - - ```bash - sudo rpm -Uvh --nosignature /home/user/nms-api-connectivity-manager_.x86_64.rpm - ``` - -{{%/tab%}} -{{%tab name="Debian, Ubuntu, and Deb-Based"%}} - -To upgrade API Connectivity Manager to a newer version, take the following steps: - -1. Log in to the [MyF5 Customer Portal](https://account.f5.com/myf5) and download the API Connectivity Manager package file. - -2. Upgrade the API Connectivity Manager package: - - ```bash - sudo apt-get -y install -f /home/user/nms-api-connectivity-manager__amd64.deb - ``` - -{{%/tab%}} -{{}} - - -3. Restart the NGINX Management Suite platform services: - - ```bash - sudo systemctl restart nms - ``` - - NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. - -4. Restart the API Connectivity Manager service: - - ```bash - sudo systemctl restart nms-acm - ``` - -5. Restart the NGINX web server: - - ```bash - sudo systemctl restart nginx - ``` - -6. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< ref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore SELinux contexts (`restorecon`) for the files and directories related to NGINX Management Suite. - - -### Set Up the Data Plane {#acm-offline-dependencies} - -The API Connectivity Manager data plane requires [NGINX Plus R24](https://docs.nginx.com/nginx/) or later and [njs](https://nginx.org/en/docs/njs/).. - -1. Log in to MyF5 and download your `nginx-repo.crt` and `nginx-repo.key` files. -2. Copy the `nginx-repo.crt` and `nginx-repo.key` files to the `/etc/ssl/nginx/` directory: - - ```bash - sudo cp nginx-repo.crt /etc/ssl/nginx/ - sudo cp nginx-repo.key /etc/ssl/nginx/ - ``` - -3. Select the following link to download the `fetch-external-acm-dataplane-dependencies.sh` script. This script downloads the necessary NGINX Plus and njs packages to a `tar.gz` archive. - - {{}} {{}} - -4. To download the NGINX Plus and njs dependencies, run the `fetch-external-acm-dataplane-dependencies.sh` script. As parameters, specify your Linux distribution and the location of your `nginx-repo.crt` and `nginx-repo.key` files. - - ```bash - sudo bash fetch-external-acm-dataplane-dependencies.sh /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key - ``` - - Supported Linux distributions: - - - `ubuntu18.04` - - `ubuntu20.04` - - `debian10` - - `debian11` - - `centos7` - - `centos8` - - `rhel7` - - `rhel8` - - `amzn2` - - For example, to download external dependencies for Ubuntu 20.04: - - ```bash - sudo bash fetch-external-acm-dataplane-dependencies.sh ubuntu20.04 /etc/ssl/nginx/nginx-repo.crt /etc/ssl/nginx/nginx-repo.key - ``` - - In this example, the script creates an archive called `acm-dataplane-dependencies-ubuntu20.04.tar.gz` with the external dependencies. - -5. After you copy and extract the bundle onto your target machine, take the following steps to install the packages: - - {{< call-out "note" >}}The bundled NGINX Plus package may conflict with installed versions of NGINX Plus. 
Delete the package from the bundle if you want to keep the existing version.{{< /call-out >}} - - {{}} - {{%tab name="CentOS, RHEL, and RPM-Based"%}} - -```bash -tar -kzxvf acm-dataplane-dependencies-.tar.gz -sudo rpm -ivh *.rpm -``` - - {{%/tab%}} - {{%tab name="Debian, Ubuntu, and Deb-Based"%}} - -```bash -tar -kzxvf acm-dataplane-dependencies-.tar.gz -sudo dpkg -i ./*.deb -``` - -{{%/tab%}} -{{}} diff --git a/content/nms/acm/how-to/install-acm.md b/content/nms/acm/how-to/install-acm.md deleted file mode 100644 index 46973a608..000000000 --- a/content/nms/acm/how-to/install-acm.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -description: Follow the steps in this guide to install or upgrade F5 NGINX Management - Suite API Connectivity Manager. -nd-docs: DOCS-1213 -layout: acm-eos -title: Install or Upgrade API Connectivity Manager -toc: true -weight: 10 -type: -- tutorial ---- - ---- - -## Before You Begin - -### Security Considerations - -{{< include "installation/secure-installation.md" >}} - -### Installation Prerequisites - -{{< include "installation/nms-prerequisites.md" >}} - -### Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - ---- - -## Install API Connectivity Manager - -{{}} - -{{%tab name="CentOS, RHEL, RPM-Based"%}} - -1. To install the latest version of API Connectivity Manager, run the following command: - - ```bash - sudo yum install -y nms-api-connectivity-manager - ``` - -{{%/tab%}} - -{{%tab name="Debian, Ubuntu, Deb-Based"%}} - -1. To install the latest version of API Connectivity Manager, run the following commands: - - ```bash - sudo apt-get update - sudo apt-get install nms-api-connectivity-manager - ``` - -{{%/tab%}} - -{{}} - -2. Enable and start the F5 NGINX Management Suite services: - - ```bash - sudo systemctl enable nms nms-core nms-dpm nms-ingestion nms-integrations nms-acm --now - ``` - - NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. - -3. Restart the NGINX web server: - - ```bash - sudo systemctl restart nginx - ``` - -### Post-Installation Steps - -{{< include "installation/optional-installation-steps.md" >}} - -### Accessing the Web Interface - -{{< include "installation/access-web-ui.md" >}} - -### Add License - -A valid license is required to make full use of all the features in API Connectivity Manager. - -Refer to the [Add a License]({{< ref "/nim/admin-guide/add-license.md" >}}) topic for instructions on how to download and apply a trial license, subscription license, or Flexible Consumption Program license. - ---- - -## Upgrade API Connectivity Manager {#upgrade-acm} - -{{< call-out "note" >}}When you confirm the upgrade, the upgrade process will automatically upgrade dependent packages as needed, including Instance Manager. If you prefer to [back up NGINX Management Suite]({{< ref "/nim/admin-guide/maintenance/backup-and-recovery.md" >}}) before upgrading, you can cancel the upgrade when prompted.{{< /call-out >}} - -
      - -{{}} -{{%tab name="CentOS, RHEL, RPM-Based"%}} - -1. To upgrade to the latest version of API Connectivity Manager, run the following command: - - ```bash - sudo yum update -y nms-api-connectivity-manager - ``` - -{{%/tab%}} - -{{%tab name="Debian, Ubuntu, Deb-Based"%}} - -1. To upgrade to the latest version of the API Connectivity Manager, run the following command: - - ```bash - sudo apt-get update - sudo apt-get install -y --only-upgrade nms-api-connectivity-manager - ``` - -{{%/tab%}} -{{}} - -2. Restart the NGINX Management Suite platform services: - - ```bash - sudo systemctl restart nms - ``` - - NGINX Management Suite components started this way run by default as the non-root `nms` user inside the `nms` group, both of which are created during installation. - -3. Restart the API Connectivity Manager service: - - ```bash - sudo systemctl restart nms-acm - ``` - -4. Restart the NGINX web server: - - ```bash - sudo systemctl restart nginx - ``` - -5. (Optional) If you use SELinux, follow the steps in the [Configure SELinux]({{< ref "/nim/system-configuration/configure-selinux.md" >}}) guide to restore the default SELinux labels (`restorecon`) for the files and directories related to NGINX Management Suite. - ---- -## What's Next - -### Set Up the Data Plane - -API Connectivity Manager requires one or more data plane hosts for the API Gateway. - -Complete the following steps for each data plane instance you want to use with API Connectivity Manager: - -1. [Install NGINX Plus R24 or later](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) -2. [Install NGINX njs module](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/nginscript/) -3. [Install the NGINX Agent]({{< ref "/nms/nginx-agent/install-nginx-agent.md" >}}) on your data plane instances to register them with NGINX Management Suite. - -### Install the Developer Portal - -- [Install the Developer Portal]({{< ref "/nms/acm/how-to/devportals/installation/install-dev-portal.md" >}}) - -### Get Started with API Connectivity Manager - -- [Create Workspaces and Environments for your API Infrastructure]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md" >}}) diff --git a/content/nms/acm/how-to/policies/_index.md b/content/nms/acm/how-to/policies/_index.md deleted file mode 100644 index d68daee96..000000000 --- a/content/nms/acm/how-to/policies/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Policies -weight: 1000 -url: /nginx-management-suite/acm/how-to/policies/ ---- \ No newline at end of file diff --git a/content/nms/acm/how-to/policies/access-control-routing.md b/content/nms/acm/how-to/policies/access-control-routing.md deleted file mode 100644 index e001a5487..000000000 --- a/content/nms/acm/how-to/policies/access-control-routing.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -description: Learn how to restrict access to your application servers based on JWT - claims or header values. -nd-docs: DOCS-1265 -title: Access Control Routing -toc: true -weight: 300 -type: -- how-to ---- - -{{< shortversions "1.3.0" "latest" "acmvers" >}} - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) clusters. 
-- You have published one or more [API Gateways or Developer Portals]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}) with either JSON Web Token Assertion or OAuth2 Introspection enabled. - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -### How to Access the REST API - -{{< include "acm/how-to/access-acm-api" >}} - -## Create Access Control Routing Policy - -Take the steps in this section if you would like to restrict access to Advanced Routes or HTTP methods based on either request headers or JWT tokens. - -{{}} - {{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Services > \**, where "your workspace" is the workspace that contains the API Proxy. -1. Select **Edit Proxy** from the Actions menu for the desired API Proxy. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu. -1. Select **Add route** to configure a rule. Select one or more keys and approved values which will be checked before allowing the end user access to the API. Optionally select an Advanced Route or list of HTTP methods which will restrict the Access Control check to requests which match that configuration. -1. Optionally set the return code, which should be returned to requests which do not satisfy the condition specified. - - - {{%/tab%}} - {{%tab name="API"%}} - -```json -"policies": { - "access-control-routing": [ - { - "action": { - "conditions": [ - { - "allowAccess": { - "httpMethods": ["GET"] - }, - "when": [ - { - "key": "token.role", - "matchType": "STRING", - "matchOneOf": { - "values": [ - "admin" - ] - } - }, - { - "key": "token.sub", - "matchType": "REGEX", - "matchOneOf": { - "values": [ - "^.*test.com" - ] - } - } - ] - } - ] - } - } - ] -``` - - {{%/tab%}} -{{}} - -{{< call-out "note" >}} - -- Any requests which do not match a specified condition will be allowed to access the API Gateway or Developer Portal. Adding a rule with no route or HTTP method specified means that -- Adding multiple match conditions in a rule requires that all conditions are matched in order to access the API. -- Adding the same configuration of route and HTTP method to multiple rules will be treated as an OR condition. -- Any requests which match multiple rules will be checked from most to least specific. -- If `matchType` is not specified, `STRING` will be used. -- If the token claim is an array value, `STRING` and `REGEX` behave differently. - - `STRING` will match if any of the values contained in the array match one of the values. - - `REGEX` will check against the array converted to a comma-separated string. For example, `[ "first", "second", "third" ]` will become `first,second,third` when the regular expression is checked against it. - -{{< /call-out >}} - -## Verification - -1. Attempt to contact the API Gateway or Developer Portal from a client -1. Contact the IP address from an allowed IP address. The traffic should not be denied. - - diff --git a/content/nms/acm/how-to/policies/advanced-security.md b/content/nms/acm/how-to/policies/advanced-security.md deleted file mode 100644 index c302322bd..000000000 --- a/content/nms/acm/how-to/policies/advanced-security.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -description: Learn how to add an F5 WAF for NGINX policy to your environment - by using the Advanced Security policy in NGINX Management Suite API Connectivity - Manager. 
-nd-docs: DOCS-1264 -title: Advanced Security -toc: true -weight: 350 -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## About Advanced Security Policy - -Use the *Advanced Security* policy to add a pre-defined F5 NGINX App Protect policy to your deployment. Doing so applies the rules specified in the policy to your APIs. -This lets you *Block* or *Monitor* security events that trigger the violations defined in the policy. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} -{{< include "acm/how-to/policies/api-owner-persona.md">}} - ---- - -## Before You Begin - -To complete the steps in this guide, you need the following: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more [Environments with an API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}). -- You have [published one or more API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}). -- You have [installed and set up NGINX App Protect]({{< ref "/nap-waf/v4/admin-guide/install-nms" >}}). - ---- - -## Policy Settings - -The applied policy is configurable, and all events created by rule violations go to the `Security Monitoring` dashboard in NGINX Management Suite. - -To create a new policy or modify an existing policy, navigate to the *App Protect* area of NGINX Management Suite. - -*NGINX App Protect* policies can also contain a reference to an Open API Specification, which enables payload schema validation on the data plane instance. - -{{< call-out "note" >}} - -For information on how to configure an *App Protect* policy, see [Configure F5 WAF for NGINX](https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#policy-configuration-overview). - -To create an F5 WAF for NGINX policy to use in your Advanced Security policy, see the [Create a Policy]({{< ref "/nim/nginx-app-protect/manage-waf-security-policies#create-security-policy" >}}) documentation. -{{< /call-out >}} - ---- - -## Applying the Policy - -*NGINX App Protect* policies can be applied to both *Environments* and *Proxies*, allowing for granular control. - -If you want a global monitoring (non-blocking) policy but need blocking on only a subset of your API endpoints, you can apply a monitoring policy to your environment and a blocking policy to the proxy deployed in that environment. - -Only the specific *Proxy* you apply the blocking policy to is enforced in blocking mode; the other endpoints in that environment are unaffected and inherit the monitoring policy from their parent *Environment*. - -Each *Proxy* in an *Environment* can also have its own policy applied if required. - -There are two ways to add an *Advanced Security* policy to your deployment: - -
      -Environment -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create an *Advanced Security* policy using the REST API, send an HTTP `POST` or `PUT` request to the *Environments* endpoint. - - -{{}} - -| Method | Endpoint | -|--------|----------------------------------------------------| -| `POST` | `/infrastructure/workspaces/{infra-workspace}/environments` | -| `PUT` | `/infrastructure/workspaces/{infra-workspace}/environments/{environment-name}` | - -{{}} - - -
      -JSON request - -```json -{ - "policies": { - "advanced-security": [ - { - "action": { - "policyRef": "" - } - } - ] - } -} -``` - -
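A minimal curl sketch for sending this to the endpoint in the table above is shown below. The hostname, credentials, and names are placeholders; the request body must be the full Environment definition with the `policies` block above merged in, and the `/api/acm/v1` base path and authentication method should be confirmed for your deployment.

```bash
# Sketch only -- the body must contain the complete Environment definition,
# not just the policies block shown above.
curl -X PUT \
  -u <user>:<password> \
  -H "Content-Type: application/json" \
  -d @environment-with-advanced-security.json \
  "https://<NMS-FQDN>/api/acm/v1/infrastructure/workspaces/<infra-workspace>/environments/<environment-name>"
```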
- -{{%/tab%}} - -{{%tab name="UI"%}} - -To create an *Advanced Security* policy using the web interface: - -{{< include "acm/webui-acm-login.md" >}} - -1. On the left menu, select **Infrastructure**. -2. Select a workspace in the list that contains the Environment you want to update. -3. On the workspace overview page, on the **Environments** tab, locate the Environment you want to update and select it. -4. On the Environment Overview page, locate the **API Gateway** you want to update and select it. -5. On the **API Gateway** overview page, select the **Manage** button. -6. On the *Advanced > Global Policies* page, locate **Advanced Security Policy**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. -7. On the *Advanced Security Policy* form, complete the necessary fields: - - - **Choose a NAP Policy Reference**: Specify the name of the policy you want to apply from the dropdown list. - -8. Select **Add**/**Save** to apply the policy to the Environment. -9. Select **Save and Submit** to deploy the configuration to the Environment. - -{{%/tab%}} - -{{
      }} -
      - -
      -Proxy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create an *Advanced Security* policy using the REST API, send an HTTP `POST` or `PUT` request to the Proxies endpoint. - - -{{}} - -| Method | Endpoint | -|--------|----------------------------------------------------| -| `POST` | `/services/workspaces/{service-workspace}/proxies` | -| `PUT` | `/services/workspaces/{service-workspace}/proxies/{proxy-name}` | - -{{}} - - -
      -JSON request - -```json -{ - "policies": { - "api-advanced-security": [ - { - "action": { - "policyRef": "", - "appProtectMode": "" - } - } - ] - } -} -``` - -
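To make the environment-plus-proxy pattern described earlier more concrete, the sketch below shows a proxy-level request body that pairs with a monitoring policy applied at the Environment level (the `advanced-security` body shown in the Environment section above). The policy name `nap-block-payments` is a hypothetical placeholder, and the accepted values for `appProtectMode` are not listed in this guide, so the mode below is a placeholder as well; substitute the policy names and mode used in your own deployment.

```json
{
  "policies": {
    "api-advanced-security": [
      {
        "action": {
          "policyRef": "nap-block-payments",
          "appProtectMode": "<blocking-mode>"
        }
      }
    ]
  }
}
```

With a monitoring policy on the Environment and a body like this on a single Proxy, only that Proxy is enforced in blocking mode; the other proxies keep the inherited monitoring behavior.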
- -{{%/tab%}} - -{{%tab name="UI"%}} - -To create an *Advanced Security* policy using the web interface: - -{{< include "acm/webui-acm-login.md" >}} - -1. On the left menu, select **Services**. -2. Select a workspace in the list that contains the *Proxy* you want to update. -3. On the workspace overview page, on the **API Proxies** tab, locate the *Proxy* you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -4. On the *Policies* page, locate **Advanced Security**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. -5. On the *Advanced Security Policy* form, complete the necessary fields: - - - **Choose your App Protect mode**: This enables or disables enforcement for a particular group of API endpoints; you may want to disable *App Protect* for some endpoints but not others. - - **Choose a NAP Policy Reference**: Specify the name of the policy you want to apply from the dropdown. - -6. Select **Add**/**Save** to apply the policy to the *Proxy*. -7. Select **Save and Submit** to deploy the configuration to the *Proxy*. - -{{%/tab%}} - -{{
      }} - -
      diff --git a/content/nms/acm/how-to/policies/allowed-http-methods.md b/content/nms/acm/how-to/policies/allowed-http-methods.md deleted file mode 100644 index 39de271d0..000000000 --- a/content/nms/acm/how-to/policies/allowed-http-methods.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -description: Learn how to block unwelcome requests to an endpoint by using the Allowed - HTTP Methods policy in F5 NGINX Management Suite API Connectivity Manager. -nd-docs: DOCS-1121 -title: Allowed HTTP Methods -toc: true -weight: 350 -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## About Allow HTTP Methods Policy - -Use the *Allowed HTTP Methods* policy to specify which methods you want to allow, while automatically blocking all the others. As an example, you could allow only `GET` requests for static content. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Before You Begin - -To complete the steps in this guide, you need the following: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more [Environments with an API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}). -- You have [published one or more API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}). - ---- - -## Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|------------------|-------|------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-----------------------| -| `allowedMethods` | array | `GET`, `PUT`, `POST`, `PATCH`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE` |

This array contains all of the possible HTTP methods. Methods listed in `allowedMethods` will be accepted; any omitted methods will be blocked with a return code of `405 Method Not Allowed` (default), or a code of your choice. Note: `HEAD` requests are treated the same as `GET` requests. | Yes | | -| `returnCode` | int | In range `400-599` | The status code to be returned if a method isn't included in the `allowedMethods` array. | No | System assigned `405` | - -{{< /bootstrap-table >}} - - ---- - -## Applying the Policy - -Follow these steps to restrict which HTTP methods clients can use to access your API. If the request's HTTP method is not in the allowed methods list, a `405 Method Not Allowed` response is returned by default, or you can specify a different error code. - -{{< call-out "note" >}} By enabling the `GET` method, the `HEAD` method is also enabled. {{< /call-out >}} - -
      - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create an *Allowed HTTP Methods* policy using the REST API, send an HTTP `POST` request to the Proxies endpoint. - - -{{}} - -| Method | Endpoint | -|--------|----------------------------------------------------| -| `POST` | `/services/workspaces/{service-workspace}/proxies` | - -{{}} - - -
      -JSON request - -```json -{ - "policies": { - "allowed-http-methods": [ - { - "action": { - "allowedMethods": [ - "GET", - "PUT", - "POST", - "PATCH", - "DELETE", - "CONNECT", - "OPTIONS", - "TRACE" - ], - "returnCode": 405 - } - } - ] - } -} -``` - -This JSON defines an *Allowed HTTP Methods* policy that specifies which HTTP methods are allowed. The listed methods (`GET`, `PUT`, `POST`, `PATCH`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`) are all allowed, and any other methods will return a `405 Method Not Allowed` response code. - -
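In practice you typically allow only a small subset of methods. The following minimal sketch accepts only `GET` and `POST` and returns a custom `404` for every other method; the method list and return code are illustrative values chosen from the options documented in the settings table above.

```json
{
  "policies": {
    "allowed-http-methods": [
      {
        "action": {
          "allowedMethods": [
            "GET",
            "POST"
          ],
          "returnCode": 404
        }
      }
    ]
  }
}
```

Because `GET` is allowed, `HEAD` requests are accepted as well, as noted above.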
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To create an *Allowed HTTP Methods* policy using the web interface: - -1. {{< include "acm/webui-acm-login.md" >}} -2. On the left menu, select **Services**. -3. Select a workspace in the list that contains the API Proxy you want to update. -4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -5. On the left menu, select **API Proxy > Advanced > Policies**. -6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **Allowed HTTP Methods**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. -7. On the *Allowed HTTP Methods* form, complete the necessary fields: - - - **Allow following HTTP Methods**: Specify the HTTP methods you want to allow. Any methods that aren't included will be blocked. - - **Custom response code for non-matching requests**: Specify the status code to return for blocked methods. The default is `405 Method Not Allowed`. - -8. Select **Add** to apply the policy to the API proxy. -9. Select **Save and Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} - -{{
}} diff --git a/content/nms/acm/how-to/policies/api-access-control-lists.md b/content/nms/acm/how-to/policies/api-access-control-lists.md deleted file mode 100644 index 1af0524f4..000000000 --- a/content/nms/acm/how-to/policies/api-access-control-lists.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -description: Learn how to protect your upstream TCP application servers by denying/allowing - access from certain client IP addresses, CIDR blocks, client IDs or JWT Claims. -nd-docs: DOCS-950 -toc: true -weight: 200 -title: API Access Control Lists ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) clusters. -- You have published one or more [API Gateways or Developer Portals]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}). - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -### How to Access the REST API - -{{< include "acm/how-to/access-acm-api" >}} - ---- - -## Create ACL IP Restriction Policy - -Take the steps in this section to deny or allow access to your API Gateways or Developer Portals for specific IP addresses or CIDR blocks by using ACL lists. - -{{}} - {{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy. -1. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu. -1. Provide the desired **Allowed IP Addresses** and/or **Denied IP Addresses**. Valid values include IPv4, IPv6, and CIDR blocks. To allow or deny all, use the * symbol. - - - {{%/tab%}} - {{%tab name="API"%}} - -```json -"policies": { - "acl-ip": [ - { - "action": { - "deny": ["*"], // Populate this array with your denied IP addresses - "allow": ["10.0.0.1"] - } - } - ] - } -``` - - {{%/tab%}} -{{}} - -{{< call-out "note" >}} - -- If you only set an allow list, then the deny list will default to deny all and vice versa. -- If IP addresses are not explicitly allowed, they will be denied. To allow IP addresses by default, include the `*` symbol in the allow list. -- The most specific rule applied will be used to allow or deny traffic. For example, IP addresses take priority over CIDR blocks. Smaller CIDR blocks take priority over larger ones. -{{< /call-out >}} - - - -### Verification - -1. Attempt to contact the API Gateway or Developer Portal from a denied IP address. The host should return the default `403 Forbidden` return code or the custom return code you have set. -1. Contact the API Gateway or Developer Portal from an allowed IP address. The traffic should not be denied. - -## Create ACL Consumer Restriction Policy - -Specific consumer client IDs or token claims can be denied or allowed access to your API Gateways or Developer Portals by following the steps in this section. - -{{}} - {{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Gateway or Dev Portal. -1. Select **Edit Advanced Config** from the **Actions** menu for the desired API Gateway or Dev Portal. -1.
On the **Policies** tab, select **Add Policy** from the **Actions** menu for the **ACL Consumer Restriction Policy**. -1. Set the **lookupVariable**. To route based on either **APIKey Authentication** or **Basic Authentication**, use "client.id" to limit the user based on client ID. For a token-based policy such as **JSON Web Token Assertion** or **OAuth2 Introspection**, use "token.{claimKey}". For example, "token.sub" would use the `sub` claim of a JWT token. -1. Provide the desired **Allowed List** and/or **Denied List**. - - {{%/tab%}} - {{%tab name="API"%}} - -```json -"policies": { - "acl-consumer": [ - { - "action": { - "lookupVariable": "client.id", - "allow": ["allowed-user"], - "deny": ["denied-user"] - } - } - ] - } -``` - - {{%/tab%}} - -{{}} - -{{< call-out "note" >}} - -- If you only set an allow list, then the deny list will default to deny all and vice versa. -- If values are not allowed, they will be denied by default if neither list contains a wildcard. - {{< /call-out >}} - -### Verification - -1. Attempt to contact the API Gateway or Developer Portal using a client that has been denied. The host should return the default `403 Forbidden` return code. -1. Attempt to contact the API Gateway or Developer Portal using an allowed client. The traffic should be successfully proxied. diff --git a/content/nms/acm/how-to/policies/apikey-authn.md b/content/nms/acm/how-to/policies/apikey-authn.md deleted file mode 100644 index 4bfae0ca0..000000000 --- a/content/nms/acm/how-to/policies/apikey-authn.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - secure API Gateways by applying an API key authentication policy. -nd-docs: DOCS-1117 -toc: true -weight: 400 -title: API Key Authentication -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## API Key Authentication - -{{< call-out "warning" >}} API key authentication is recommended for test environments only. For production environments, consider a more robust authentication method. {{< /call-out >}} - -Authentication & authorization policies allow a user to restrict access to their APIs by determining the caller's identity and access level. There are several API Gateway authentication/authorization policy types supported by API Connectivity Manager: API key authentication, basic authentication, OAuth2 JWT assertion, and OAuth2 token introspection. This guide focuses specifically on API key authentication. - -An API key is usually a long, pseudo-random string included in the request header or request URL. It is a shared secret between the API client and the API gateway. The server allows the client to access data only after the client authenticates the API key. - -API Connectivity Manager API owners can restrict access to their APIs with API keys. The API Proxy Policy can be configured to grant access to APIs only after verifying that the API Key is valid. - ---- - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with an [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}).
-- You have published one or more [API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}) - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -### How to Access the REST API - -{{< include "acm/how-to/access-acm-api" >}} - ---- - -## Create an API Key Authentication Policy - -Take the steps in this section if you want to restrict access to APIs to clients with a valid API key. You can set up an API key authentication policy using either the web interface or the REST API. - -{{}} -{{%tab name="API"%}} - -Send a POST request to add the API key authentication policy to the API Proxy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -{{< call-out "note" >}} To include sensitive data in Proxy `GET` requests, provide the query parameter `includes=sensitivedata`; otherwise, the response will have this data redacted. {{< /call-out >}} - -```json -{ - "policies": { - "apikey-authn": [ - { - "action": { - "apiKeyName": "apikey", - "suppliedIn": "header", - "credentialForward": false, - "errorReturnConditions": { - "notSupplied": { - "returnCode": 401 - }, - "noMatch": { - "returnCode": 403 - } - } - }, - "data": [ - { - "clientID": "clientA", - "apiKey": "" - }, - { - "clientID": "clientB" - } - ] - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|------------------------------------------------------------|----------|----------------------------|-----------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `apiKeyName` | string | Example: `clientAPIKey` | The name of the header or query parameter where the API key will be located in the API request. | No | `apikey` | -| `suppliedIn` | string | One of `["HEADER","QUERY"]`| How the API key will be supplied by the consumer of the API via HTTP request. | No | `HEADER` | -| `credentialForward` | boolean | `true/false` | If the API key credential is proxy-forwarded to the backend service in the HTTP header or query parameters. | No | `False` | -| `errorReturnConditions`
`.notSupplied` `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when an API key is not supplied. | No | `401` | -| `errorReturnConditions` `.noMatch` `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when the supplied API key does not match any configured key. | No | `403` | -| `data.clientID` | string | Example: `ClientA` | Identifies the client who is holding the API Key. | Yes | N/A | -| `data.apiKey` | string | Example: `` | The value of the API Key used to access the API. If an API Key is not provided, a random 32-byte key will be created. | No | N/A | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy. -2. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy. -3. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **API Key Authentication**. -4. Provide the **API Key name** if different from the default value `apikey`, and specify whether the key should be provided in the request **Header** or as a **Query** parameter. -5. Set custom error return code conditions if an API Key is **not supplied** or **does not match** a key configured for API access. -6. By default, NGINX will strip the API key from the request headers before forwarding the request to the backend service. To preserve the API key header, enable the toggle for **Forward credentials to backend service**. -7. Configure the associated **Client ID** and **API Key** for each client that requires API access. If an **API Key** is not provided, a random 32-byte key will be created. Repeat this process for all clients. -8. Select **Add** to apply the API key authentication policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{
      }} diff --git a/content/nms/acm/how-to/policies/basic-authn.md b/content/nms/acm/how-to/policies/basic-authn.md deleted file mode 100644 index 662e57bb3..000000000 --- a/content/nms/acm/how-to/policies/basic-authn.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - secure API Gateways by applying a basic authentication policy. -nd-docs: DOCS-1118 -toc: true -weight: 450 -title: Basic Authentication -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## Basic Authentication - -{{< call-out "warning" >}} Basic authentication is recommended for test environments only. For production environments, consider a more robust authentication method. {{< /call-out >}} - -Authentication & authorization policies allow a user to restrict access to their APIs by determining the caller's identity and access level. There are several API Gateway authentication/authorization policy types supported by API Connectivity Manager: API key authentication, basic authentication, OAuth2 JWT assertion, and OAuth2 token introspection. This guide focuses specifically on basic authentication. - -Basic authentication is a method for HTTP users to provide a username and password when making an API request. In basic HTTP authentication, a request contains a header field in the form of `Authorization: Basic `, where credentials is the Base64 encoding of username and password joined by a single colon. - -API Connectivity Manager API owners can restrict access to their APIs with usernames and passwords. The API Proxy Policy can be configured to grant access to APIs only after verifying that the username and password are valid. - ---- - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with an [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}). -- You have published one or more [API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}) - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -### How to Access the REST API - -{{< include "acm/how-to/access-acm-api" >}} - ---- - -## Create a Basic Authentication Policy - -Take the steps in this section if you want to restrict access to APIs to clients with a valid username and password. You can set up a basic authentication policy using either the web interface or the REST API. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add the basic authentication policy to the API Proxy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -{{< call-out "note" >}} To include sensitive data in Proxy `GET` requests, provide the query parameter `includes=sensitivedata`; otherwise, the response will have this data redacted. 
{{< /call-out >}} - -```json -{ - "policies": { - "basic-authn": [ - { - "action": { - "credentialForward": false, - "errorReturnConditions": { - "notSupplied": { - "returnCode": 401 - } - } - }, - "data": [ - { - "clientID": "ClientA", - "username": "UserA", - "password": "secret123" - } - ] - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|------------------------------------------------------------|----------|----------------------|------------------------------------------------------------------------------------------------------|----------|---------------| -| `credentialForward` | boolean | `true/false` | If the basic auth credentials are proxy-forwarded to the backend service in the HTTP header. | No | `False` | -| `errorReturnConditions`
`.notSupplied` `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when valid basic authentication credentials are not supplied. | No | `401` | -| `data.clientID` | string | Example: `ClientA` | Identifies the client who is holding the basic authentication credentials. | Yes | N/A | -| `data.username` | string | Example: `UserA` | The value of the client's username. | Yes | N/A | -| `data.password` | string | Example: `secret123` | The value of the client's password. | Yes | N/A | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy. -2. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy. -3. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **Basic Authentication**. -4. By default, NGINX will strip the basic authentication credentials from the request headers before forwarding the request to the backend service. To preserve the credentials, enable the toggle for **Forward credential**. -5. Set custom error return code conditions if basic authentication credentials are **not supplied**. -6. Configure the associated **Client ID**, **Username**, and **Password** for each client that requires API access. -7. Select **Add** to apply the basic authentication policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{
      }} diff --git a/content/nms/acm/how-to/policies/cluster-wide-config.md b/content/nms/acm/how-to/policies/cluster-wide-config.md deleted file mode 100644 index 0e7da1442..000000000 --- a/content/nms/acm/how-to/policies/cluster-wide-config.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -description: Learn how to configure the Cluster-Wide Config settings to fine tune - and control proxy cluster's behavior with performance enhancing configurations. -nd-docs: DOCS-1160 -title: Cluster-Wide Config -toc: true -weight: 498 -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-cluster-intro" >}} - ---- - -## About the Policy - -Use the *Cluster-Wide Config* settings to fine tune the worker connections, [hash table size](https://nginx.org/en/docs/hash.html), and keepalive settings to speed up data processing and improve the performance of the API proxy for large number of connections. When applied, the settings are applicable to all the instances in a proxy cluster. If the proxy cluster is shared between environments, the changes made in any environment will be reflected in all the other environments. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here's what you need to do: - -- Create an environment or edit an existing one. -- Check the cluster config settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each cluster. Save and publish the changes. - ---- - -## Policy Settings {#policy-settings} - -The following table lists the configurable settings and their default values for the policy. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values | Description | Required | Default | -|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| -| `mapHashBucketSize` | integer | example: 256 | Sets the bucket size for the map hash table. | No | 128 | -| `mapHashMaxSize` | integer | example: 2048 | Sets the maximum bucket size for the map hash table. | No | 2048 | -| `serverNamesHashBucket` | integer | example: 256 | Sets the bucket size for the server names hash tables | No | 256 | -| `serverNamesHashMaxSize` | integer | example: 1024 | Sets the maximum size of the server names hash tables. | No | 1024 | -| `workersConfig.connections` | integer | In range `256–65536` | Sets the maximum number of simultaneous connections that can be opened by a worker process. | No | 8192 | -| `workersConfig.maxProcesses` | string | `^(auto\|[1-9]\|[1-2][0-9]\|3[0-2])$` | Defines the number of worker processes. | No | auto | -| `workersConfig.maxLimitForOpenFile` | integer | In range `512–262144` | Changes the limit on the maximum number of open files (RLIMIT_NOFILE) for worker processes. Used to increase the limit without restarting the main process. | No | 20000 | -| `clientConnection.keepaliveTimeout` | string | ^([0-9]+)(([h\|m\|s]){1})$ | The first parameter sets a timeout during which a keep-alive client connection will stay open on the server side. | No | 75s | -| `clientConnection.keepaliveHeaderTimeout` | string | ^([0-9]+)(([h\|m\|s]){1})$ | ? | No | | -| `clientConnection.keepaliveRequests` | integer | In range `50–20000` | Sets the maximum number of requests that can be served through one keepalive connection. 
| No | 1000 | -| `clientConnection.keepaliveTime` | string | ^([0-9]+)(([h\|m\|s]){1})$ | Maximum time during which requests can be processed through one keepalive connection. | No | "1h" | -| `clientHeaderBuffer.size` | string | ([.\d]+)(?:M\|K) | Sets the maximum size of buffers used for reading a large client request header. | No | 8K | -| `clientHeaderBuffer.number` | integer | In range `1–64` | Sets the maximum number of buffers used for reading a large client request header. | No | 4 | -| `clientHeaderBuffer.timeout` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines a timeout for reading client request header. | No | "60s" | - -{{< /bootstrap-table >}} - - ---- - -## Updating Cluster-Wide Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To update the Cluster-Wide Config settings using the REST API, send an HTTP `PUT` request to the Proxy Clusters endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|--------|---------------------| -| `PUT` | `/infrastructure/workspaces/{infraWorkspaceName}/proxy-clusters/{clusterName}`| - -{{}} - 
-JSON request - Cluster-Wide Config with minimum configuration - -``` json -{ - "policies": { - "cluster-wide-config": [] - } -} -``` - -
      - -
-JSON request - Cluster-Wide Config with all options specified - -``` json -{ - "policies": { - "cluster-wide-config": [ - { - "action": { - "clientConnection": { - "keepaliveRequests": 1000, - "keepaliveTime": "1h", - "keepaliveTimeout": "75s" - }, - "clientHeaderBuffer": { - "number": 4, - "size": "8K", - "timeout": "60s" - }, - "mapHashBucketSize": 128, - "mapHashMaxSize": 2048, - "serverNamesHashBucket": 256, - "serverNamesHashMaxSize": 1024, - "workersConfig": { - "connections": 8192, - "maxLimitForOpenFile": 20000, - "maxProcesses": "auto" - } - } - } - ] - } -} -``` - -
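All of the fields in this policy are optional, so a request can override just a few values and leave the rest at the defaults listed in the settings table above. The sketch below, for example, raises only the worker connection limit, the open-file limit, and the keepalive request ceiling; the specific numbers are illustrative values chosen from within the documented ranges, not tuning recommendations.

```json
{
  "policies": {
    "cluster-wide-config": [
      {
        "action": {
          "workersConfig": {
            "connections": 16384,
            "maxLimitForOpenFile": 65536
          },
          "clientConnection": {
            "keepaliveRequests": 5000
          }
        }
      }
    ]
  }
}
```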
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To create a Cluster-Wide Config setting using the web interface: - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. On the left menu, select **Infrastructure**. -3. Choose the workspace that contains your cluster's environment from the list of workspaces. -4. In the **Environments** section, select the environment name for your cluster. -5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Cluster Config**. -6. On the left menu, select **Cluster Policies**. -7. In the list of Cluster Policies, the Cluster-Wide Config setting should be enabled by default. To edit the policy, select the ellipsis icon (`...`), then select **Edit Cluster Config**. -8. Customize the policy settings to suit your requirements. Refer to the [Policy Settings](#policy-settings) section for an overview of the available options and their possible configurations. -9. Select **Save** to save the changes. -10. Select **Save and Submit** to publish the policy changes to the environment. - -{{%/tab%}} - -{{
      }} - ---- - -## Verify the Policy - -Confirm that the policy has been set up and configured correctly by taking these steps: - -- Verify the NGINX configuration was applied by this policy. - - diff --git a/content/nms/acm/how-to/policies/cluster-zone-sync.md b/content/nms/acm/how-to/policies/cluster-zone-sync.md deleted file mode 100644 index 4ef0bf02a..000000000 --- a/content/nms/acm/how-to/policies/cluster-zone-sync.md +++ /dev/null @@ -1,415 +0,0 @@ ---- -description: Learn how to configure the Cluster Zone Sync policy to enable runtime - state sharing between the instances belonging to a proxy cluster. -nd-docs: DOCS-1159 -title: Cluster Zone Sync -toc: true -weight: 499 -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-cluster-intro" >}} - ---- - -## About the Policy - -Use the *Cluster Zone Sync* policy to enable runtime state sharing between the instances belonging to a proxy cluster. Options configured through this policy affect other policies such as rate limit and OIDC. This policy is applied to all the instances in a proxy cluster. If the proxy cluster is shared between environments, any changes made to this policy will affect all the other environments. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here's what you need to do: - -- Create an environment or edit an existing one. -- Check the cluster config settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each cluster. Save and publish the changes. - -{{< call-out "note" >}} -We strongly recommend securing your Zone Sync environment by enabling TLS for your listeners and Zone Sync TLS verification for the policy. To do this, you'll need to provide server certificates, as well as Zone Sync certificates and CA certs. - -When adding a new instance to a cluster with the Zone Sync policy applied, make sure the instance is resolvable by DNS if a DNS server is used, or that the Zone Sync Server list is updated to include the instance if the list is provided manually. - -Similarly, when removing an instance from a cluster with the Zone Sync policy applied, be sure to do the necessary clean-up in the DNS resolver or the Zone Sync Server list. -{{< /call-out >}} - ---- - -## Policy Settings - -The following table lists the configurable settings and their default values for the policy. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values | Description | Required | Default | -|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| -| `tcpServer.listeners[].transportProtocol` | string | ["TCP"] | Stream listener to configure protocol for zone sync stream. | No | "TCP" | -| `tcpServer.listeners[].port` | integer | In range `1-65535` | Stream listener to configure port for zone sync stream. | Yes | | -| `tcpServer.listeners[].enableTLS` | boolean | fase/true | Stream listener to enable TLS for zone sync stream. | No | false | -| `tcpServer.listeners[].ipv6` | boolean | false/true | Stream listener to enable ipv6 for zone sync stream. | No | false | -| `tcpServer.hostnames` | array | | Configure hostnames | No | [] | -| `tcpServer.tlsCipher` | string | | Specifies the enabled ciphers. 
The ciphers are specified in the format understood by the OpenSSL library | No | ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5 | -| `tcpServer.tlsSessionCache.enable` | string | [ "on", "off", "none" ] | Specifies session parameters to avoid SSL handshakes for parallel and subsequent connections. | No | "on" | -| `tcpServer.tlsSessionCache.type` | string | [ "shared", "builtin" ] | Specifies session parameters to avoid SSL handshakes for parallel and subsequent connections. | No | "shared" | -| `tcpServer.tlsSessionCache.size` | string | ^([0-9]+)(([K\|M\|G]){1})$ | Maximum size of the Cache. Valid units are: K, M, G for kilobytes, megabytes, and gigabytes, respectively. | No | "10M" | -| `tcpServer.tlsProtocols` | array | ["TLSv1.1", "TLSv1.2", "TLSv1.3"] | Enables the specified protocols. | No | [ "TLSv1.2" ] | -| `tcpServer.tlsSessionTimeout` | string | ^([0-9]+)(([d\|h\|m\|s]){1})$ | Specifies cache timeout. Valid units are: s, m, h and d for seconds, minutes, hours, and days respectively. | No | "5m" | -| `zoneSyncServers[].hostname` | array | | Defines the address of a cluster node. The address can be specified as a domain name or IP address. A domain name that resolves to several IP addresses defines multiple nodes at once. | Yes | | -| `zoneSyncServers[].port` | array | | Defines the address of a cluster node. The address can be specified as a domain name or IP address. A domain name that resolves to several IP addresses defines multiple nodes at once. | Yes | | -| `enableZoneSyncTLS` | boolean | false | Enables the TLS protocol for connections to another cluster server. When this is enabled certificates need to be provided in the data section of the policy. | No | System assigned | -| `enableZoneSyncCertVerify` | boolean | false | Enables the TLS verification for connections to another cluster server. When this is enabled certificates need to be provided in the data section of the policy. | No | System assigned | -| `zoneSyncCertChainVerifyDepth` | integer | 1 | Sets the verification depth for another cluster server certificates chain. | No | System assigned | -| `zoneSyncEnableSNI` | boolean | false | Enables or disables passing of the server name through TLS Server Name Indication (SNI) when establishing a connection with another cluster server. | No | System assigned | -| `zoneSyncTLSName` | string | In range `1–110` | Allows overriding the server name used to verify the certificate of a cluster server and to be passed through SNI when establishing a connection with the cluster server. | No | | -| `zoneSyncBuffers.number` | integer | 1–128 | Configure size and umber of per-zone buffers used for pushing zone contents. A single buffer must be large enough to hold any entry of each zone being synchronized. | No | 8 | -| `zoneSyncBuffers.size` | string | ^[0-9]+[K\|M\|G]{1}$ | Configure size and umber of per-zone buffers used for pushing zone contents. A single buffer must be large enough to hold any entry of each zone being synchronized. | No | "8k" | -| `zoneSyncConnectionRetryInterval` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines an interval between connection attempts to another cluster node. | No | "1s" | -| `zoneSyncConnectionTimeout` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines a timeout for establishing a connection with another cluster node. | No | "5s" | -| `zoneSyncInterval` | string | ^[0-9]+[h\|m\|s]{1}$ | Defines an interval for polling updates in a shared memory zone. 
| No | "1s" | -| `zoneSyncTimeout` | string | ^[0-9]+[h\|m\|s]{1}$ | Sets the timeout between two successive read or write operations on connection to another cluster node. | No | "1s" | - -{{< /bootstrap-table >}} - - ---- - -## Adding Cluster Zone Sync Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create an Cluster Zone Sync policy using the REST API, send an HTTP `PUT` request to the Add-Endpoint-Name-Here endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|--------|---------------------| -| `PUT` | `/infrastructure/workspaces/{infraWorkspaceName}/proxy-clusters/{clusterName}` | - -{{}} - - -
      -JSON request - Cluster Zone Sync with minimum configuration - -``` json -{ - "cluster-zone-sync": [ - { - "action": { - "tcpServer": { - "listeners": [ - { - "port": 12345 - } - ] - }, - "zoneSyncServers": [ - { - "name": "nginx-cluster-instance-1.com", - "port": 12345 - }, - { - "name": "nginx-cluster-instance-2.com", - "port": 12345 - } - ] - } - } - ] -} -``` - -
      - -
-JSON request - Cluster Zone Sync with DNS Resolver - -``` json -{ - "cluster-zone-sync": [ - { - "action": { - "tcpServer": { - "listeners": [ - { - "port": 12345 - } - ] - }, - "resolver": { - "enableIPv6": false, - "valid": "30s", - "timeout": "5s", - "servers": [ - { - "hostname": "192.0.2.0" - } - ] - }, - "zoneSyncServers": [ - { - "name": "nginx-cluster.com", - "port": 12345 - } - ] - } - } - ] -} -``` - -
      - -
-JSON request - Cluster Zone Sync with DNS Resolver and TCP Server TLS enabled - -``` json -{ - "cluster-zone-sync": [ - { - "action": { - "tcpServer": { - "listeners": [ - { - "port": 12345, - "tlsEnabled": true - } - ] - }, - "resolver": { - "enableIPv6": false, - "valid": "30s", - "timeout": "5s", - "servers": [ - { - "hostname": "192.0.2.0" - } - ] - }, - "zoneSyncServers": [ - { - "name": "nginx-cluster.com", - "port": 12345 - } - ] - }, - "data": { - "serverCerts": [ - { - "key": "", - "cert": "" - } - ] - } - } - ] -} - -``` - -
      - -
-JSON request - Cluster Zone Sync with secure TLS between nodes in a cluster - -``` json -{ - "cluster-zone-sync": [ - { - "action": { - "tcpServer": { - "listeners": [ - { - "port": 12345, - "tlsEnabled": true - } - ] - }, - "resolver": { - "enableIPv6": false, - "valid": "30s", - "timeout": "5s", - "servers": [ - { - "hostname": "192.0.2.0" - } - ] - }, - "enableZoneSyncTLS": true, - "zoneSyncServers": [ - { - "name": "nginx-cluster.com", - "port": 12345 - } - ] - }, - "data": { - "serverCerts": [ - { - "key": "", - "cert": "" - } - ], - "zoneSyncCerts": [ - { - "key": "", - "cert": "" - } - ], - "zoneSyncTrustedCACert": "" - } - } - ] -} - -``` - -
      - -
      -JSON request - Cluster Zone Sync with all options specified - -``` json -{ - "cluster-zone-sync": [ - { - "action": { - "tcpServer": { - "listeners": [ - { - "port": 12345, - "tlsEnabled": true - } - ], - "hostnames": ["10.0.0.9"], - "tlsProtocols": ["TLSv1.1", "TLSv1.2", "TLSv1.3"], - "tlsCipher": "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384", - "tlsSessionCache": { - "enable": "none" - }, - "tlsSessionTimeOut": "15m" - }, - "resolver": { - "enableIPv6": false, - "valid": "30s", - "timeout": "5s", - "servers": [ - { - "hostname": "192.0.2.0" - } - ] - }, - "enableZoneSyncTLS": true, - "enableZoneSyncCertVerify": true, - "zoneSyncCertChainVerifyDepth": 2, - "zoneSyncEnableSNI": true, - "zoneSyncTLSServerName": "custom-sni-host.com", - "zoneSyncServers": [ - { - "name": "nginx-cluster.com", - "port": 12345 - } - ], - "syncBuffers": { - "number": 10, - "size": "8k" - }, - "connectionRetryInterval": "8s", - "connectionTimeout": "10m", - "timeout": "5s", - "interval": "1s" - }, - "data": { - "serverCerts": [ - { - "key": "", - "cert": "" - } - ], - "zoneSyncCerts": [ - { - "key": "", - "cert": "" - } - ], - "zoneSyncTrustedCACert": "" - } - } - ] -} - -``` - -
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To create a Cluster Zone Sync policy using the web interface: - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. On the left menu, select **Infrastructure**. -3. Choose the workspace that contains your cluster's environment from the list of workspaces. -4. In the **Environments** section, select the environment name for your cluster. -5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Cluster Config**. -6. On the left menu, select **Cluster Policies**. -7. Locate the **Cluster Zone Sync** policy in the list of policies. On the **Actions** menu (represented by an ellipsis, `...`), select **Add Policy**. -8. On the **Cluster Zone Sync** form, complete the necessary fields: - - - **TLS Server Settings - Port**: Specify port for zone sync stream server. - - **Zone Sync Settings - hostname**: Enter the address of a cluster node. The address can be specified as a domain name or IP address. A domain name that resolves to several IP addresses defines multiple nodes at once. -9. Select **Add** to apply the policy to the cluster. -10. Select **Save and Submit** to deploy the configuration. - -{{%/tab%}} - -{{
      }} - ---- - -## Verify the Policy - -Confirm that the policy has been set up and configured correctly by taking these steps: - -- Verify OIDC KeyValue Zone Sync is synchronized between instances within a cluster. -- Verify no OIDC session issues are presented when using multiple instances in a cluster. -- Verify applied rate limit for a proxy in a cluster is synchronized between instances within a cluster. - ---- - -## Troubleshooting - -For help resolving common issues when setting up and configuring the policy, follow the steps in this section. If you cannot find a solution to your specific issue, reach out to [NGINX Customer Support]({{< ref "/nms/support/contact-support.md" >}}) for assistance. - -### Issue 1 - -When the runtime state is not syncing between the instances in a desired proxy cluster. - -Resolution/Workaround: - -1. Ensure the tcp listener port of each instance is accessible within the desired proxy cluster. -2. By default the tcp listener port is open for all, but if you've provided tcp hostnames, then ensure the desired hostnames are resolvable. - -### Issue 2 - -If you see errors in the NGINX logs related to TLS when TLS is enabled (`enableZoneSyncTLS` is set to `true`) and zone sync servers are IP addresses, try the following workarounds: - -Resolution/Workaround: - -1. Ensure that you override the default server name (`zoneSyncTLSName`) used to verify the certificate of the desired cluster server to match the Subject Alternative Name of the cert provided. -2. If you are using DNS for zone sync servers, make sure you use the correct certificate that matches the hostname provided under zone sync server option (`zoneSyncServers[].hostname`). diff --git a/content/nms/acm/how-to/policies/cors.md b/content/nms/acm/how-to/policies/cors.md deleted file mode 100644 index e4b2ef3e6..000000000 --- a/content/nms/acm/how-to/policies/cors.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - handle Cross-Origin Resource Sharing for your backend services. -nd-docs: DOCS-1130 -title: CORS -toc: true -weight: 500 -type: -- reference ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## About the Policy - -The CORS policy allows users to configure API Gateways to set the required headers to allow Cross-Origin Resource Sharing (CORS). CORS is a series of headers instructing web browsers which origins should be permitted to load resources other than the API Gateway origin. - -### Intended Audience - -{{< include "acm/how-to/policies/api-owner-persona.md">}} - ---- - -## Before You Begin - -To complete the steps in this guide, you need the following: - -- API Connectivity Manager is installed, licensed, and running. -- An [API gateway environment]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) -- A [published API Gateway]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}) - ---- - -## Policy Settings - - - -The following table lists the configurable settings and their default values for the policy. 
- - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values | Description | Required | Default | -| ------------------- | ------------ | ---------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------------------------------------------------------------------------- | -| `allowCredentials` | boolean | `true`, `false` | When set to `true`, the `Access-Control-Allow-Credentials` header is set to `true` for all responses. | No | `false` | -| `allowHeaders` | string array | Example: `["X-header-name", "Authorization"]` | Used to set the `Access-Control-Allow-Headers` header, which tells the browser which headers can be used in the request. | No | `["Authorization", "Origin", "Content-Type", "Accept", "X-Cache-Status"]` | -| `allowMethods` | string array | `["GET", "HEAD", "PUT", "PATCH", "POST", "DELETE", "OPTIONS", "TRACE", "CONNECT"]` | Used to set the `Access-Control-Allow-Methods` header, which tells the browser which methods can be used in the request. | No | `["GET", "HEAD", "OPTIONS"]` | -| `allowOrigins` | Origin array | Example: `[{"exact":"example1.com"},{"exact":"example2.com"}]` | Used to set the `Access-Control-Allow-Origins` header, which tells the browser which origins can make a request. If set to `[{"exact":"*"}]` all origins will be accepted. | No | `[{"exact":"*"}]` | -| `exposedHeaders` | string array | Example: `[ "header-name", "x-correlation-id", "*" ]` | Used to set the `Access-Control-Expose-Headers` header, which tells the browser which headers can be accessed in the response. | No | `[]` | -| `maxAge` | integer | 5–60000 | Used to set the `Access-Control-Max-Age` header, which tells the browser what is the maximum length of time in seconds that preflight requests can be cached | No | N/A | -| `preflightContinue` | boolean | `true`, `false` | When set to `true`, preflight requests are proxied to the backend service. Otherwise, they are handled by the API Gateway. | No | `false` | - -{{< /bootstrap-table >}} - - -{{< call-out "note" >}} -Setting a wildcard (`*`) in `exposedHeaders` does not include headers related to `Access-Control-Allow-Credentials`; those must explicitly be added to exposed headers. -{{< /call-out >}} - ---- - -## Adding XYZ Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create an CORS policy using the REST API, send an HTTP `PUT` request to the Proxies endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -| ------ | ------------------------------------------------------- | -| `POST` | `/services/workspaces/{SERVICE_WORKSPACE_NAME}/proxies` | - -{{}} - - -
      -JSON request - -``` json -{ - "policies": { - "cors": [ - { - "action": { - "allowCredentials": true, - "allowMethods": [ - "GET", "HEAD", "PUT", "PATCH", "POST" - ], - "allowOrigins": [ - { - "exact": "example.com" - } - ], - "exposedHeaders": [ - "header-name", "x-correlation-id" - ], - "maxAge": 30000 - } - } - ] - } -} -``` - -
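The example above allows a single origin. If a browser application is served from more than one host, each origin can be listed explicitly instead of relying on the wildcard default. The sketch below is illustrative only: the origins, headers, methods, and `maxAge` are placeholder values drawn from the options documented in the settings table.

```json
{
  "policies": {
    "cors": [
      {
        "action": {
          "allowOrigins": [
            { "exact": "app.example.com" },
            { "exact": "admin.example.com" }
          ],
          "allowMethods": [
            "GET",
            "POST",
            "OPTIONS"
          ],
          "allowHeaders": [
            "Authorization",
            "Content-Type"
          ],
          "maxAge": 600
        }
      }
    ]
  }
}
```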
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. On the left menu, select **Services**. -3. Select a workspace in the list that contains the API Proxy you want to update. -4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -5. On the left menu, select **API Proxy > Advanced > Policies**. -6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **CORS**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. -7. Modify the CORS configuration as needed. -8. Select **Save** to apply the policy to the API Proxy. -9. Select **Save and Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} - -{{
      }} - ---- diff --git a/content/nms/acm/how-to/policies/error-response-format.md b/content/nms/acm/how-to/policies/error-response-format.md deleted file mode 100644 index f69a1e3af..000000000 --- a/content/nms/acm/how-to/policies/error-response-format.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager - to configure the Error Response Format policy that customizes HTTP error codes and - messages. -nd-docs: DOCS-1345 -title: Error Response Format -toc: true -weight: 550 -type: -- how-to ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## About the Policy - -This policy specifies how the API gateway will intercept HTTP errors from the backend(s) and respond to the client with a standard or customized error response in JSON format. -The client will receive the Custom Status and Error Message in JSON format, instead of the standard HTTP error coming from the backend. -The Error Response Format policy is applied by default to any new environment. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, follow these steps: - -- [Edit an existing environment or create a new one]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). -- Review the advanced settings for the environment to confirm if the policy has been applied. -- Edit the policy to make changes for each environment. Save and publish the changes. - ---- - -## Policy Settings - -{{< call-out "note" >}} - -Either `errorMessage` or `errorMessageBody` must be provided for each error code. - -{{< /call-out >}} - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default Value | -|------------------------|---------|------------------------------------|---------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `errorCode` | int | In range `400-599` | The error code that needs to be used by the NGINX data-plane to return to the user. | Yes | N/A | -| `errorMessage` | string | Max length `2048` | The customized error message that needs to be used by the NGINX data-plane to convey error information. | No | N/A | -| `errorMessageBody` | object | Example: `{"errMsg":"My Message"}` | The customized JSON errors that needs to be used by the NGINX data-plane to convey error information to the user. | No | N/A | - -{{< /bootstrap-table >}} - ---- - -## Applying the Policy - -You can apply this policy using the web interface or the REST API. - -
      - -{{}} - -{{%tab name="API"%}} - -To create an Error Response Format policy using the REST API, send an HTTP `POST` request to the environment endpoint. - -{{}} - -| Method | Endpoint | -|--------|---------------------------------------------------------------------| -| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | - -{{}} - -
      -JSON request - -```json -{ - "policies": { - "error-response-format": [ - { - "systemMetadata": { - "appliedOn": "inbound", - "context": "global" - }, - "action": { - "400": { - "errorCode": "13", - "errorMessage": "Bad Request" - } - } - } - ] - } -} -``` - -This JSON example defines an Error Response policy. - -
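As the settings table notes, each intercepted code needs either an `errorMessage` or an `errorMessageBody`. The sketch below shows one of each, keyed by the backend status code being intercepted in the same way as the example above; it assumes that several codes can be listed in a single `action` block (the UI lets you add multiple error codes), and the codes and messages themselves are illustrative only.

```json
{
  "policies": {
    "error-response-format": [
      {
        "action": {
          "404": {
            "errorCode": "404",
            "errorMessage": "The requested resource was not found"
          },
          "502": {
            "errorCode": "502",
            "errorMessageBody": {
              "errMsg": "The upstream service is temporarily unavailable"
            }
          }
        }
      }
    ]
  }
}
```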
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To add an Error Response Format policy using the web interface: - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -1. On the left menu, select **Infrastructure**. -1. Choose the workspace that includes the environment for the cluster you want to add the policy to. -1. Select the environment. -1. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. -1. On the left menu, select **Global Policies**. -1. From the list of policies, locate the **Error Response Format** policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, `...`). -1. Configure the associated **Error Code**, **Error Message** and **Error Message Body** for each error code. -1. Additional entries can be created by selecting “Add Error Code” at the bottom of the table. -1. Select **Add** to apply the policy to the cluster. -1. Select **Save and Submit** to deploy the configuration. - -{{%/tab%}} - -{{
      }} diff --git a/content/nms/acm/how-to/policies/grpc-policies.md b/content/nms/acm/how-to/policies/grpc-policies.md deleted file mode 100644 index 7dd107e2d..000000000 --- a/content/nms/acm/how-to/policies/grpc-policies.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - configure policies for your gRPC API Gateway. -nd-docs: DOCS-1084 -toc: true -weight: 600 -title: gRPC -type: -- how-to ---- - -{{< shortversions "1.3.0" "latest" "acmvers" >}} - -## Overview - -{{< include "acm/how-to/policies-intro.md" >}} - -Refer to the [Set Up Policies]({{< ref "/nms/acm/how-to/policies/manage-policies.md" >}}) topic for instructions on how to configure policies for your API Gateway and Developer Portal clusters and API Proxies. - ---- - -## Global Policies - -### Return Default gRPC Status Codes {#grpc-status-codes} - -The default NGINX error pages are suitable for conventional HTTP traffic. gRPC clients, however, expect [gRPC responses](https://github.com/grpc/grpc/blob/master/doc/statuscodes.md). - -To return default gRPC status codes, send a POST request to the Environments endpoint. - - -{{}} - -| Method | Endpoint | -|--------|---------------------------------------------------------| -| POST | `/infrastructure/workspaces//environments` | - -{{}} - - -
      -Example JSON request - -```json -{ - "name": "{{environmentname}}", - "type": "NON-PROD", - "functions": [ - "API-GATEWAY" - ], - "proxies": [ - { - "proxyClusterName": "{{instanceGroupName}}", - "hostnames": [ - "{{environmentHostname}}" - ], - "runtime": "GATEWAY-PROXY", - "listeners": [ - { - "port": 8085, - "transportProtocol": "GRPC" - } - ], - "policies": { - "error-response-format": [ - { - "action": { - "400": { - "errorCode": "13", - "grpcStatusCode": 5, - "errorMessage": "Bad Request" - } - } - } - ] - } - } - ] -} -``` - -
      - -### Log Format {#grpc-log-format} - -Use the following variables to log gRPC-specific information. These variables are enabled by default for gRPC APIs. - -{{}} - -| Variable | Description | -|---------------|----------------------------------------------------------------------------------------------------------------------| -| `grpcMethod` | The RPC method invoked in the call. | -| `grpcService` | The service; for example, `routeguide.RouteGuide` | -| `grpcStatus` | The gRPC [status code](https://github.com/grpc/grpc/blob/master/doc/statuscodes.md) returned by the upstream server. | -| `grpcMessage` | The `grpc-message` trailer/header | - -{{< /bootstrap-table >}} - -Take note of the following considerations when using these standard log format variables for logging gRPC details: - -- `requestURI` - This is the relative URI of the gRPC method. The HTTP2 `:path` pseudo-header is used for this. -- `timestamp` - For streaming methods, this value reflects when the stream is closed. -- `totalLatency` - For streaming methods, this value reflects the entire duration of the stream. -- `bodySize` - For streaming methods, this value counts all of the bytes sent during the duration of the stream and not for individual messages. - -### Request Body Size Limit - -For streaming methods, the request body size limit is enforced on the entire stream, not per individual message. Therefore, we recommend configuring the limit to be very large or disabling this policy altogether for long-lived streams. - ---- - -## API Proxy Policies - -### Auth Policies - -The following policies involve some degree of header reading and modifying depending on their configuration and work the same with [gRPC metadata](https://grpc.io/docs/what-is-grpc/core-concepts/#metadata): - -- API Key -- Basic Auth -- JWT Assertion -- OAuth2 Introspection - -Select `header` for any policy setting that configures the supplied-in value. - -For example, suppose the `Authorization` header is used for the API Key authentication, and credential forwarding has been enabled. In that case, the following example Go server code can access that value in the metadata as shown below: - -```go -// GetFeature returns the feature at the given point. -func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { -md, _ := metadata.FromIncomingContext(ctx) -fmt.Printf("Authorization: %+v\n", md.Get("Authorization")) -``` - -You can also modify these policies' error return conditions, so they return custom gRPC status codes. - -### Backend Configuration - -There is a separate policy for configuring upstream connection behavior specifically for the gRPC backend service. - -- In the web Interface, select the **Backend Config** policy. -- In the REST API, use the `grpc-backend-config` policy. - -By default, the following actions have a configured timeout of 7 days: - -- Reading client request headers (`client_header_timeout`) -- Reading client request body (`client_body_timeout`) -- Reading a response from the upstream gRPC server (`grpc_read_timeout`) -- Transmitting a request to the upstream gRPC server (`grpc_send_timeout`) - -You can configure this policy to override most of these values. 
-
-### Health Check
-
-gRPC-specific health checks can be configured for backends that implement the [official protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md), as well as for backends that don't implement it, by expecting an [unimplemented status code](https://docs.nginx.com/nginx/admin-guide/load-balancer/grpc-health-check/#grpc-servers-that-do-not-accept-health-checking-protocol).
-
-Conventional HTTP-based health checks can also be configured, but they cannot be used alongside gRPC health checks.
-
-### Customize gRPC Status Codes
-
-You can customize the following policies' [gRPC status code](https://github.com/grpc/grpc/blob/master/doc/statuscodes.md) values:
-
-- Rate Limit
-- API Key
-- Basic Auth
-- JWT Assertion
-- OAuth2 Introspection
-- ACL IP
-- TLS Inbound
-- Request Body Size Limit
-
-To set the gRPC status code:
-
-- In the web interface, any policy that contains *Error Handling* properties accepts `grpcStatusCode` rather than HTTP `returnCode`.
-- In the API, any policy with an `errorReturnConditions` object that contains a `returnCode` property, or a `returnCode` property at the top level, accepts a `grpcStatusCode` instead of or in addition to `returnCode`.
-
      - Example JSON request - - ```json - "policies": { - "acl-ip": [ - { - "action": { - "allow": ["10.0.0.2"], - "grpcStatusCode": 13 - } - } - ] - } - ``` - -
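-
-With a configuration like this, a client calling from an address that is not in the allow list should receive the customized gRPC status rather than an HTTP error page. One way to observe this is with [grpcurl](https://github.com/fullstorydev/grpcurl); the hostname, port, and method below are placeholders taken from the earlier examples in this guide, and the expected output is shown only approximately.
-
-```bash
-grpcurl -plaintext {{environmentHostname}}:8085 routeguide.RouteGuide/GetFeature
-# For a blocked client, grpcurl reports an error whose code corresponds to
-# the configured grpcStatusCode, for example:
-#   ERROR:
-#     Code: Internal
-```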
      - -### Unsupported Policies - -The following policies are not supported at this time: - -- Proxy Cache -- CORS -- Allow HTTP Method diff --git a/content/nms/acm/how-to/policies/health-check.md b/content/nms/acm/how-to/policies/health-check.md deleted file mode 100644 index c4a1ce481..000000000 --- a/content/nms/acm/how-to/policies/health-check.md +++ /dev/null @@ -1,308 +0,0 @@ ---- -description: Learn how to create, configure, and implement health check policies for - your HTTP and gRPC API Proxies using F5 NGINX Management Suite API Connectivity - Manager. -nd-docs: DOCS-1125 -title: Health Check -toc: true -weight: 610 -type: -- reference ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro.md" >}} - -## About the Policy - -API Connectivity Manager can configure your API Proxies to continually test your backend service targets (upstream servers), avoid the servers that have failed, and gracefully add the recovered services to a load-balanced group. This continuous testing is also known as "Active Health Checks," whereas "Passive Health Checks" monitor transactions as they occur. - -More information on NGINX health checks can be found at: - -- -- - -### Intended Audience - -{{< include "acm/how-to/policies/api-owner-persona.md">}} - ---- - -## HTTP Health Checks - -### Before You Begin - -To complete the steps in this guide, you need the following: - -- API Connectivity Manager is installed, licensed, and running -- You have [one or more Environments with an API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway.md" >}}) -- You have [published one or more API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy.md" >}}) -- Your backend service(s) has an HTTP health check endpoint and/or can return status codes in the range from 200 through 399 for health check requests. - -### Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values |
      Description
| Required | Default value |
-|------------------------------|----------|----------------------------|-----------------------------------------------------------------------------------------------------------------------|----------|---------------|
-| `targetBackendPolicyLabel` | string | Example: `default` | This field is used to target a specific backend by label. | No | `default` |
-| `transportProtocol` | string | One of `["http"]` | The transport protocol used by the service. Only HTTP is supported at this time. | No | `http` |
-| `isMandatory` | bool | `true/false` | Requires every newly added server to pass all configured health checks before F5 NGINX Plus sends traffic to it. | No | `false` |
-| `persistent` | bool | `true/false` | Determines whether the previous state is remembered after the configuration is reloaded. | No | `false` |
-| `port` | int | In range `1-65535` | The port on the service that will provide the health check. | No | N/A |
-| `interval` | int | Integer (Max 2147483647) | The length of time between each health check sent from NGINX to the respective service. | No | 5 |
-| `unhealthyThreshold` | int | Integer (Max 2147483647) | Denotes the number of failed checks before the service is considered unhealthy. | No | 1 |
-| `healthyThreshold` | int | Integer (Max 2147483647) | Denotes the number of successful checks before the service is considered healthy. | No | 1 |
-| `http`
`.uriPath` | string | Example: `/health` | The URI used for the health check; it is appended to the server domain name or IP address. | No | `/` |
-| `http`
      `.responseMatch`
      `.statusCode`
      `.exact` | int | In range `100-599` | List of specific status codes to match against | No | N/A | -| `http`
      `.responseMatch`
      `.statusCode`
      `.range` | string | Example: `["200-399"]` | List of status code ranges to match against | No | N/A | -| `http`
      `.responseMatch`
      `.header`
`.name` | string | Example: `header-name` | Any valid header name from the response | Yes | N/A |
-| `http`
      `.responseMatch`
      `.header`
`.value` | string | Example: `header-value` | Any valid header value from the response | Yes | N/A |
-| `http`
      `.responseMatch`
      `.header`
      `.condition` | string | Regex: `^([=!~]\|!~)$` | The matching operator for the header. Uses NGINX Health Check `match` directive syntax | Yes | N/A | -| `http`
      `.responseMatch`
      `.body`
`.requiredVariable` | string | Example: `jsonFieldKey` | The JSON field in the response body to match against | No | N/A |
-| `http`
      `.responseMatch`
      `.body`
      `.value` | string | Example: `jsonFieldValue` | Any valid body content to be matched against | Yes | N/A | -| `http`
      `.responseMatch`
      `.body`
      `.condition` | string | Regex: `^!?~$` | The matching operator for the body. Uses NGINX Health Check `match` directive syntax | Yes | N/A | -| `connectTimeout` | string | Example: `60s` | Sets a timeout for establishing a connection with a proxied server. Uses NGINX time measurement syntax | No | `1s` | -| `readTimeout` | string | Example: `60s` | Sets a timeout for reading a response from the proxied server. Uses NGINX time measurement syntax | No | `1s` | - -{{< /bootstrap-table >}} - - -### Create an HTTP Health Check Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create an HTTP health check policy, send an HTTP `POST` to the Proxies endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces/{SERVICE_WORKSPACE_NAME}/proxies` | - -{{
      }} - - -
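-
-As a sketch, the policy can be submitted with `curl`. The `https://<NMS_FQDN>/api/acm/v1` base URL and bearer-token authentication shown here are assumptions about a typical API Connectivity Manager deployment; substitute the host, base path, and credentials that apply to yours, and save the JSON body shown below to `health-check-policy.json`.
-
-```bash
-curl -X POST "https://<NMS_FQDN>/api/acm/v1/services/workspaces/{SERVICE_WORKSPACE_NAME}/proxies" \
-  -H "Authorization: Bearer <ACCESS_TOKEN>" \
-  -H "Content-Type: application/json" \
-  -d @health-check-policy.json
-```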
      -JSON request - -``` json -{ - "policies": { - "backend-health-check": [ - { - "action": { - "targetBackendPolicyLabel": "default", - "transportProtocol": "http", - "isMandatory": true, - "persistent": true, - "port": 8080, - "interval": 5, - "unhealthyThreshold": 3, - "healthyThreshold": 2, - "http": { - "uriPath": "/health_check", - "responseMatch": { - "statusCode": { - "range": [ - "200-399" - ] - }, - "header": { - "name": "some-header", - "value": "ok", - "condition": "=" - }, - "body": { - "requiredVariable": "jsonField", - "value": "some-response-body", - "condition": "~" - } - } - }, - "connectTimeout": "10s", - "readTimeout": "10s" - } - } - ] - } -} -``` - -
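-
-Under the hood, this policy maps onto the NGINX Plus active health-check directives. The snippet below is a hand-written, illustrative approximation of what the example above expresses (the upstream name is a placeholder); it is not the configuration that API Connectivity Manager actually generates.
-
-```nginx
-# The response must satisfy all of these conditions to count as healthy
-match backend_health {
-    status 200-399;
-    header some-header = ok;
-    body ~ "some-response-body";
-}
-
-location @health_check {
-    proxy_connect_timeout 10s;
-    proxy_read_timeout 10s;
-    proxy_pass http://backend_service_targets;
-    health_check uri=/health_check port=8080 interval=5 fails=3 passes=2
-                 mandatory persistent match=backend_health;
-}
-```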
      - -
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To create a Health Check policy using the web interface: - -1. {{< include "acm/webui-acm-login.md" >}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the left-side *API Proxy* menu, select **Policies**. -1. On the *Advanced > Policies* page, on the **API Proxy** tab, locate the **Backend Health Check** policy. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. -1. Complete the necessary fields: - - - **Apply the policy to**: Specify the label that was assigned to the backend service if it's different from the default value `default`. - - **Transport Protocol**: Specify the transport protocol of the health check. Currently, only HTTP is supported. - - **Is Mandatory**: Toggle the switch to on if every new service target (server) must pass all configured health checks before NGINX Plus sends traffic to it. - - **Port**: If the health check needs to be conducted on a port other than the one specified for the backend service targets, specify the port to use. - - **Interval**: The length of time between each health check sent from NGINX Plus to the backend service targets. - - **Unhealthy Threshold**: Denotes the number of failed checks before the service is considered unhealthy. - - **Health Threshold**: Denotes the number of successful checks before the service is considered healthy. - - **URI Path**: The endpoint (URI) that NGINX Plus uses for the health check requests. - - **Status Code Exact**: The list of specific HTTP status codes to match against in the backend response. - - **Status Code Range**: The list of HTTP status code ranges to match against in the backend response. - - **Header Name**: The name of the header to use in the backend response matching. - - **Header Condition**: The operator used when checking the header value. Refer to the [NGINX `match` directive documentation](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html?&_ga=2.33487351.893608448.1680639753-1533979881.1676506809#match) for specifics. - - **Header Value**: The header value to use in the backend response matching. - - **Body Required Variable**: The field in the JSON of the backend response body to match against. - - **Body Condition**: The operator used when checking the body value. Refer to the [NGINX `match` directive documentation](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html?&_ga=2.33487351.893608448.1680639753-1533979881.1676506809#match) for specifics. - - **Body Value**: The body value to use in the backend response matching. - - **Connection Timeout**: Sets a timeout for establishing a connection with a proxied server. Follows NGINX configuration file measurement units syntax. - - **Read Timeout**: Sets a timeout for reading a response from the proxied server. Follows NGINX configuration file measurement units syntax. - -1. Select **Add** to apply the Health Check policy to the API Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} - -{{
      }} - -### Verify the Policy - -Confirm that the policy has been set up and configured correctly by taking these steps: - -- Check that your backend service targets (upstream servers) are receiving health check endpoint calls. -- When `isMandatory` is set to `true`, verify that your backend service targets are not receiving proxied traffic until they clear the health checks. -- When `persistent` is set to `true`, the state and behavior for `interval`, `unhealthyThreshold`, `healthyThreshold`, and timeout-related parameters should be preserved between subsequent deployments of API proxies and environments. - ---- - -## gRPC Health Checks - -### Before You Begin - -To complete the steps in this guide, you need the following: - -- API Connectivity Manager is installed, licensed, and running -- You have [one or more Environments with a gRPC API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway.md" >}}) -- You have [published one or more gRPC API Gateways]({{< ref "/nms/acm/how-to/services/publish-grpc-proxy.md" >}}) -- Your backend service(s) implements the [gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md#grpc-health-checking-protocol) and/or returns a status code (normally `12` for `unimplemented`) for health check requests. - -### Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values | Description | Required | Default value | -|------------------------|----------|----------------------------|-----------------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `targetBackendLabel` | string | Example: `default` | This field is used to target a specific backend by label. | No | `default` | -| `mandatory` | bool | `true/false` | Requires every newly added server to pass all configured health checks before NGINX Plus sends traffic to it. | No | `false` | -| `persistent` | bool | `true/false` | Determines whether previous state is remembered after reloading configuration. | No | `false` | -| `port` | int | In range `1-65535` | The port on the service that will provide the health check. | No | N/A | -| `interval` | int | Integer (Max 2147483647) | The length of time between each health check sent from Nginx to the respective service. | No | N/A | -| `passes` | int | Integer (Max 2147483647) | Denotes the number of successful checks before the service is considered healthy. | No | N/A | -| `fails` | int | Integer (Max 2147483647) | Denotes the number of unsuccessful checks before the service is considered unhealthy. | No | N/A | -| `grpc`
`.service` | string | Example: `RouteGuide` | Defines the target gRPC service to be used for this health check. | No | N/A |
-| `grpc`
      `.status` | int | Example: `12` | The expected GRPC status code return code from the upstream gRPC backend to conclude that the health check was successful | No | N/A | -| `connectTimeout` | string | Example: `60s` | Sets a timeout for establishing a connection with a proxied server. Uses NGINX time measurement syntax | No | `1s` | -| `readTimeout` | string | Example: `60s` | Sets a timeout for reading a response from the proxied server. Uses NGINX time measurement syntax | No | `1s` | - -{{< /bootstrap-table >}} - - -### Create a gRPC Health Check Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create a gRPC health check policy, send an HTTP `POST` to the Proxies endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -
      -JSON request - -``` json -{ - "policies": { - "grpc-backend-health-check": [ - { - "action": { - "mandatory": true, - "persistent": true, - "port": 84, - "interval": 7, - "fails": 3, - "passes": 5, - "connectTimeout": "6s", - "readTimeout": "5s", - "grpc": { - "status": 12 - } - } - } - ] - } -} -``` - -
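-
-For reference, the gRPC variant of this policy corresponds to the NGINX Plus `health_check type=grpc` form. The snippet below is an illustrative, hand-written approximation of the example above (the upstream name is a placeholder), not the configuration API Connectivity Manager generates.
-
-```nginx
-location @grpc_health_check {
-    grpc_connect_timeout 6s;
-    grpc_read_timeout 5s;
-    grpc_pass grpc://backend_service_targets;
-    # Expect gRPC status 12 (UNIMPLEMENTED) from backends that do not implement
-    # the health-checking protocol; use grpc_service= instead when they do.
-    health_check type=grpc grpc_status=12 port=84 interval=7 fails=3 passes=5
-                 mandatory persistent;
-}
-```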
      - -
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To create a gRPC Health Check policy using the web interface: - -1. {{< include "acm/webui-acm-login.md" >}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the left-side *API Proxy* menu, select **Policies**. -1. On the *Advanced > Policies* page, on the **API Proxy** tab, locate the **gRPC Backend Health Check** policy. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. -1. Complete the necessary fields: - - - **Apply the policy to**: Provide the label that was assigned to a Backend Service if it is different from the default value `default - - **Is Mandatory**: Toggle the switch to on if every new service target (server) must pass all configured health checks before NGINX Plus sends traffic to it. - - **Port**: If the health check needs to be conducted on a port other than the one specified for the backend service targets, specify the port to use. - - **Interval**: The length of time between each health check sent from NGINX Plus to the backend service targets. - - The **gRPC** setting will determine which health check service will be contacted by NGINX and which status code will be expected. - -1. Select **Add** to apply the Health Check policy to the API Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} - -{{
      }} - -### Verify the Policy - -Confirm that the policy has been set up and configured correctly by taking these steps: - -- Check that your backend service targets (upstream servers) are receiving health check endpoint calls. You may also find the tools [grpcurl](https://github.com/fullstorydev/grpcurl) and [grpc-health-probe](https://github.com/grpc-ecosystem/grpc-health-probe) helpful for debugging. -- When `mandatory` is set to `true`, verify that your backend service targets are not receiving proxied traffic until they clear the health checks. -- When `persistent` is set to `true`, state and behavior for `interval`, `passes`, `fails`, and timeout related parameters should be preserved between subsequent deployments of API Proxies and Environments. diff --git a/content/nms/acm/how-to/policies/http-backend-configuration.md b/content/nms/acm/how-to/policies/http-backend-configuration.md deleted file mode 100644 index 5732998db..000000000 --- a/content/nms/acm/how-to/policies/http-backend-configuration.md +++ /dev/null @@ -1,813 +0,0 @@ ---- -description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager - to manage HTTP API Gateways by applying a backend configuration policy. -nd-docs: DOCS-1141 -toc: true -weight: 650 -title: HTTP Backend Configuration -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## About the Backend Configuration Policy - -The backend configuration policy allows API Owners to manage their backend services with a common set of configuration options. These configuration options are applied to all service targets in a given backend service. - -The backend configuration policy provides the ability to configure: - -- [Load balancing](#load-balancing) -- [Keep-Alive connections](#keep-alive-connections) -- [Connection settings](#connection-settings) -- [Queues](#queues) -- [Buffers](#buffers) -- [Session Cookies](#session-cookies) -- [NTLM Authentication](#ntlm-authentication) - -Later sections of this guide will cover each of these areas in turn. - -### Intended Audience - -{{< include "acm/how-to/policies/api-owner-persona.md">}} - ---- - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with an [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}). -- You have published one or more [API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}) - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here's what you need to do: - -- [Edit an existing environment or create a new one]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). -- Check the advanced settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each environment. Save and publish the changes. - ---- - -## Target Backend Service - -It is possible to target specific backend services with a backend configuration policy through the use of labels. Backend services whose label matches that configured in the backend configuration policy target backend policy label will have that configuration applied. If no target backend policy label is provided, the backend configuration policy will be applied to all backend services with the label is set as default. 
- -### Configuring Target Backend Service - -Take the steps in this section to configure a backend configuration policy for specific backend service targets by label. In the example below, the backend configuration policy keepalive settings will be applied to all backend service targets with the `petstore-api` label. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add a load balancer configuration to the API Proxy through the backend-config policy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "policies": { - "backend-config": [ - { - "action": { - "targetBackendPolicyLabel" : "petstore-api", - "keepCacheConnectionAlive": 32, - "keepAliveRequests": 1000, - "keepAliveTime": "1h", - "keepAliveTimeout": "60s" - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|----------------------------|---------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|---------------| -| `targetBackendPolicyLabel` | string | Example: `petstore-api` | Target backend labels for policy application. If not supplied this backend service configuration would be applied to the default backend service of the API proxy. | No | `default` | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md">}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. -1. To apply the backend configuration policy to backend service targets, set the **Target Backend Policy Label** as the label of the backend service targets. -1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{}} - ---- - -## Load Balancing - -Six load balancing options are available; round robin (default), least connections, least response time, hashed key value, IP hash, or random. - -### Balancing Algorithms - -#### Round Robin - -This algorithm distributes requests to the application in a round-robin fashion to each backend service target in equal and circular order. As the default load balancing algorithm, it applies to all upstream server blocks containing backend service targets. - -#### Least Connections - -This algorithm distributes requests to the server with the least number of active connections. If there are several servers, they are tried sequentially using the round-robin balancing method. - -#### Least Time - -{{< call-out "note" >}} This load balancing algorithm is available as part of the F5 NGINX Plus commercial subscription. {{< /call-out >}} - -This algorithm distributes requests to the server with the least average response time and least number of active connections. If there are several servers, they are tried sequentially using the round-robin balancing method. 
- -If the `HEADER` measurement is specified, the time to receive the response header is used. If the `LAST_BYTE` measurement is specified, the time to receive the full response is used. If the `LAST_BYTE_INFLIGHT` parameter is specified, incomplete requests are also considered. - -#### Hash - -This algorithm distributes requests with client-server mapping based on the hashed `key` value. The `key` can contain text, variables, and their combinations. Note that adding or removing a server from the group may result in remapping most of the keys to different servers. The method is compatible with the [Cache::Memcached](https://metacpan.org/pod/Cache::Memcached) Perl library. - -If the `consistent` parameter is specified, the [ketama](https://www.metabrew.com/article/libketama-consistent-hashing-algo-memcached-clients) consistent hashing method will be used instead. The method ensures that only a few keys will be remapped to different servers when a server is added to or removed from the group. This helps to achieve a higher cache hit ratio for caching servers. The method is compatible with the [Cache::Memcached::Fast](https://metacpan.org/pod/Cache::Memcached::Fast) Perl library with the `ketama_points` parameter set to 160. - -#### IP Hash - -This algorithm distributes requests between servers based on client IP addresses. The first three octets of a client's IPv4 address, or an entire IPv6 address are used as a hashing key, ensuring that requests from the same client will always be passed to the same server except when the server is unavailable. In the latter case, client requests will be passed to another server. Most probably, it will always be the same server as well. - -If one of the servers needs to be temporarily removed, it should be marked with the down parameter to preserve the current hashing of client IP addresses. - -### Configuring a Load Balancer - -Follow the steps in this section to configure request load balancing across backend service targets. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add a load balancer configuration to the API Proxy through the backend-config policy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "policies": { - "backend-config": [ - { - "action": { - "loadBalancing": { - "algorithm": "ROUND_ROBIN", - "leastTimeMeasurement": "HEADER", - "hashKey": "$request_uri", - "consistentHashing": true, - "randomTwo": true, - "randomMethod": "LEAST_CONN" - } - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|------------------------|---------|--------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|---------------| -| `algorithm` | string | One of:
      [`ROUND_ROBIN`, `LEAST_CONN`, `LEAST_TIME`, `HASH`, `IP_HASH`, `RANDOM`] | The load balancing algorithm to use. Default `ROUND_ROBIN` is used without any configuration. | No | `ROUND_ROBIN` | -| `leastTimeMeasurement` | string | One of:
      [`HEADER`, `LAST_BYTE`, `LAST_BYTE_INFLIGHT`] | Optional configuration option for `LEAST_TIME` algorithm. The measurement used to determine `LEAST_TIME`. | No | `HEADER` | -| `hashKey` | string | Text, variables, and their combinations. | Required configuration option for `HASH` algorithm. Example: `$request_uri` | Semi-optional | N/A | -| `consistentHashing` | boolean | `true/false` | Optional configuration option for `HASH` algorithm. Uses ketama consistent hashing method. | No | `true` | -| `randomTwo` | boolean | `true/false` | Optional configuration option for `RANDOM` algorithm. Instructs NGINX to randomly select two servers and then choose a server using the specified `randomMethod`. | No | `true` | -| `randomMethod` | string | One of:
      [`LEAST_CONN`, `LEAST_TIME`, `LAST_TIME_HEADER`, `LEAST_TIME_LAST_BYTE`] | Optional configuration option for `RANDOM` algorithm. Specifies which load balancing algorithm to use for a randomly selected server. | No | `LEAST_CONN` | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md">}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. -1. To enable a load balancer other than the default round-robin, enable the toggle for **Add an alternate load balancer**. -1. Select your **Load Balancing Algorithm** from the drop-down menu. - - For `LEAST_TIME` define the **Least Time Measurement** - - For `HASH` define the **Hash Key** and if **Consistent Hashing** is required. - - For RANDOM set if **Random Two** should be used and the **Random Method** load balancing algorithm. -1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{
      }} - ---- - -## Keep-Alive Connections - -HTTP keepalive (persistent) connections [[RFC-2068]](https://www.rfc-editor.org/rfc/rfc2068.html#section-8) are a necessary performance feature that reduce latency and allow web pages to load faster. HTTP uses a mechanism called keepalive connections to hold open the TCP connection between the client and the server after an HTTP transaction has completed. If the client needs to conduct another HTTP transaction, it can use the idle keepalive connection rather than creating a new TCP connection. - -If lots of clients use HTTP keepalives and the web server has a concurrency limit or scalability problem, then performance plummets once that limit is reached. It does not take many clients to exhaust the concurrency limit in many contemporary web and application servers and any thread‑ or process‑based web or application server is vulnerable to concurrency limitations. - -NGINX uses a different architecture that does not suffer from the concurrency problems described above. It transforms slow client connections to optimized benchmark‑like connections to extract the best performance from your servers. This allows each NGINX process to easily scale to tens, thousands, or hundreds of thousands of connections simultaneously. - -### Configuring Keep-Alive Connections - -Follow the steps in this section to configure HTTP keepalives for your backend service targets. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add a keepalive connection configuration to the API Proxy through the backend-config policy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "policies" : { - "backend-config" : [ - { - "action" : { - "keepCacheConnectionAlive": 32, - "keepAliveRequests": 1000, - "keepAliveTime": "1h", - "keepAliveTimeout": "60s" - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|----------------------------|---------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `keepCacheConnectionAlive` | integer | integer >= `1` | Activates the cache for connections to upstream servers. Sets the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. When this number is exceeded, the least recently used connections are closed. | No | `32` | -| `keepAliveRequests` | integer | integer >= `1` | Sets the maximum number of requests that can be served through one keepalive connection. | No | `1000` | -| `keepAliveTime` | string | Example: `1h` | Limits the maximum time during which requests can be processed through one keepalive connection. Follows NGINX configuration time measurement units syntax. | No | `1h` | -| `keepAliveTimeout` | string | Example: `60s` | Sets a timeout during which an idle keepalive connection to an upstream server will stay open. Follows NGINX configuration time measurement units syntax. | No | `60s` | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md">}} -1. On the left menu, select **Services**. -1. 
Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. -1. Go to the **Keep-Alive Connection Settings** section. -1. If non-default values are required, enter configuration values for: - - Keep-Alive Max Cache Connections Alive - - Keep-Alive Requests - - Keep-Alive Time - - Keep-Alive Timeout -1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{}} - ---- - -## Connection Settings - -The connection settings can be configured for maximum client request body size, establishing a connection timeout, maximum time for reading a response from the proxied server, or maximum time transmitting a request to the proxied server. - -### Client Max Body Size - -Sets the maximum allowed size of the client request body. If the size of a request exceeds the configured value, the `413 (Request Entity Too Large)` error is returned to the client. - -### Connect Timeout - -Defines a timeout for establishing a connection with a proxied server. Please note that this timeout cannot usually exceed 75 seconds. - -### Read Timeout - -Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. The connection is closed if the proxied server does not transmit anything within this time. - -### Send Timeout - -Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. The connection is closed if the proxied server does not receive anything within this time. - -### Configuring Connection Settings - -This section explains how to configure connection settings for your backend service targets. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add request settings configuration to the API Proxy through the backend-config policy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "policies" : { - "backend-config" : [ - { - "action" : { - "clientMaxBodySize" : "2m", - "connectTimeout": "30s", - "readTimeout": "30s", - "sendTimeout": "30s" - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|---------------------|---------|-----------------|--------------------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `clientMaxBodySize` | string | Example: `2m` | Sets the maximum allowed size of the client request body. Follows NGINX configuration file measurement units syntax. | No | N/A | -| `connectTimeout` | string | Example: `30s` | Sets a timeout for establishing a connection with a proxied server. Follows NGINX configuration time measurement units syntax. | No | N/A | -| `readTimeout` | string | Example: `30s` | Sets a timeout for reading a response from the proxied server. 
Follows NGINX configuration time measurement units syntax. | No | N/A | -| `sendTimeout` | string | Example: `30s` | Sets a timeout for transmitting a request to the proxied server. Follows NGINX configuration time measurement units syntax. | No | N/A | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md">}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. -1. Go to the **Connection Settings** section. -1. If non-default values are required, enter configuration values for: - - Connect Timeout - - Read Timeout - - Send Timeout - - Client Max Body Size -1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{}} - ---- - -## Queues - -If an upstream server cannot be selected immediately while processing a request, the request will be placed into the queue. The queue configuration specifies the maximum number of requests that can be in the queue simultaneously. If the queue is filled up, or the server to pass the request to cannot be selected within the time period specified in the timeout parameter, the `502 (Bad Gateway)` error will be returned to the client. - -### Configuring a Queue - -Follow the steps in this section to configure a queue for your backend service targets. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add a queue configuration to the API Proxy through the backend-config policy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "policies" : { - "backend-config" : [ - { - "action" : { - "queue" : { - "maxNumberOfRequests": 10, - "timeOut": "60s" - } - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|-----------------------|---------|-----------------|--------------------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `maxNumberOfRequests` | integer | Example: `10` | Maximum number of requests that can be in the queue at the same time. If not set then no queue will be configured. | Yes | N/A | -| `timeout` | string | Example: `60s` | Sets a timeout for establishing a connection with a proxied server. Follows NGINX configuration time measurement units syntax. | No | `60s` | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md">}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. -1. Go to the **Queue Settings** section. -1. 
To configure a queue, enable the toggle for **Add a queue**.
-    - Set the **Maximum number of requests** (required).
-    - Set the **Queue timeout** (default `60s`).
-1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy.
-
-{{%/tab%}}
-{{}}
-
----
-
-## Buffers
-
-{{< call-out "note" >}}See the [Module ngx_http_proxy_module](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) topic for more information about the directives mentioned in this section.{{< /call-out>}}
-
-When buffering is enabled, NGINX receives a response from the proxied server as soon as possible, saving it into the buffers set by the `proxy_buffer_size` and `proxy_buffers` directives.
-
-- The `proxy_buffer_size` directive sets the buffer size for reading the first part of the response received from the proxied server, which usually contains a small response header. By default, the buffer size is equal to one memory page: 4 KB or 8 KB, depending on the operating system.
-
-- The `proxy_buffers` directive controls the size and the number of buffers allocated for a request. Increasing the number of buffers lets you buffer more information.
-
-If the complete response doesn't fit into memory, a part can be saved to a temporary file on the disk. The default maximum size of this temporary file is 1024 MB, and the default write size is 8 KB or 16 KB, depending on the operating system.
-
-When configuring proxy buffers, the total size of the `proxy_buffers` (number * size) minus one buffer must be greater than `proxy_busy_buffers_size`. The default `proxy_busy_buffers_size` is 8 KB or 16 KB, depending on the operating system.
-
-If you get the error `"proxy_busy_buffers_size" must be less than the size of all "proxy_buffers" minus one buffer` from NGINX on the data plane, the proxy buffer number and size are configured incorrectly.
-
-### Examples
-
      -Example valid Proxy Buffers number and size - -```text -proxy busy buffers size : 16 KB -proxy buffer number : 8 -proxy buffer size : 4 KB -total buffer size : 32 KB - -busy_buffers_size < total buffer size - buffer -16 KB < 32 KB - 4 KB -16 KB < 28 KB -True: Valid proxy buffer number & size configuration -``` - -
      - -
      -Example invalid proxy buffers number and size - -```text -proxy busy buffers size : 16 KB -proxy buffer number : 2 -proxy buffer size : 2k -total buffer size : 8 KB - -busy_buffers < total buffer size - buffer -16 KB < 8 KB - 2k -16 KB < 6k -False: Invalid proxy buffer number & size configuration -``` - -
-
-### Tuning Proxy Buffers Number and Size
-
-When using proxy buffering, we recommend sizing the buffers so that the complete response from upstream can be held in memory, to avoid reading from or writing to disk, which is significantly slower.
-
-If the response from upstream arrives fast and the client is slower, NGINX preserves the response in buffers, allowing it to close the upstream connection quickly.
-
-If the allocated buffer size doesn't allow storing the complete response in memory, it will be stored on disk, which is slower.
-
-Fine-tuning the `proxy_buffers` number and size depends on the response body size of your application.
-
-To determine the size of the HTML/data returned by a resource, you can use the following command:
-
-```bash
-curl -so /dev/null https://nginx.org/ -w '%{size_download}'
-```
-
-Set `proxy_buffers` so that the total buffer size equals the maximum size of the response data.
-
-For example, if the uncompressed body size is 73,728 bytes (72 KB), you must set 72 KB worth of buffer space: either 18 buffers of 4 KB or 9 buffers of 8 KB.
-
-### Configuring Buffers
-
-Follow the steps in this section to configure buffers for your backend service targets.
-
-{{}}
-{{%tab name="API"%}}
-
-Send a `POST` request to add a buffer configuration to the API Proxy through the backend-config policy.
-
-
-{{}}
-
-| Method | Endpoint |
-|----------|---------------------------------------------------------|
-| `POST` | `/services/workspaces//proxies` |
-
-{{}}
-
-
-```json
-{
-    "policies" : {
-        "backend-config" : [
-            {
-                "action" : {
-                    "buffer": {
-                        "number": 42,
-                        "size": "16KB"
-                    }
-                }
-            }
-        ]
-    }
-}
-```
-
-
-{{< bootstrap-table "table table-striped table-bordered" >}}
-
-| Field | Type | Possible Values | Description | Required | Default value |
-|----------|---------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------|
-| `number` | integer | integer >= `2` | Sets the number of buffers used for reading a response from the proxied server for a single connection. | Yes | N/A |
-| `size` | string | size >= `1K` | Sets the size of the buffers used for reading a response from the proxied server for a single connection. Follows NGINX configuration file measurement units syntax. | Yes | N/A |
-
-{{< /bootstrap-table >}}
-
-
-{{%/tab%}}
-{{%tab name="UI"%}}
-
-1. {{< include "acm/webui-acm-login.md">}}
-1. On the left menu, select **Services**.
-1. Select a workspace in the list that contains the API Proxy you want to update.
-1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**.
-1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**.
-1. Go to the **Buffer Settings** section.
-1. To configure a buffer, enable the toggle for **Add a buffer**.
-    - Set the **Number of buffers** (required).
-    - Set the **Buffer size** (required).
-1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy.
-
-{{%/tab%}}
-{{}}
-
----
-
-## Session Cookies
-
-Enables session affinity, which causes requests from the same client to be passed to the same server in a group of servers.
With the cookie method used, information about the designated server is passed in an HTTP cookie generated by NGINX. - -A request from a client not yet bound to a particular server is passed to the server selected by the configured balancing method. Further requests with this cookie will be passed to the designated server. If the designated server cannot process a request, the new server is selected as if the client has not been bound yet. - -As a load balancing method always tries to evenly distribute the load considering already bound requests, the server with a higher number of active bound requests has less possibility of getting new unbound requests. - -### Configuring Session Cookies - -Folow the steps in this section to configure session cookies for your backend service targets. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add a session cookie configuration to the API Proxy through the backend-config policy. If any configuration parameters are omitted, the corresponding fields are not set. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "policies" : { - "backend-config" : [ - { - "action" : { - "sessionCookie" : { - "name" : "auth_cookie", - "path" : "/path/to/set", - "expiresIn" : "1h", - "domainName" : ".example.com", - "httpOnly" : true, - "secure" : true, - "sameSite" : "STRICT" - } - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|--------------|---------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `name` | string | Example: `auth_cookie` | Sets the name of the cookie to be set or inspected. | Yes | N/A | -| `path` | string | Example: `/path/to/set` | Defines the path for which the cookie is set. | No | N/A | -| `expiresIn` | string | Example: `1h` | Sets cookie expiry. If the parameter is not specified, it will cause the cookie to expire at the end of a browser session. Follows NGINX configuration time measurement units syntax. | No | N/A | -| `domainName` | string | Example: `.example.com` | Defines the domain for which the cookie is set. Parameter value can contain variables. | No | N/A | -| `httpOnly` | boolean | `true/false` | Adds the `HttpOnly` attribute to the cookie. | No | N/A | -| `secure` | boolean | `true/false` | Adds the `Secure` attribute to the cookie. | No | N/A | -| `sameSite` | string | One of:
      [`STRICT`, `LAX`, `NONE`] | Adds the `SameSite` attribute to the cookie. | No | N/A | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md">}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. -1. Go to the **Buffer Settings** section. -1. To configure session cookies, enable the toggle for **Session Affinity/Cookies Settings**. - 1. Set the **Name** of the cookie (required). - 1. Set the **Path** (optional). - 1. Set the cookie **Expires in** (optional). If the parameter is not specified, it will cause the cookie to expire at the end of a browser session. - 1. Set the **Domain Name** (optional). - 1. Enable the **HTTP Only** toggle to add the HttpOnly attribute to the cookie (optional). - 1. Enable the **Secure** toggle to add the Secure attribute to the cookie (optional). - 1. Set the **Same Site** attribute value (optional). -1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{
      }} - ---- - -## NTLM Authentication - -Allows proxying requests with [NTLM Authentication](https://en.wikipedia.org/wiki/Integrated_Windows_Authentication). The upstream connection is bound to the client connection once the client sends a request with the `Authorization` header field value starting with `Negotiate` or `NTLM`. Further client requests will be proxied through the same upstream connection, keeping the authentication context. When enabled, the HTTP Protocol version is set to 1.1. - -### Configuring NTLM Authentication - -Follow the steps in this section to configure session cookies for your backend service targets. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to enable NTLM authentication for the API Proxy through the backend-config policy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "policies" : { - "backend-config" : [ - { - "action" : { - "enableNTLMAuthn": false - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|-------------------|---------|-----------------|-----------------------------------------------------|----------|---------------| -| `enableNTLMAuthn` | boolean | `true/false` | Enables proxying requests with NTLM Authentication. | No | `false` | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md">}} -1. On the left menu, select **Services**. -1. Select a workspace in the list that contains the API Proxy you want to update. -1. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **HTTP Backend Config**. -1. Go to the **Connection Settings** section. -1. To enable NTLM, enable the toggle for **Enable NTLM Authn**. -1. Select **Add** to apply the backend configuration policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. 
- -{{%/tab%}} -{{}} - ---- - -## Reference Backend Configuration Policy API Request Body - -```json -{ - "policies": { - "backend-config": [ - { - "action": { - "targetBackendPolicyLabel" : "default", - "keepCacheConnectionAlive": 32, - "keepAliveRequests": 1000, - "keepAliveTime": "1h", - "keepAliveTimeout": "60s", - "connectTimeout": "30s", - "readTimeout": "30s", - "sendTimeout": "30s", - "clientMaxBodySize": "2m", - "enableNTLMAuthn": false, - "loadBalancing": { - "algorithm": "LEAST_CONN", - "leastTimeMeasurement": "HEADER", - "hashKey": "$request_uri", - "consistentHashing": true, - "randomTwo": true, - "randomMethod": "LEAST_CONN" - }, - "queue": { - "maxNumberOfRequests": 10, - "timeOut": "60s" - }, - "buffer": { - "number": 8, - "size": "8k" - }, - "sessionCookie": { - "name": "auth_cookie", - "path": "/", - "expiresIn": "1h", - "domainName": ".example.com", - "httpOnly": true, - "secure": true, - "sameSite": "strict" - } - } - } - ] - } -} -``` diff --git a/content/nms/acm/how-to/policies/introspection.md b/content/nms/acm/how-to/policies/introspection.md deleted file mode 100644 index 7a4ec2266..000000000 --- a/content/nms/acm/how-to/policies/introspection.md +++ /dev/null @@ -1,423 +0,0 @@ ---- -description: API Owners can restrict access to their APIs with OAuth2 tokens. The - policy is configured to grant access to APIs after having tokens introspected. -nd-docs: DOCS-953 -toc: true -weight: 800 -Title: Introspection -type: -- concept ---- - -## Overview - -API Connectivity Manager API Owners can restrict access to their APIs with OAuth2 tokens by swapping an opaque token for claims or JWT token to be proxied to the backend service. The policy can be configured to grant access to APIs after having the tokens introspected. In addition, the claims in the token can be extracted and forwarded to the backend service. - ---- - -## What is OAuth2? - -{{< include "acm/tutorials/what-is-OAuth2.md" >}} - -### OAuth2 Roles - -The idea of roles is part of the core specification of the OAuth2 Authorization Framework. These define the essential components of an -OAuth2 system: - -- **Resource Owner**: An entity capable of granting access to a protected resource. It could be a system or an end-user. -- **Client**: An application making protected resource requests on behalf of the Resource Owner and with its authorization. -- **Authorization Server**: The server that issues access tokens to the client after successfully authenticating the resource owner and - obtaining authorization. The authorization server exposes two endpoints: the Token endpoint, which is involved in a machine-to-machine interaction for issuing access tokens, and the Introspection endpoint, which is used by the Resource Server to validate client access tokens. -- **Resource Server**: The server protecting the user resources capable of accepting and responding to protected resource requests using - access tokens. In this guide, NGINX running within the API Connectivity Manager API-Proxy is the Resource Server. - -### Token Introspection - -The standard method for validating access tokens with an IdP is called _Token Introspection_. _OAuth2 Token Introspection_ -[[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662) is now a widely supported standard that describes a JSON/REST interface that a Resource Server uses to present a token to the IdP, and describes the structure of the response. It is supported by many of the leading IdP vendors and cloud providers. 
- -NGINX can be used to validate access tokens on behalf of backend services. This has several benefits: - -- Requests reach the backend services only when the client has presented a valid token -- Existing backend services can be protected with access tokens without requiring code changes -- Only the NGINX instance (not every app) needs to be registered with the IdP -- Behavior is consistent for every error condition, including missing or invalid tokens - -The _OAuth2 Token Introspection_ flow includes the following steps: - -{{OAuth2 Token Introspection Flow.}} - ---- - -## Set up OAuth2 Introspection Policy - -You can set up OAuth2 Introspection policy by using either the web interface or the REST API. - -### Edit the API-Proxy Settings - -{{}} - {{%tab name="Web Interface"%}} - -1. In the API Connectivity Manager user interface, select **Services > API Proxies**click the **...** icon in the **Actions** column for the API proxy that you want to enable the OAuth2 Introspection policy for, select **Edit Proxy**. -2. Under the **Advanced** section select **Policies**. -3. Under the **API Proxy** tab, locate the **OAuth2 Introspection** policy and click the **...** icon, select **Add Policy**. -4. Update **Client Request** settings. - -{{}} - -| Configuration Setting | Description | -|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Specifies the token's location in incoming user request | Specifies where the access token is supplied in the incoming user request and the key from which the access token can be extracted. The default behavior is as a Bearer token in the Authorization request header. | - -{{}} - -5. Update **Introspection Request** settings. - -{{}} - -| Configuration Setting | Description | -|-------------------------------------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Enter the introspection endpoint | The IdP OAuth2 Token Introspection endpoint [[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662) where NGINX IdP client will send client `access_token`. | -| Enable SNI | Enables or disables passing of the server name through TLS Server Name Indication extension (SNI), [[RFC 6066]](https://www.rfc-editor.org/rfc/rfc6066) when establishing a connection with the proxied HTTPS server. | -| Override the default server name | Allows overriding the server name used to verify the certificate of the proxied HTTPS server and to be passed through SNI when establishing a connection with the proxied HTTPS server. By default, the host part of the `proxy_pass` URL is used. | - -{{}} - -6. Update **Credentials**. - -{{}} - -| Configuration Setting | Description | -|----------------------------- |--------------------------------------------------------------------------| -| Enter Client Application ID | Identifies the IdP Client making the token introspection request. | -| Enter Client Secret/Password | The IdP Client secret/password. | - -{{}} - -7. Update **Introspection Response** settings. 
- -{{}} - -| Configuration Setting | Description | -|-------------------------------------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Specify the introspection response type | Whether the token introspection endpoint should respond with a JSON object or JSON Web Token (JWT). The default is application/json. | -| Specify the list of claims to forward as headers to the backend | Forward claims from the token introspection response in the proxy header to the backend service.

      Can only be applied if the introspection response is configured to application/json. | -| Enable JWT token forwarding to backend | Forward introspection token response to backend service.

      Can only be applied if the introspection response is configured to application/jwt. | -| Specify how long introspected tokens will be cached | Specifies how long the introspected tokens will be cached. Tokens will be refreshed from the URI endpoint after the duration. Set as **0** to disable.

      Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | -| Specify OAuth2 Token Type Hint | A hint about the token type submitted for introspection. The protected resource can pass this parameter to help the authorization server optimize the token lookup. Values for this field are defined in [[RFC6749]](https://www.rfc-editor.org/rfc/rfc6749). | - -{{
}} - -8. Enable Introspection Token **Claim Verification**. To add a claim to verify, click **+ Add a claim**; click it again for each additional claim. To delete a claim, click the **trash can** symbol for that claim. - -{{}} - -| Configuration Setting | Description | -|------ |---------------------------------------------------------------| -| Claim | The claim name. If the claim is nested, layers of depth are indicated with periods, for example: `resource_access.account.roles`. | -| Type | The claim data type. | -| Delimiter | The claim value delimiter if the value is a delimited string. | -| Value | The claim value to verify. | - -{{}} - -9. Enable **Resolver** if an external DNS resolver is required. - -{{}} - -| Configuration Setting | Description | -|---------------------- |------------------------------------------------------------------------------------------------------------------------------ | -| Time Out | Sets a timeout for name resolution.

      Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | Valid For | By default, NGINX caches answers using the TTL value of a response. This setting allows overriding it.

      Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | -| Hostname | The DNS Hostname or IP Address. Multiple DNS Resolvers can be added for a given OAuth2 Introspection Policy. | -| Listened Port | The DNS Port number | - -{{
      }} - -10. Update **Error Handling**. - -{{}} - -| Configuration Setting | Description | -|---------------------------------------------------- |-------------------------------------------------------------------------------------------------------------------------------------- | -| Specify authorization failed error code | The error code that needs to be used by the NGINX data plane when the backend service cannot find a token match or access is forbidden. | -| Specify authorization token not provided error code | The error code that needs to be used by the NGINX data plane when the backend service when a token is not supplied. | - -{{}} - -11. Select **Add**. -12. Select **Save and Submit**. - - {{%/tab%}} - {{%tab name="REST API"%}} - -Send a POST request to add the OAuth2 Introspection policy to the API-Proxy. - - -{{}} - -| Method | Endpoint | -|--------|---------------------------------------------------------| -| POST | `/services/workspaces//proxies` | - -{{}} - - -{{< call-out "note" >}} While all request body configuration values are presented in the request body structure example below, not all configuration - values are compatible. Please see the configuration value description table for further information. {{< /call-out >}} - -
      -JSON request - -```json -{ - "name": "{{proxyName}}", - "version": "v1", - "proxyConfig": { - "hostname": "{{environmentHostname}}", - "ingress": { - "basePath": "/api" - }, - "backends": [ - { - "serviceName": "backend-svc", - "serviceTargets": [ - { - "hostname": "10.0.0.10" - } - ] - } - ], - "policies": { - "oauth2-introspection": [ - { - "action": { - "introspectionEndpoint": "https://example.idp.com:8443/oauth/v2/oauth-introspect", - "enableSNI": true, - "proxyTLSName": "test.oauth.com", - "introspectionResponse": "application/json", - "cacheIntrospectionResponse": "5m", - "clientTokenSuppliedIn": "HEADER", - "clientTokenName": "Authorization", - "authzServerTokenHint": "ACCESS_TOKEN", - "forwardToken": false, - "forwardedClaimsInProxyHeader": [ - "username", - "exp", - "scope" - ], - "verifyClaims": [ - { - "claim": "sub", - "type": "STRING", - "value": "a95117bf-1a2e-4d46-9c44-5fdee8dddd11" - }, - { - "claim": "scope", - "type": "STRING", - "value": "read write email", - "delimiter": "SPACE" - }, - { - "claim": "aud", - "type": "ARRAY", - "value": ["https://protected.example.net/resource"] - }, - { - "claim": "resource_access.account.groups", - "type": "STRING", - "value": "default-group" - }, - { - "claim": "resource_access.account.roles", - "type": "ARRAY", - "value": [ - "default-roles", - "offline_access", - ] - }, - { - "claim": "email_verified", - "type": "BOOLEAN", - "value": true - }, - { - "claim": "user-group", - "type": "INTEGER", - "value": 42 - } - ], - "resolver": { - "valid": "30s", - "timeout": "30s", - "servers": [ - { - "hostname": "example.com" - }, - { - "hostname": "10.0.0.11", - "port": 53 - } - ] - }, - "errorReturnConditions": { - "noMatch": { - "returnCode": 403 - }, - "notSupplied": { - "returnCode": 401 - } - } - }, - "data": [ - { - "clientAppID": "idp-client-app-id", - "clientSecret": "dbdaa3e1-f100-420x-bfd0-875bd8a77cd7" - } - ] - } - ] - } - } -} -``` - -
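The `verifyClaims` and `forwardedClaimsInProxyHeader` settings in the example above operate on the JSON object returned by the IdP's introspection endpoint. The following is an illustrative sketch of such an `application/json` introspection response; the claim names and values mirror the example request body, and the exact claims returned depend entirely on your IdP's configuration (the `username` and `exp` values here are placeholders).

```json
{
  "active": true,
  "sub": "a95117bf-1a2e-4d46-9c44-5fdee8dddd11",
  "username": "jane.doe",
  "scope": "read write email",
  "aud": ["https://protected.example.net/resource"],
  "exp": 1735689600,
  "email_verified": true,
  "user-group": 42,
  "resource_access": {
    "account": {
      "groups": "default-group",
      "roles": ["default-roles", "offline_access"]
    }
  }
}
```

Nested claims such as `resource_access.account.roles` are addressed in `verifyClaims[].claim` with period-separated paths, and delimited string claims such as `scope` are split using the configured `delimiter`. Per [RFC 7662](https://www.rfc-editor.org/rfc/rfc7662), the `active` member indicates whether the presented token is currently valid.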
      - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values | Description | Required | Default value | -|--------------------------------------- |------------------ |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |---------- |-------------------------------------------- | -| `introspectionEndpoint` | string | Example:
      `"https://idp.com/introspect"` | The IdP OAuth2 Token Introspection endpoint [[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662) where NGINX IdP client will send client `access_token`. | True | N/A | -| `enableSNI` | boolean | `true`/`false` | Enables or disables passing of the server name through TLS Server Name Indication extension (SNI), [[RFC 6066]](https://www.rfc-editor.org/rfc/rfc6066) when establishing a connection with the proxied HTTPS server. | False | `false` | -| `proxyTLSName` | string | Example: `test.oauth.com` | Allows overriding the server name used to verify the certificate of the proxied HTTPS server and to be passed through SNI when establishing a connection with the proxied HTTPS server. By default, the host part of the `proxy_pass` URL is used. | False | Host part of `introspectionRequest` | -| `introspectionResponse` | string | One of:
      [`"application/json"`,
      `"application/jwt"`] | Whether the token introspection endpoint should respond with a JSON object or JSON Web Token (JWT). | False | `"application/json"` | -| `cacheIntrospectionResponse` | string | Example: `"5m"` | Specifies how long the introspected tokens will be cached. Tokens will be refreshed from the URI endpoint after the duration. Set as `0s-m-h` to disable.

      Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | False | `"5m"` | -| `clientTokenSuppliedIn` | string | One of:
      [`"HEADER"`, `"QUERY"`] | Specifies where the access token is supplied in the incoming user request. | False | `"HEADER"` | -| `clientTokenName` | string | Example:
      `"Authorization"` | Specifies the key under which the access token can be extracted from in the incoming user request.

      Note: To maintain Bearer token behavior, `clientTokenSuppliedIn` must be set to `HEADER`, and `clientTokenName` must be set to `Authorization`. This is the default behavior of the Introspection Policy. | False | `"Authorization"` | -| `authzServerTokenHint` | string | One of:
      [`"ACCESS_TOKEN"`,
      `"REFRESH_TOKEN"`] | A hint about the type of the token submitted for introspection. The protected resource can pass this parameter to help the authorization server optimize the token lookup. Values for this field are defined in [[RFC6749]](https://www.rfc-editor.org/rfc/rfc6749). | False | N/A | -| `forwardToken` | boolean | `true`/`false` | Forward introspection token response to backend service.

      Can only be applied if the `introspectionResponse` is set to `application/jwt`. | False | `true` | -| `forwardedClaimsInProxyHeader` | array of strings | Standard claims can be found in
      _OAuth2 Token Introspection_
      [[RFC 7662]](https://www.rfc-editor.org/rfc/rfc7662).

      This is not an exhaustive list,
      IdPs and Resource Owners can
      configure their own claims. | Forward claims from the token introspection response in the proxy header to the backend service.

      Can only be applied if the `introspectionResponse` is set to `application/json`. | False | `["scope",`
      `"username",`
      `"exp"]` | -| `verifyClaims[].claim` | string | Example:
      `"resource_access.account.roles"` | The claim name. If the claim is nested, layers of depth are indicated with periods. | True | N/A | -| `verifyClaims[].type` | string | One of:
      [`"STRING"`, `"ARRAY"`, `"BOOLEAN"`, `"INTEGER"`] | The claim data type. | True | N/A | -| `verifyClaims[].delimiter` | string | One of:
      [`"SPACE"`, `"COMMA"`, `"PERIOD"`, `"PLUS"`, `"COLON"`, `"SEMI-COLON"`, `"VERTICAL-BAR"`, `"FORWARD-SLASH"`, `"BACK-SLASH"`, `"HYPHEN"`, `"UNDERSCORE"`] | The claim value delimiter if value is a delimited string. | Semi-Optional | N/A | -| `verifyClaims[].value` | - | Examples:
      `"test-user-1"`
      `"read write email"`
      `["default-roles","offline_access"]`
      `42`
      `true` | The claim value to verify. | True | N/A | -| `resolver.valid` | string | Example: `"30s"` | By default, NGINX caches answers using the TTL value of a response. The `valid` parameter allows overriding it.

      Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | False | `"30s"` | -| `resolver.timeout` | string | Example: `"30s"` | Sets a timeout for name resolution.

Follows [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html). | False | `"30s"` | -| `resolver.servers[].hostname` | string | Valid hostname or IP Address | The DNS Hostname. | True | N/A | -| `resolver.servers[].port` | int32 | Valid 32-bit integer | The DNS Port number. | False | N/A | -| `errorReturnConditions.noMatch` | integer | In range: `400` - `599` | The error code that needs to be used by the NGINX data plane when the backend service cannot find a token match or access is forbidden. | False | `403` | -| `errorReturnConditions.notSupplied` | integer | In range: `400` - `599` | The error code that needs to be used by the NGINX data plane when a token is not supplied. | False | `401` | -| `data.clientAppID` | string | Example:
      `"nginx-docs-client"` | Identifies the IdP Client making the token introspection request. | True | N/A | -| `data.clientSecret` | string | Example:
      `"db3e1-f100-420x-bfd0"` | The IdP Client secret/password. | True | N/A | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{
      }} - ---- - -## Enabling Server Name Indication - -A generic solution for running several HTTPS servers on a single IP address is the [TLS Server Name Indication (SNI)](https://en.wikipedia.org/wiki/Server_Name_Indication) extension [[RFC 6066]](https://www.rfc-editor.org/rfc/rfc6066), which allows a client to pass a requested server name during the SSL -handshake. This solution lets the server know which certificate it should use for the client connection. - -Some Authorization Servers require SNI to be enabled during the OAuth Introspection request SSL handshake. When an Authorization server needs SNI, the following error messages will appear in the NGINX error logs on the data plane proxy host. - -```log -2022/12/04 15:24:43 [warn] 9501#9501: *73 upstream server temporarily disabled while SSL -handshaking to upstream, client: 10.0.0.1, server: api.io, -request: "GET /api HTTP/1.1", subrequest: "/_oauth2_send_introspection_request_0a2f6842_default", -upstream: "https://test.oauth.com:443/test/oauth2/introspect", host: "api.io" - -2022/12/04 15:24:43 [error] 9501#9501: *73 js: OAuth unexpected response from introspection server -(HTTP 502): {"message":"Bad Gateway","status":"502"} - -2022/12/04 15:25:27 [error] 9500#9500: *79 SSL_do_handshake() failed (SSL: error:14094410:SSL -routines:ssl3_read_bytes:sslv3 alert handshake failure:SSL alert number 40) while SSL handshaking -to upstream, client: 10.0.0.1, server: api.io, request: "GET /api HTTP/1.1", -subrequest: "/_oauth2_send_introspection_request_0a2f6842_default", -upstream: "https://test.oauth.com:443/test/oauth2/introspect", host: "api.io" -``` - -To enable sending the SNI with the OAuth Introspection request, set the `oauth-introspection` policy `action.enableSNI` value to `true`. By default, the host part of the `action.introspectionRequest` value is used. To override the default behavior and send a different server name through SNI, set `action.proxyTLSName` as the server name required to verify the certificate of the Authorization Server. - -
      -JSON request - -```json -{ - "policies": { - "oauth2-introspection": [ - { - "action": { - "introspectionEndpoint": "https://example.idp.com:8443/oauth/v2/oauth-introspect", - "enableSNI": true, - "proxyTLSName": "test.oauth.com" - } - } - ] - } -} -``` - -
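If the certificate presented by the Authorization Server already matches the host in `action.introspectionEndpoint`, the override is not needed. The sketch below (illustrative only) shows that simpler form, where the SNI value defaults to the host part of the introspection endpoint, `example.idp.com` in this case.

```json
{
  "policies": {
    "oauth2-introspection": [
      {
        "action": {
          "introspectionEndpoint": "https://example.idp.com:8443/oauth/v2/oauth-introspect",
          "enableSNI": true
        }
      }
    ]
  }
}
```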
      - -If the override value provided in `action.proxyTLSName` is incorrect, the Authorization Server should respond with a `4xx` client error code. The following error log is an example of an incorrect override `action.proxyTLSName` found in the NGINX error logs on the data plane proxy host. - -```log -2022/12/04 15:27:12 [error] 7477#7477: *50 js: OAuth unexpected response from -introspection server (HTTP 403): Forbidden -``` - -In this example, the end user also gets a `403 Forbidden` response from the data plane proxy. If `action.cacheIntrospectionResponse` is enabled and `action.proxyTLSName` is changed from a correct value to an incorrect value, the cached access token is valid until it expires. When the cached access token expires, end users will see their requests to the data plane proxy return with `403 Forbidden` responses. - -The NGINX OAuth2 Introspection configuration used by API Connectivity Manager does not cache tokens if the introspection request status code is anything other than `200 Success`. Any introspection requests with user access tokens returning `4xx` or `5xx` response codes will work once the policy introspection configuration is corrected and the Authorization Server responds with status code `200`. - -## Policy Interoperability Considerations - -It is only possible to configure one OAuth2 Introspection Policy per Proxy in API Connectivity Manager. Only one set of `clientAppId` credentials can be -configured per OAuth2 Introspection Policy. - -While an OAuth2 Introspection policy is configured for a Proxy in API Connectivity Manager it is not possible to configure any of the following policies on -the same Proxy: - -1. API Key Authentication -2. Basic Authentication -3. JWT Assertion - -Similarly, if any of the above three policies are configured for a Proxy in API Connectivity Manager, it is not possible to additionally configure an OAuth -2.0 Introspection Policy. - -## Security Considerations - -### Token Caching - -Consumers of the introspection endpoint may wish to cache the response of the endpoint for performance reasons. As such, it is important -to consider the performance and security trade-offs when deciding to cache the values. For example, shorter cache expiration times will -result in higher security since the resource servers will have to query the introspection endpoint more frequently, but will result in an -increased load on the endpoint. Longer expiration times leave a window open where a token may actually be expired or revoked, but still be -able to be used at a resource server for the remaining duration of the cache time. - -One way to mitigate this problem is for consumers to never cache the value beyond the expiration time of the token, which would have been -returned in the `“exp”` parameter of the introspection response. - -### JWT Introspection Responses - -The introspection response type `application/jwt`, configured through `action.introspectionResponse`, has not had its security protocol -specification finalised at the time of development and writing it remains in **DRAFT** state. The draft specification _JWT Response for_ -_OAuth Token Introspection_ can be found [here](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-jwt-introspection-response). - -While in this state the specification is likely to change at any time, and how we implement it in API Connectivity Manager may change to meet the requirements of -the specification. 
We recommend that the default OAuth2 Introspection response type `application/json` is used for all production -scenarios. - -## Related Links - -- [RFC-6749: The OAuth2 Authorization Framework](https://www.rfc-editor.org/rfc/rfc6749) -- [RFC 7662: OAuth2 Token Introspection](https://www.rfc-editor.org/rfc/rfc7662) -- [IETF-Draft: JWT Response for OAuth Token Introspection](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-jwt-introspection-response) -- [NGINX time measurement syntax](http://nginx.org/en/docs/syntax.html) diff --git a/content/nms/acm/how-to/policies/jwt-assertion.md b/content/nms/acm/how-to/policies/jwt-assertion.md deleted file mode 100644 index 29608ca28..000000000 --- a/content/nms/acm/how-to/policies/jwt-assertion.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - secure API Gateways by applying an OAuth2 JSON Web Token (JWT) Assertion policy. -nd-docs: DOCS-1119 -toc: true -weight: 900 -title: JWT Assertion -type: -- concept ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## OAuth2 JWT Assertion - -Authentication & authorization policies allow a user to restrict access to their APIs by determining the caller's identity and access level. There are several API Gateway authentication/authorization policy types supported by API Connectivity Manager: API key authentication, basic authentication, OAuth2 JWT assertion, and OAuth2 token introspection. This guide focuses specifically on OAuth2 JWT Assertion. - -[JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519) (JWTs, pronounced “jots”) are a compact and highly portable means of exchanging identity information. JWTs can be used for client authorization and are a better way to control access to web‑based APIs than traditional API keys. Using JWTs as API keys provides a high‑performance alternative to traditional API keys, combining best‑practice authorization technology with a standards‑based schema for exchanging identity attributes. - -API Connectivity Manager API owners can restrict access to their APIs with JWTs. The API Proxy Policy can be configured to grant access to APIs only after validating a client's JWT. - -{{OAuth2 JWT Assertion Flow.}} - ---- - -## Anatomy of a JWT - -JWTs have three parts: a header, a payload, and a signature. In transmission, they look like the following (line breaks have been added for readability, the actual JWT is a single string): - -```jwt -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9. -eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ. -SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c -``` - -A period (`.`) separates the header, payload, and signature. The header and payload are [Base64‑encoded](https://www.rfc-editor.org/rfc/rfc4648#section-5) JSON objects. The signature is encrypted using the algorithm specified by the alg header, which we can see when we decode our sample JWT: - - -{{}} - -| | Encoded | Decoded | -|---------|---------|---------| -| Header | `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9` | `{`
          `"alg": "HS256",`
          `"typ": "JWT"`
      `}` | -| Payload | `eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6`
      `IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ`= | `{`
          `"sub": "1234567890",`
          `"name": "John Doe",`
          `"iat": 1516239022`
      `}` | - -{{
      }} - - -The [JWT standard](https://www.rfc-editor.org/rfc/rfc7519) defines several signature algorithms. The value HS256 in the example refers to HMAC SHA‑256. F5 NGINX Plus supports the HSxxx, RSxxx, and ESxxx [signature algorithms](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) that are defined in the [standard](https://www.rfc-editor.org/rfc/rfc7518#section-3.1). The ability to cryptographically sign JWTs makes them ideal to be used for client authorization. - ---- - -## How NGINX Plus Validates a JWT - -A JWT is considered to be valid when the following conditions are met: - -1. The signature can be verified with a local or remote [JSON Web Key](https://datatracker.ietf.org/doc/html/rfc7517) (matching on the `kid` (“key ID”), if present, and `alg` (“algorithm”) header fields). -2. The JWT is presented inside the validity period when defined by one or both of the `nbf` (“not before”) and `exp` (“expires”) claims. - ---- - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with an [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}). -- You have published one or more [API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}) - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -### How to Access the REST API - -{{< include "acm/how-to/access-acm-api" >}} - ---- - -## Create an OAuth2 JWT Assertion Policy - -Take the steps in this section if you would like to restrict access to APIs to clients with a valid JWT. You can set up an OAuth2 JWT Assertion policy using either the web interface or the REST API. - -{{}} -{{%tab name="API"%}} - -Send a `POST` request to add the OAuth2 JWT Assertion policy to the API Proxy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -{{< call-out "warning" >}} Local JSON Web Key usage with the policy configuration value `jwksKeys[]` is recommended for test/debugging environments only. For production environments, `jwksURI` should be used for remote JSON Web Key retrieval. {{< /call-out >}} -{{< call-out "note" >}} While all request body configuration values are presented in the request body structure example below, not all configuration values are compatible. See the configuration value description table for further information. {{< /call-out >}} - -```json -{ - "policies": { - "oauth2-jwt-assertion": [ - { - "action": { - "jwksURI": "https://idp.io:8443/oauth/certs", - "cacheKeysDuration": "12h", - "jwksKeys": [ - { - "k": "bXlzZWNyZXQ", - "kid": "0001", - "kty": "oct" - } - ], - "tokenName": "Authorization", - "tokenSuppliedIn": "HEADER", - "errorReturnConditions": { - "notSupplied": { - "returnCode": 401 - }, - "noMatch": { - "returnCode": 403 - } - } - } - } - ] - } -} -``` - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default value | -|---|---|---|---|---|---| -| `jwksURI` | string | Example:
      `https://idp.io:8443/oauth/certs` | URI endpoint that contains public keys used to verify the JWT signature.

      Not required if `jwksKeys[]` is populated. | Semi-optional | N/A | -| `cacheKeysDuration` | string | Example: `12h` | Specifies how long the keys will be cached. Keys will be refreshed from the URI endpoint after the duration.

      Only valid for `jwksURI`, not applicable for `jwksKeys[]`. Follows [NGINX configuration time measurement](http://nginx.org/en/docs/syntax.html) units syntax. | No | `"12h"` | -| `jwksKeys[]` | array of JSON Web Keys | Example in policy request body. | Keys to be used to verify JWT signatures. User should supply key data in valid JSON Web Key format. See related standards for [JWK](https://datatracker.ietf.org/doc/html/rfc7517), [JWK Set Format](https://datatracker.ietf.org/doc/html/rfc7517#section-5), and the [jwksKeys parameter](https://datatracker.ietf.org/doc/html/rfc7517#section-5.1).

      Not required if `jwksURI` is populated. | Semi-optional | N/A | -| `tokenName` | string | Example: `Authorization` | The name of the header or query parameter where the JWT will be located in the API request.

In the default case of the `Authorization` header, the JWT is required to adhere to the [Bearer Token usage](https://www.rfc-editor.org/rfc/rfc6750) standard.

Example: `Authorization: Bearer <JWT>` where `<JWT>` is the Base64 encoded Client JWT. | No | `"Authorization"` | -| `tokenSuppliedIn` | string | One of: [`"HEADER"`, `"QUERY"`] | Specifies where the access token is supplied in the incoming user request. | No | `"HEADER"` | -| `errorReturnConditions`
      `.notSupplied`
`.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when a JWT is not supplied. | No | 401 | -| `errorReturnConditions`
      `.noMatch`
      `.returnCode` | int | In range `400-599` | The error code that is returned from the API Proxy when an invalid JWT is supplied. | No | 403 | - -{{< /bootstrap-table >}} - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy. -2. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy. -3. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **JSON Web Token Assertion**. -4. Choose the **JSON Web Key Set (JWKS) source**, for remote JWKS select **Enter a URI**, for local JWKS select **Enter a JWKS**. - - For JWKS Uri enter the JWKS URI as **URI location** and specify for how long the API Proxy should **cache the keys**, set to **0** to disable. - - For JWKS add an array of JSON Web Keys in JSON Web Key Set format. See related standards for [JWK](https://datatracker.ietf.org/doc/html/rfc7517), [JWK Set Format](https://datatracker.ietf.org/doc/html/rfc7517#section-5), and the [Keys](https://datatracker.ietf.org/doc/html/rfc7517#section-5.1) parameter. Example usage: - - ```json - { - "keys": [ - { - "k": "bXlzZWNyZXQ", - "kid": "0001", - "kty": "oct" - } - ] - } - ``` - -5. Specify **how the token is presented** in the request, either in the request **Headers** or as a **Query** parameter.. -6. Set custom error return code conditions if an JWT is **not supplied** or **validation fails**. -7. Select **Add** to apply the OAuth2 JWT Assertion policy to the Proxy. Then select **Save & Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} -{{
      }} - -## Related Links - -- [NGINX Blog: Authenticating API Clients with JWT and NGINX Plus](https://www.nginx.com/blog/authenticating-api-clients-jwt-nginx-plus/#Configuring-NGINX Plus-as-an-Authenticating-API-Gateway) -- [[RFC-6749] The OAuth 2.0 Authorization Framework](https://www.rfc-editor.org/rfc/rfc6749) -- [[RFC-6750] The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://www.rfc-editor.org/rfc/rfc6750) -- [[RFC-7517] JSON Web Key (JWK)](https://datatracker.ietf.org/doc/html/rfc7517) -- [[RFC-7519] JSON Web Token (JWT)](https://datatracker.ietf.org/doc/html/rfc7519) -- [[RFC-7521] Assertion Framework for OAuth 2.0 Client Authentication and Authorization Grants](https://www.rfc-editor.org/rfc/rfc7521) -- [[RFC-7523] JSON Web Token (JWT) Profile for OAuth 2.0 Client Authentication and Authorization Grants](https://www.rfc-editor.org/rfc/rfc7523) diff --git a/content/nms/acm/how-to/policies/log-format.md b/content/nms/acm/how-to/policies/log-format.md deleted file mode 100644 index b00c5a3dd..000000000 --- a/content/nms/acm/how-to/policies/log-format.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -description: As an Infrastructure Administrator, use this guide to implement a standard - log format for all environments hosting APIs. -nd-docs: DOCS-1127 -toc: true -weight: 700 -title: Log Format -type: -- how-to -- reference ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## About the Policy - -The Log Format policy enables Infrastructure Admins to set the format for access logs. Detailed access logs are generated in either JSON (default) or Syslog format and are applied to new environments automatically. This policy can be customized to filter log content, adjust log severity levels, and designate log destinations. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here's what you need to do: - -- [Edit an existing environment or create a new one]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). -- Check the advanced settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each environment. Save and publish the changes. - ---- - -## Policy Settings {#policy-settings} - -The following table lists the configurable settings and their default values for the policy. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values |
      Description
      | Required | Default value | -|----------------------------------------------|-------------|--------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------------------------------------------------------| -| `type` | string | One of:
      `[JSON,`
      `NATIVE]` | The access logs can be created in either JSON or native NGINX log format (Syslog). | Yes | `JSON` | -| `logFormat.include` | string/enum | One or more of:
      `["BASIC",`
      `"INGRESS",`
      `"BACKEND",`
      `"RESPONSE"]` | Specifies what information should be logged. | No | `["BASIC",`
      `"INGRESS",`
      `"BACKEND",`
      `"RESPONSE"]` | -| `logFormat.variables` | string/enum | `List of values` | Only variables included in this array will be logged; everything else will be hidden. | No | `Empty list []` | -| `errorLogSeverity` | string | One of:
      `[DEBUG,`
      `INFO,`
      `NOTICE,`
      `WARN,`
      `ERROR,`
      `CRIT,`
      `ALERT,`
      `EMERG]` | The minimum severity level of errors that will be logged. | No | `WARN` | -| `logDestination.type` | string/enum | One of:
      `["FILE",`
      `"SYSLOG"]` | The destination for the log output, either a file or syslog. | Yes | `FILE` | -| `logDestination.`
      `accessLogFileLocation` | string | `/var/log/nginx` | The directory in which the access log file will be saved. The directory can be any valid UNIX filepath, with relative paths being relative to the default NGINX configuration directory (`/etc/nginx/`). | Yes | `/var/log/nginx` | -| `logDestination.`
      `errorLogFileLocation` | string | `/var/log/nginx` | The directory in which the error log file will be saved. This directory can be any valid UNIX filepath, with relative paths being relative to the default NGINX configuration directory (`/etc/nginx/`). | No | `/var/log/nginx` | -| `enablePrettyPrint` | boolean | `true`,
      `false` | This setting adds whitespace and indentation to make JSON logs more easily readable for humans. This setting is applicable only when the `type` is set to `JSON`. | No | `false` | - -{{< /bootstrap-table >}} - - ---- - -## Applying the Policy - -In API Connectivity Manager, when an Infrastructure Administrator creates an environment, the following log format policy is applied by default: - -- Logs are in JSON format -- Logs are written to file -- Logs are saved to `/var/log/nginx` - -If these default options don't meet your requirements, you can customize the policy to suit your specific needs. Refer to the [Policy Settings](#policy-settings) section for the configurable options. - -
      - -{{}} -{{%tab name="API"%}} - -
      - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To add the Log Format policy using the REST API, send an HTTP `POST` request to the Environments endpoint. - - -{{}} - -| Method | Endpoint | -|--------|-------------------------------------------------------| -| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | - -{{}} - - -
      -JSON request - -```json -{ - "policies": { - "log-format": [ - { - "action": { - "enablePrettyPrint": false, - "errorLogSeverity": "WARN", - "logDestination": { - "type": "FILE", - "accessLogFileLocation": "/var/log/nginx/", - "errorLogFileLocation": "/var/log/nginx/" - }, - "logFormat": { - "include": [ - "BASIC", - "INGRESS", - "BACKEND", - "RESPONSE" - ], - "variables": [] - }, - "type": "JSON" - } - } - ] - } -} -``` - -This JSON example defines the log format policy for an environment: the error log severity level is set to `WARN`; the log file location is `/var/log/nginx/`; and the log format includes `BASIC`, `INGRESS`, `BACKEND`, and `RESPONSE` information without any variables specified to limit what is logged. The pretty print feature is disabled, and the log type is set to `JSON`. - -
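As a variation on the example above, the following sketch narrows the access log to `BASIC` and `RESPONSE` information, pretty-prints the JSON output, and raises the error log threshold to `ERROR`. It is illustrative only; every field and value comes from the [Policy Settings](#policy-settings) table, and omitted optional fields are expected to fall back to the defaults listed there.

```json
{
  "policies": {
    "log-format": [
      {
        "action": {
          "type": "JSON",
          "enablePrettyPrint": true,
          "errorLogSeverity": "ERROR",
          "logFormat": {
            "include": ["BASIC", "RESPONSE"]
          },
          "logDestination": {
            "type": "FILE",
            "accessLogFileLocation": "/var/log/nginx/",
            "errorLogFileLocation": "/var/log/nginx/"
          }
        }
      }
    ]
  }
}
```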
      - -{{%/tab%}} -{{%tab name="UI"%}} - -
      - -To add the Log Format policy using the web interface: - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. On the left menu, select **Infrastructure**. -3. On the *Workspaces* page, select a workspace in the list to which you'll add an environment. -4. Select **Add** to add the environment. -5. On the *Create Environment* form, complete the necessary fields: - - - **Name**: Enter a name for the environment. - - **Description**: Describe the environment in a few words. - - **Type**: Select whether this is a production environment or not. - - **API Gateways**: Enter the API Gateway's name and hostname. - -6. Select **Create**. -7. On the *Environment Created* confirmation page, select **Go to \**. -8. In the *API Gateways* list, select the **Actions** menu (represented by an ellipsis, `...`). Then select **Edit Advanced Config**. -9. On the left menu, select **Global Policies**. -10. In the list of Global Policies, the Log Format policy should be enabled by default. To edit the policy, select the ellipsis icon (`...`), then select **Edit Policy**. -11. Customize the policy settings to suit your requirements. Refer to the [Policy Settings](#policy-settings) section for an overview of the available options and their possible configurations. -12. Select **Save** to save the changes. -13. Select **Save and Submit** to publish the policy changes to the environment. - -{{%/tab%}} -{{
      }} diff --git a/content/nms/acm/how-to/policies/manage-policies.md b/content/nms/acm/how-to/policies/manage-policies.md deleted file mode 100644 index 55406ed95..000000000 --- a/content/nms/acm/how-to/policies/manage-policies.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - set up policies. -nd-docs: DOCS-925 -toc: true -weight: 100 -title: How to Set Up Policies -type: -- how-to ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -{{< include "acm/how-to/policies-intro.md" >}} - ---- - -### Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with [API Gateways]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) or [Developer Portals]({{< ref "/nms/acm/getting-started/add-devportal" >}}). - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - ---- - -## Set Up Global Policies - -Global Policies are configured at the environment level and apply to all clusters and proxies within the environment. - -{{< include "acm/about/global-policies.md" >}} - -
      - -To manage Global Policies, take the steps below: - -1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments**. -2. Select the Environment that holds the cluster that you want to configure, then select the **Cluster** name. -3. Select the **Manage** icon for the cluster that you want to configure. -4. Select the **Global Policies** tab. -5. [**Add**](#add-cluster-policy), [**Edit**](#edit-cluster-policy), or [**Remove**](#remove-cluster-policy) as desired. -6. **Save and Submit** your changes. - -### Add a Policy {#add-cluster-policy} - -Take the steps in this section to add a new policy to a cluster. - -1. Go to **Manage > Global Policies** for the cluster. -1. Select **Add Policy** from the policy's **Actions** menu. -1. Complete the form provided to configure the policy, then select **Add**. -1. **Save and Submit** your changes. - -### Edit a Policy {#edit-cluster-policy} - -To edit a policy, take the steps below. - -1. Go to **Manage > Global Policies** for the cluster. -1. Select **Edit Policy** from the policy's **Actions** menu. -1. Edit the policy as needed. -1. Select **Save** and **Save and Submit**. - -### Remove a Policy {#remove-cluster-policy} - -To remove a policy, take the steps below. - -1. Go to the **Global Policies** tab for the cluster. -1. Select **Remove Policy** from the policy's **Actions** menu. - ---- - -## Set Up API Proxy Policies - -{{< include "acm/about/api-proxy-policies.md" >}} - -
      - -Any Global Policies will automatically be applied when you add an API Proxy. -You can also configure any of the optional policies at the proxy level. - -To manage Proxy Policies, take the steps below. - -1. In the API Connectivity Manager user interface, go to **Services > Workspaces > Proxies**. -1. Select **Edit Proxy** from the **Actions** menu for the Proxy that you want to configure. -1. Select the **Policies** tab. -1. [**Add**](#add-proxy-policy), [**Edit**](#edit-proxy-policy), or [**Remove**](#remove-proxy-policy) as desired. -1. **Save and Publish** your changes. - -### Add a Policy {#add-proxy-policy} - -Take the steps in this section to add a new policy to a cluster. - -1. Go to **Edit Proxy > Policies**. -1. Select **Add Policy** from the policy's Actions menu. -1. Complete the form to configure the policy, then select the **Add** button. -1. **Save and Submit** your changes. - -### Edit a Policy {#edit-proxy-policy} - -Take the steps below to edit a policy. - -1. Go to **Edit Proxy > Policies**. -1. Select **Edit Policy** from the policy's Actions menu. -1. Edit the policy as needed. -1. Select **Save**, then **Save and Publish**. - -### Remove a Policy {#remove-proxy-policy} - -To remove a policy, take the steps below. - -1. Go to **Edit Proxy > Policies**. -1. Select **Remove Policy** from the policy's Actions menu. - ---- - -## Set Up Cluster Policies - -Cluster Policies are applied to all the proxies belongnig to the desired cluster. In another words, these policies are applied to a cluster of F5 NGINX Plus instances which can have one or more API Gateways and Developer Portals deployed on them. - -The following table shows the available Cluster Policies you can use when creating a new cluster. - -
      - -**Legend:** - -- = Supported -- = Applied by default - -{{}} - -| Policy Name | HTTP Environment | gRPC Environment | Applied On | Description | -|-------------------------------------------------------------------|-------------------------------------------------|-------------------------------------------------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Cluster Wide Config Setting]({{< ref "/nms/acm/how-to/policies/cluster-wide-config.md" >}}) | | | inbound | Fine tune the settings to speed up data processing and improve the performance of the API proxy for large number of connections. When applied, the settings are applicable to all the instances in a proxy cluster. If the proxy cluster is shared between environments, the changes made in any environment will be reflected in all the other environments. | -| [Cluster Zone Sync]({{< ref "/nms/acm/how-to/policies/cluster-zone-sync.md" >}}) | | | inbound | Enables runtime state sharing between the instances belonging to a proxy cluster. Options configured through this policy affect other policies such as rate limit and OIDC. This policy is applied to all the instances in a proxy cluster. If the proxy cluster is shared between environments, any changes made to this policy will affect all the other environments. | - -{{}} - -
      - -To manage Cluster Policies, take the steps below: - -1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments**. -2. Select the Environment that holds the cluster that you want to configure, then select the **Cluster** name. -3. Select the **Manage** icon for the cluster that you want to configure. -4. Select the **Cluster Policies** tab. -5. [**Add**](#add-cluster-policy), [**Edit**](#edit-cluster-policy), or [**Remove**](#remove-cluster-policy) as desired. -6. **Save and Submit** your changes. - -### Add a Policy {#add-cluster-policy} - -Take the steps in this section to add a new policy to a cluster. - -1. Go to **Manage > Cluster Policies** for the cluster. -1. Select **Add Policy** from the policy's **Actions** menu. -1. Complete the form provided to configure the policy, then select **Add**. -1. **Save and Submit** your changes. - -### Edit a Policy {#edit-cluster-policy} - -To edit a policy, take the steps below. - -1. Go to **Manage > Cluster Policies** for the cluster. -1. Select **Edit Policy** from the policy's **Actions** menu. -1. Edit the policy as needed. -1. Select **Save** and **Save and Submit**. - -### Remove a Policy {#remove-cluster-policy} - -To remove a policy, take the steps below. - -1. Go to the **Cluster Policies** tab for the cluster. -1. Select **Remove Policy** from the policy's **Actions** menu. - ---- diff --git a/content/nms/acm/how-to/policies/openID-connect.md b/content/nms/acm/how-to/policies/openID-connect.md deleted file mode 100644 index 1dfb1ba59..000000000 --- a/content/nms/acm/how-to/policies/openID-connect.md +++ /dev/null @@ -1,428 +0,0 @@ ---- -description: As an Infrastructure Administrator, use this guide to configure OpenID - Connect policy to enable Single Sign On for the gateways. -nd-docs: DOCS-1134 -title: OpenID Connect -toc: true -weight: 910 -type: -- how-to -- reference ---- - ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## About OpenID Connect Policy - -OpenID Connect (OIDC) builds on OAuth 2.0 to offer an identity layer and a unified authentication process for securing APIs, native apps, and web applications. Clients can authenticate an end-user's identity by using an Authorization Server. End-user information is communicated using claims in a security token called an identity token. - -The OpenID Connect policy for API Connectivity Manager provides users with a convenient and secure single sign-on experience, allowing them to log in to multiple OAuth-enabled applications with a single set of credentials. This policy can be easily integrated with any compatible identity provider, providing single sign-on access to both API gateways and Developer Portals. - -### Intended Audience - -{{< include "acm/how-to/policies/api-owner-persona.md">}} - ---- - -## Before You Begin - -Before configuring API gateways and Developer Portals as OpenID Connect relying parties (RPs), you need to gather the necessary Identity provider (IDP) details: - -- IDP's well-known endpoints -- Client ID -- Client Secret (needed depending on the OAuth flow) - -{{< call-out "note" >}} - -The Developer Portal supports both PCKE and AuthCode [authorization code flows](https://auth0.com/docs/get-started/authentication-and-authorization-flow/authorization-code-flow). 
- -{{< /call-out >}} - ---- - -## Workflow for Applying Policy - -To apply the OpenID Connect (OIDC) policy or make changes to it, here's what you need to do: - -- [Edit an existing environment or create a new one]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). -- Select the cluster on which to apply the policy. -- Check the advanced settings to see if the policy has been applied. -- Edit the policy as needed. -- Save and publish the changes. - ---- - -## Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Field | Type | Possible Values | Description | Required | Default |
|-------|------|-----------------|-------------|----------|---------|
| `authFlowType` | string | `AUTHCODE`, `PKCE` | The OAuth 2.0 flow used by the policy. PKCE is an OAuth 2.0 security extension for public clients (such as mobile apps) that prevents a malicious program on the same device from intercepting the authorization code. | No | `AUTHCODE` |
| `authorizationEndpoint` | string | Example: `https://accounts.google.com/o/oauth2/v2/auth` | URL of the IdP's OAuth 2.0 Authorization Endpoint. | Yes | N/A |
| `authzParams.key` | string | Between 1 and 128 characters | | No | N/A |
| `authzParams.paramType` | string | `QUERY`, `PATH`, `HEADER` | | No | N/A |
| `authzParams.value` | string | Between 1 and 128 characters | | No | N/A |
| `errorReturnConditions.noMatch.returnCode` | integer | In range 100-599 | The error code that needs to be used by the NGINX data plane when the supplied credentials are invalid or do not match. | No | 403 |
| `errorReturnConditions.notSupplied.returnCode` | integer | In range 100-599 | The error code that needs to be used by the NGINX data plane when credentials are not supplied. | No | 401 |
| `jwksURI` | string | Example: `https://www.googleapis.com/oauth2/v3/certs` | | Yes | N/A |
| `logOffEndpoint` | string | Example: `https://oauth2.googleapis.com/revoke` | | Yes | N/A |
| `logOutParams.key` | string | Between 1 and 128 characters | | No | N/A |
| `logOutParams.paramType` | string | `QUERY`, `PATH`, `HEADER` | | No | N/A |
| `logOutParams.value` | string | Between 1 and 512 characters | | No | N/A |
| `resolver.enableIPv6` | boolean | `true`/`false` | | No | `false` |
| `resolver.servers.hostname` | string | Between 1 and 253 characters | | Yes | N/A |
| `resolver.servers.port` | integer | In range 1-65535 | | Yes | 80 |
| `resolver.timeout` | string | Example: `30s` (between 2 and 14 characters) | | No | `30s` |
| `resolver.valid` | string | Example: `24s` (between 2 and 14 characters) | | No | `30s` |
| `returnTokenToClientOnLogin` | string | `id_token`, `none` | Optionally return the token as a query parameter to the app after successful login. | No | N/A |
| `tokenEndpoint` | string | Example: `https://oauth2.googleapis.com/token` | URL of the IdP's OAuth 2.0 Token Endpoint. | Yes | N/A |
| `tokenParams.key` | string | Between 1 and 128 characters | | No | N/A |
| `tokenParams.paramType` | string | `QUERY`, `PATH`, `HEADER` | | No | N/A |
| `tokenParams.value` | string | Between 1 and 512 characters | | No | N/A |
| `uris.loginURI` | string | Example: `/login` | This location is called by the frontend to log in to the IdP using OpenID Connect. | No | N/A |
| `uris.logoutURI` | string | Example: `/logout` | This location is called by the UI to handle OIDC logout with the IdP as per https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogout. | No | N/A |
| `uris.redirectURI` | string | Example: `/_codexch` | This location is called by the IdP after successful authentication. | No | N/A |
| `uris.userInfoURI` | string | Example: `/userinfo` | This location is called by the frontend to retrieve user info via the IdP. | No | N/A |
| `userInfoEndpoint` | string | Example: `https://openidconnect.googleapis.com/v1/userinfo` | URL of the IdP's UserInfo Endpoint. | Yes | N/A |
| `userRegistration` | string | | User registration URLs; can be used to specify customer or workforce registration URLs. | No | N/A |
| `wellKnownEndpoint` | string | Example: `https://accounts.google.com/.well-known/openid-configuration` |
      - - -{{}} - - ---- - -You can set up an OIDC policy by using either the web interface or the REST API. - -## Applying the Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To apply the OpenID Connect policy using the REST API, send an HTTP `POST` request to the Environments endpoint. - - -{{}} - -| Method | Endpoint | -|--------|-----------------------------------------------------------------------| -| POST | `/api/v1/infrastructure/workspaces/{proxyWorkspaceName}/environments` | - -{{}} - - - -JSON request - -```json -{ - "policies": { - "oidc-authz": [ - { - "action": { - "authFlowType": "PKCE", - "authorizationEndpoint": "https:///v1/Authorize", - "jwksURI": "https:///v1/keys", - "logOffEndpoint": "https:///v1/logout", - "tokenEndpoint": "https:///v1/Token", - "userInfoEndpoint": "https:///v1/userinfo", - "uris": { - "loginURI": "/login", - "logoutURI": "/logout", - "redirectURI": "/_codexch", - "userInfoURI": "/userinfo" - }, - "returnTokenToClientOnLogin": "none", - "forwardTokenToBackend": "access_token", - "errorReturnConditions": { - "noMatch": { - "returnCode": 403 - }, - "notSupplied": { - "returnCode": 401 - } - } - }, - "data": [ - { - "clientID": "myclientID1234", - "scopes": "email+openid+profile" - } - ] - } - ] - } -} -``` - -This JSON defines an OpenID Connect (OIDC) authorization policy. It specifies the URL endpoints for the authorization, token, and user info services, as well as the URIs for login, logout, and redirect activities. It also defines that the client ID and scopes are "myclientID1234" and "email+openid+profile", respectively. Additionally, it specifies how to handle errors, such as returning a 403 code when there is no match and a 401 code when the data is not supplied. - -
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -1. {{< include "acm/webui-acm-login.md" >}} -1. On the left menu, select **Infrastructure**. -1. From the list of workspaces, select the workspace for your cluster's environment. -1. From the list of environments, select the environment for your cluster. -1. From the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. -1. On the left menu, select **Global Policies**. -1. Locate the **OpenID Connect Relying Party** policy in the list of policies. On the **Actions** menu (represented by an ellipsis, `...`), select **Add Policy**. -1. In the API Connectivity Manager user interface, go to **Infrastructure > Workspaces > Environments** and select the **Edit Advanced Config** from the **Actions** menu for the cluster you want to set up. -1. Select the **Global Policies** tab. -1. For **OpenID Connect Relying Party** select **Add Policy** from the policy's **Actions** menu. -1. Update **Application Settings**. - -{{< include "acm/how-to/update-application-settings.md" >}} - -12. Update **Authorization Server Settings** - -{{< include "acm/how-to/update-authorization-server-settings.md" >}} - -13. Update **General Settings** - -{{< include "acm/how-to/update-general-settings.md" >}} - -14. Update **Custom Error Handling**. - - You can customize how the proxy should handle the following error conditions: - -- when Client ID is not supplied -- when there is no match for the Client ID - - Specify the HTTP error code in the box next to the error condition. The specified error code will be displayed when the related error condition is true. - -15. Select **Add**. -1. Select **Save and Submit** your changes. - -{{%/tab%}} - -{{}} diff --git a/content/nms/acm/how-to/policies/proxy-cache.md b/content/nms/acm/how-to/policies/proxy-cache.md deleted file mode 100644 index dc0becfe9..000000000 --- a/content/nms/acm/how-to/policies/proxy-cache.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - enable and configure caching to improve the performance of your API gateway proxy. -nd-docs: DOCS-1190 -title: Proxy Cache -toc: true -weight: null -type: -- reference ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro.md" >}} - ---- - -## About the Policy - -Enable and configure caching to improve the performance of your API Gateways, speed up delivery to clients, and reduce the load on the backend API runtimes. When caching is enabled, the API Gateway saves responses to a disk cache and uses them to respond to clients without having to proxy requests for the same content every time. - -By default, the API Gateway caches all responses to requests made with the HTTP GET and HEAD methods the first time such responses are received from a proxied server. The API Gateway uses the request string as a request's key (identifier). If a request has the same key as a cached response, the API Gateway sends the cached response to the client. You can customize and control which responses are cached. - -Fine-tune the cache for further improvements in performance by instructing it to use conditional GET requests when refreshing content from origin servers, set a minimum request number to cache content, enable background update, and cache lock. 
- -### Intended Audience - -{{< include "acm/how-to/policies/api-owner-persona.md">}} - ---- - -## How to apply the policy - -- Create an API proxy or edit an existing one. -- Check the advanced settings for the API proxy to see if the policy has been applied. -- Edit the policy to make changes for each API proxy. Save and publish the changes. - ---- - -## Policy Settings - -The following table lists the configurable settings and their default values for the policy. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values | Description | Required | Default | -|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------| -| `httpMethods` | array | GET, HEAD, POST | HTTP request methods to cache. | No | GET, HEAD | -| `cacheKey` | array | host, requestURI, scheme, cookieJSessionID, cookieUser | Keys to be cached. 'host' is the name & port of the proxied server. 'requestURI' is the URI of the request. 'scheme' is the protocol used to access the resource on the proxied server. 'cookieJSessionID' is the cookie used for session management. 'cookieUser' is the cookie used for user management. | No | host, requestURI, scheme | -| `maxCacheSize` | string | 1K (available units - K, M, G) | Upper limit of the size of the cache. | No | 1G | -| `cacheValidTime` | string | 1s (available units - s, m, h) | Enforces an expiration for the cached data. | No | 10m | -| `minUseOfProxyToCache` | integer | 1 | Minimum number of client requests before caching is enabled. | No | 1 | -| `reValidate` | boolean | true/false | Enables revalidation of expired cache items using conditional GET requests with the If-Modified-Since and If-None-Match header fields. | No | false | -| `backgroundUpdate` | boolean | true/false | Enables delivery of stale content when clients request an item that is in the process of being updated from the origin server. All updates will be done in the background. The stale file is returned for all requests until the updated file is fully downloaded. | No | false | -| `stale.backendErrors` | array | error, timeout, invalid_header, updating | Determines in which cases a stale cached response can be used during communication with the proxied server. | No | | -| `stale.backendCodes` | array | 403, 404, 429, 500, 502, 503, 504 | Determines for which HTTP status codes a stale cached response can be used during communication with the proxied server. | No | | -| `cacheLock.enabled` | boolean | true/false | When enabled, only one request at a time will be allowed to populate a new cache element identified according to the cacheKey by passing a request to a proxied server. | No | false | -| `cacheLock.age` | string | 1s (available units - s, m, h) | If the last request passed to the proxied server for populating a new cache element has not completed for the specified time, one more request may be passed to the proxied server. | No | 5s | -| `cacheLock.timeout` | string | 1s (available units - s, m, h) | Sets a timeout for cacheLock; When the time expires, the request will be passed to the proxied server, however, the response will not be cached. | No | 5s | - -{{< /bootstrap-table >}} - - ---- - -## Adding the Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To apply the Proxy Cache policy using the REST API, send an HTTP `POST` request to the Proxies endpoint. 
- - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|--------|---------------------| -| `POST` | `/services/workspaces/{workspaceName}/proxies` | - -{{}} - - -
      -JSON request - -``` json -{ - "policies": { - "proxy-cache": [ - { - "action": { - "httpMethods": [ - "GET", - "HEAD", - "POST" - ], - "cacheKey": [ - "host", - "requestURI", - "scheme", - "cookieJSessionID", - "cookieUser" - ], - "maxCacheSize": "1G", - "cacheValidTime": "10m", - "minUseOfProxyToCache": 1, - "reValidate": true, - "backgroundUpdate": true, - "stale": { - "backendErrors": [ - "error", - "timeout", - "invalid_header", - "updating" - ], - "backendErrorCodes": [ - 403, - 404, - 429, - 500, - 502, - 503, - 504 - ] - }, - "cacheLock": { - "enabled": true, - "age": "5s", - "timeout": "5s" - } - } - } - ] - } -} -``` - -
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To apply the Proxy Cache policy using the web interface: - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. On the left menu, select **Services**. -3. Select a workspace in the list that contains the API Proxy you want to update. -4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**. -5. On the left menu, select **API Proxy > Advanced > Policies**. -6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **Proxy Cache**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Add Policy**. -7. Modify the configuration as needed. -8. Select **Add** to apply the policy to the API Proxy. -9. Select **Save and Publish** to deploy the configuration to the API Proxy. - -{{%/tab%}} - -{{
      }} - ---- diff --git a/content/nms/acm/how-to/policies/proxy-request-headers.md b/content/nms/acm/how-to/policies/proxy-request-headers.md deleted file mode 100644 index 7abe837b9..000000000 --- a/content/nms/acm/how-to/policies/proxy-request-headers.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - set request headers to send to your backend services. -nd-docs: DOCS-1129 -toc: true -weight: 1100 -title: Proxy Request Headers -type: -- reference ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## About the Policy - -The Proxy Request Headers policy allows users to pass default and custom request headers to backend services. - -This policy is enabled by default when you [publish an API Proxy]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}). - -### Intended Audience - -{{< include "acm/how-to/policies/api-owner-persona.md">}} - ---- - -## Before You Begin - -To complete the steps in this guide, you need the following: - -- API Connectivity Manager is installed, licensed, and running. -- An [API gateway environment]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) -- A [published API Gateway]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}) - ---- - -## Policy Settings {#policy-settings} - -The following table lists the configurable settings and their default values for the policy. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default | -|-------------------------------------------------|----------|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------| -| `proxyDefaultHeadersToBackend` | boolean | `true`,
`false` | When set to `true`, the default headers are passed to backend services. For more information, refer to the [Default Headers]({{< ref "#default-headers" >}}) section. | No | `True` |
| `proxyCustomHeadersToBackend.key` | string | Example: `my-header` | The name of the HTTP header. | Yes | N/A |
| `proxyCustomHeadersToBackend.value` | string | Example: `var.test` | The value of the HTTP header. For more information, refer to the [Header Value Prefixes]({{< ref "#value-prefixes" >}}) section. | Yes | N/A |
| `proxyCustomHeadersToBackend.isSensitive` | boolean | `true`,
      `false` | When set to `false`, the header will not appear in logs. | No | `False` | - -{{< /bootstrap-table >}} - - -### Default Headers {#default-headers} - -{{< call-out "note" >}}When `proxyDefaultHeadersToBackend` is `true`, the following headers are applied.{{< /call-out >}} - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Header | Description | -|-------------------|------------------------------------------------------------| -| `Accept-Encoding` | Set to an empty string. | -| `Host` | Set to the IP address of the machine proxying the request. | -| `X-Real-IP` | Set to the IP client's address. | -| `Connection` | Set to an empty string. | - -{{< /bootstrap-table >}} - - -### Header Value Prefixes {#value-prefixes} - -{{< call-out "note" >}}When adding a custom header to `proxyCustomHeadersToBackend,` include one of the following prefixes for the `value` setting.{{< /call-out >}} - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Prefix | Example | Description | -|---------------|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `var` | var.content_length | Pass a [valid NGINX variable](http://nginx.org/en/docs/varindex.html). | -| `header` | header.referrer | Pass a header from the client request. | -| `client` | client.IP | Pass a value from the client if a [Basic Auth]({{< ref "/nms/acm/how-to/policies/basic-authn" >}}) or [API Key]({{< ref "/nms/acm/how-to/policies/apikey-authn" >}}) policy has been configured. | -| `stringValue` | stringValue.MyString | Pass a static string. | -| `token` | token.sub | Pass a value from the JSON Web Token (JWT) if the [OAuth2 JWT Assertion]({{< ref "/nms/acm/how-to/policies/jwt-assertion" >}}) policy has been configured. | - -{{< /bootstrap-table >}} - - ---- - -## Applying the Policy - -You can apply this policy using either the web interface or the REST API. - -
      - -{{}} -{{%tab name="API"%}} - -
      - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To apply the Proxy Request Headers policy using the REST API, send an HTTP `PUT` request to the Proxies endpoint. - - -{{}} - -| Method | Endpoint | -|--------|---------------------------------------------------------| -| `PUT` | `/services/workspaces/{SERVICE_WORKSPACE_NAME}/proxies` | - -{{}} - - -
      -JSON request - -```json -{ - "policies": { - "proxy-request-headers": [ - { - "action": { - "proxyHeaders": { - "proxyDefaultHeadersToBackend": true, - "proxyCustomHeadersToBackend": [ - { - "key": "my-custom-header", - "value": "stringValue.myValue", - "isSensitive": true - } - ] - } - } - } - ] - } -} -``` - -This JSON configures a policy for handling proxy request headers. It instructs the proxy to forward the default headers to the backend, and also to forward a custom header, `my-custom-header`, with a specific value, `stringValue.myValue`. The custom header is marked as sensitive, meaning it won't show up in the logs. - -
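
As a further illustration of the prefixes described in the [Header Value Prefixes](#value-prefixes) section, the sketch below forwards an NGINX variable, a client request header, and a JWT claim as custom headers. The header names are examples only, and the `token.sub` entry assumes the OAuth2 JWT Assertion policy has also been configured.

```json
{
  "policies": {
    "proxy-request-headers": [
      {
        "action": {
          "proxyHeaders": {
            "proxyDefaultHeadersToBackend": true,
            "proxyCustomHeadersToBackend": [
              {
                "key": "x-content-length",
                "value": "var.content_length",
                "isSensitive": false
              },
              {
                "key": "x-original-referrer",
                "value": "header.referrer",
                "isSensitive": false
              },
              {
                "key": "x-jwt-subject",
                "value": "token.sub",
                "isSensitive": true
              }
            ]
          }
        }
      }
    ]
  }
}
```
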
{{%/tab%}}
{{%tab name="UI"%}}

1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**.
2. On the left menu, select **Services**.
3. Select a workspace in the list that contains the API Proxy you want to update.
4. On the workspace overview page, on the **API Proxies** tab, locate the API Proxy you want to update. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Proxy**.
5. On the left menu, select **API Proxy > Advanced > Policies**.
6. On the *Advanced > Policies* page, on the **API Proxy** tab, locate **Proxy Request Headers**. Select the **Actions** menu (represented by an ellipsis, `...`), then select **Edit Policy**.
7. Toggle **Set Default Headers** on or off to include or exclude the default headers. This setting is enabled by default.
8. To add custom headers, select **Add Custom Header**, then complete the necessary fields:

   - **Header**: The name of the custom HTTP header.
   - **Value**: The value of the custom HTTP header.

     The value must include one of the following prefixes:

     - `var.`
     - `header.`
     - `client.`
     - `stringValue.`
     - `token.`

     For example, to pass a static string, enter `stringValue.` followed by the string, such as `stringValue.MyString`.

     To learn more about the prefix options and formatting requirements, refer to the [Header Value Prefixes](#value-prefixes) section.

   - **Is Sensitive**: Turn on to prevent writing the custom header to logs.

9. Select **Save** to apply the policy to the API Proxy.
10. Select **Save and Publish** to deploy the configuration to the API Proxy.

{{%/tab%}}
{{
      }} - diff --git a/content/nms/acm/how-to/policies/proxy-response-headers.md b/content/nms/acm/how-to/policies/proxy-response-headers.md deleted file mode 100644 index 2155a2934..000000000 --- a/content/nms/acm/how-to/policies/proxy-response-headers.md +++ /dev/null @@ -1,246 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - set response headers to send to your clients. -nd-docs: DOCS-1135 -title: Proxy Response Headers -toc: true -weight: 1150 -type: -- reference ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - -## About the Policy - -Customize the Proxy Response Headers policy to include or exclude headers in the proxy response. By default, the standard headers are included in the response. In addition, you can specify whether the header is always included regardless of the response code. You can also add custom headers and values to include in the response. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here’s what you need to do: - -- Create an environment or edit an existing one. -- Check the advanced settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each environment. Save and publish the changes. - ---- - -## Policy Settings - -The following table lists the configurable settings and their default values for the policy. - -### Standard Headers - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Datatype | Possible Values |
Description
      | Required | Default | Always Include| -|--------------|----------|---------------------|----------------------------------------------------|----------|-----------------------|---------| -| `web-security-headers` | boolean | true/false | When set to true, the default headers are passed in proxy response. For more information, refer to the Web Security Headers section. | No | true | true | -| `latency-headers` | boolean | true/false | When set to true, the default headers are passed in proxy response. For more information, refer to the Latency Headers section. | No | true | false | -| `cache-headers` | boolean | true/false | When set to true, the default headers are passed in proxy response. For more information, refer to the Cache Headers section. | No | true | true | -| `client-headers` | boolean | true/false | When set to true, X-Client-Original-IP header is passed in proxy response. For more information, refer to the Client Headers section. | No | true | true | -| `hide-nginx-headers` | boolean | true/false | When set to true, nginx version is not passed in Server header in proxy response. For more information, refer to the Hide NGINX Headers section. | No | false | false | -| `correlation-id` | boolean | true/false | When set to true, the correlation id header is passed in proxy response. For more information, refer to the Correlation ID Headers section. | No | true | true | - -{{< /bootstrap-table >}} - - -### Web Security Headers - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Header | Example | Directive |
Description
      | -|--------------|----------|---------------------|----------------------------------------------------| -| `Strict-Transport-Security` | Strict-Transport-Security: max-age=31536000; includeSubDomains | add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" [always] | HSTS response header informs browsers that the site should only be accessed using HTTPS, and that any future attempts to access it using HTTP should automatically be converted to HTTPS. | -| `X-Frame-Options` | X-Frame-Options: SAMEORIGIN | add_header X-Frame-Options SAMEORIGIN [always] | The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in iframe. Sites can use this to avoid click-jacking attacks, by ensuring that their content is not embedded into other sites. | -| `X-Content-Type-Options` | X-Content-Type-Options: nosniff | add_header X-Content-Type-Options nosniff [always] | The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised in the Content-Type headers should be followed and not be changed. The header allows you to avoid MIME type sniffing by saying that the MIME types are deliberately configured. | -| `X-Xss-Protection` | X-Xss-Protection: 1; mode=block | add_header X-Xss-Protection "1; mode=block" [always] | The HTTP X-XSS-Protection response header is a feature of Internet Explorer, Chrome and Safari that stops pages from loading when they detect reflected cross-site scripting (XSS) attacks. These protections are largely unnecessary in modern browsers when sites implement a strong Content-Security-Policy that disables the use of inline JavaScript ('unsafe-inline'). | - -{{< /bootstrap-table >}} - - -### Latency Headers - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Header | Example | Directive |
Description
      | -|--------------|----------|---------------------|----------------------------------------------------| -| `X-Backend-Latency` | X-Backend-Latency: 0.744 | add_header X-Backend-Latency $upstream_header_time [always] | Backend/Upstream response time | -| `X-Total-Request-Response-Latency` | X-Total-Request-Response-Latency: 0.743 | add_header X-Total-Request-Response-Latency $request_time [always] | Request time | -| `X-Backend-Connection-Time` | X-Backend-Connection-Time: 0.433 | add_header X-Backend-Connection-Time $upstream_connect_time [always] | Backend/Upstream connect time | - - -{{< /bootstrap-table >}} - - -### Cache Headers - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Header | Example | Directive |
Description
      | -|--------------|----------|---------------------|----------------------------------------------------| -| `Cache-Control` | Cache-Control: public, must-revalidate, proxy-revalidate | add_header Cache-Control "public, must-revalidate, proxy-revalidate" [always] | The Cache-Control HTTP header field holds directives (instructions) — in both requests and responses — that control caching in browsers and shared caches (e.g. Proxies, CDNs). | - -{{< /bootstrap-table >}} - - -### Client Headers - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Header | Example | Directive |
Description
      | -|--------------|----------|---------------------|----------------------------------------------------| -| `X-Client-Original-IP` | X-Client-Original-IP: 172.10.10.10 | add_header X-Client-Original-IP $realip_remote_addr [always] | Client original IP. | - -{{< /bootstrap-table >}} - - -### Hide NGINX Headers - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Header | Example | Directive |
Description
      | -|--------------|----------|---------------------|----------------------------------------------------| -| `Server` | Server: nginx | server_tokens off | NGINX version is not passed in Server header in proxy response. Server: nginx [nginx/1.23.2] | - - -{{< /bootstrap-table >}} - - -### Correlation ID Headers - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Header | Example | Directive |
Description
|
|--------------|----------|---------------------|----------------------------------------------------|
| `` | x-correlation-id: 26fd65ab0bbe36e546e3da14f4aea89f | add_header ` [always]` | A request-correlation-id policy must also be applied; it determines the header name that is used. The correlation ID value is usually `$request_id`, but it can be overridden by a specific value in the request header itself. |

{{< /bootstrap-table >}}


### Custom Headers


{{< bootstrap-table "table table-striped table-bordered" >}}

| Header | Example | Directive |
Description
      | -|--------------|----------|---------------------|----------------------------------------------------| -| `` | x-custom-header: 3da14f4aea89f | add_header ` [always]` | Add a custom header. | - - -{{< /bootstrap-table >}} - - ---- - -## Adding the Policy - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To create or update a Response Headers policy using the REST API, send an HTTP `PUT` request to the Environment endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|--------|---------------------| -| `PUT` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | - -{{}} - - -
      -JSON request - -``` json -{ - "policies": { - "proxy-response-headers": [ - { - "action": { - "config": [ - { - "always": true, - "enabled": true, - "name": "web-security-headers" - }, - { - "always": true, - "enabled": true, - "name": "correlation-id" - }, - { - "always": false, - "enabled": true, - "name": "latency-headers" - }, - { - "always": true, - "enabled": true, - "name": "cache-headers" - }, - { - "always": false, - "enabled": false, - "name": "hide-nginx-headers" - }, - { - "always": true, - "enabled": true, - "name": "client-headers" - } - ], - "customResponseHeaders": [ - { - "always": true, - "key": "x-custom-header", - "value": "3da14f4aea89f" - } - ] - } - } - ] - } -} -``` - -
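
As a variation on the example above, the sketch below enables `hide-nginx-headers` so the NGINX version is not exposed in the `Server` header, and turns off the latency headers; the other entries are omitted for brevity. Whether omitted groups keep their defaults is not stated in this document, so include the full `config` list (as in the example above) if you need to be explicit.

```json
{
  "policies": {
    "proxy-response-headers": [
      {
        "action": {
          "config": [
            {
              "always": true,
              "enabled": true,
              "name": "web-security-headers"
            },
            {
              "always": false,
              "enabled": false,
              "name": "latency-headers"
            },
            {
              "always": false,
              "enabled": true,
              "name": "hide-nginx-headers"
            }
          ]
        }
      }
    ]
  }
}
```
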
{{%/tab%}}

{{%tab name="UI"%}}

To create or update a Proxy Response Headers policy using the web interface:

1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**.
2. On the left menu, select **Infrastructure**.
3. Choose the workspace that includes the environment for the cluster you want to add the policy to.
4. Select the environment for your cluster.
5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, ...), select **Edit Advanced Config**.
6. On the left menu, select **Global Policies**.
7. From the list of policies, locate the **Proxy Response Headers** policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, ...).
8. Select **Save and Submit** to deploy the configuration.

{{%/tab%}}

{{
      }} diff --git a/content/nms/acm/how-to/policies/rate-limit.md b/content/nms/acm/how-to/policies/rate-limit.md deleted file mode 100644 index 35b740997..000000000 --- a/content/nms/acm/how-to/policies/rate-limit.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -description: Learn how to use the F5 NGINX Management Suite API Connectivity Manager - Rate Limiting policy to protect backend servers. The Rate Limiting policy lets you - limit connections and the rate of requests based on request URI, client IP address, - or authenticated clients. -nd-docs: DOCS-1251 -title: Rate Limiting -toc: true -weight: 1300 -type: -- how-to ---- - -## Overview - -{{< include "acm/how-to/policies-proxy-intro" >}} - ---- - -## About the Policy - -The Rate Limit policy can be used to throttle the number of requests in a time period that enter an application. -You can specify multiple rate limit stipulations with a single policy based on the **Request URI**, **Client IP address** or the **Authenticated Client ID**. -The policy can also specify the type of traffic shaping required to allow burst traffic or two-stage rate limiting. - -### Intended Audience - -This guide is meant for F5 NGINX Management Suite Administrators who can modify or create policies on an API Gateway Proxy. - ---- - -## Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with an [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}). -- You have published one or more [API Gateways]({{< ref "/nms/acm/getting-started/publish-api-proxy" >}}). - - -## Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default Value | -|------------------------|---------|--------------------------------|----------------------------------------------------------------------------------------------------------|----------|--------------------| -| `returnCode` | int | In range `400-599` | The return code that used when the total number of requests have been exceeded. | Yes | `429` | -| `grpcStatusCode` | int | In range `400-599` | The return code that used when the total number of requests have been exceeded. | No | `429` | -| `limits.rate` | string | Example:
`10r/s` | The total number of requests allowed over a given amount of time. | Yes | `10r/s` |
| `limits.rateLimitBy` | string | `uri`, `consumer`, `client.ip` | The value on which to apply the rate limiting. | Yes | `client.ip` |
| `limits.zoneSize` | string | Example: `10M` | The size of the shared memory buffer for the proxy. | Yes | `10M` |
| `throttle.delay` | int | Example: `5` | The delay parameter defines the point at which, within the burst size, excessive requests are throttled. | No | `N/A` |
| `throttle.noDelay` | boolean | `true/false` | Decides whether the request should be processed immediately or stored in the buffer. | No | `N/A` |
| `throttle.burst` | int | Example: `10` | Total number of requests that can be handled in a burst before rate limiting is exceeded. | No | `N/A` |

{{< /bootstrap-table >}}


---

## Applying the Policy

You can apply this policy using the web interface or the REST API.
      - -{{}} - -{{%tab name="API"%}} - -Send a `POST` request to add the Rate limit policy to the API Proxy. - - -{{}} - -| Method | Endpoint | -|--------|---------------------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -
JSON request

```json
{
  "policies": {
    "rate-limit": [
      {
        "systemMetadata": {
          "appliedOn": "inbound",
          "context": "proxy"
        },
        "action": {
          "limits": [
            {
              "rate": "10r/s",
              "rateLimitBy": "client.ip",
              "zoneSize": "10M"
            }
          ]
        }
      }
    ]
  }
}
```

This JSON example defines a Rate Limit policy that limits each client IP address to 10 requests per second, using a 10 MB shared memory zone to track requests.
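
A second sketch limits authenticated clients rather than IP addresses and returns a custom status code when the limit is exceeded. The values are illustrative, `rateLimitBy: "consumer"` assumes an authentication policy is in place so that clients can be identified, and the placement of `returnCode` alongside `limits` is inferred from the Policy Settings table above.

```json
{
  "policies": {
    "rate-limit": [
      {
        "systemMetadata": {
          "appliedOn": "inbound",
          "context": "proxy"
        },
        "action": {
          "returnCode": 503,
          "limits": [
            {
              "rate": "100r/s",
              "rateLimitBy": "consumer",
              "zoneSize": "10M"
            }
          ]
        }
      }
    ]
  }
}
```
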
{{%/tab%}}

{{%tab name="UI"%}}

To add a Rate Limit policy using the web interface:

1. In the ACM user interface, go to **Services > \{your workspace}**, where "your workspace" is the workspace that contains the API Proxy.
1. Select **Edit Proxy** from the **Actions** menu for the desired API Proxy.
1. On the **Policies** tab, select **Add Policy** from the **Actions** menu for **Rate Limit**.
1. Add one or more rate limit stipulations to the policy.
1. Configure the associated **Key**, **Limit**, **Unit**, **Zone Size**, and **Zone size unit** for each stipulation.
1. (Optional) Customize the type of rate limiting that is applied to the policy. Choose one of the following options:
   1. **Buffer excess requests**: allows bursts of requests to be stored in a buffer.
   1. **Buffer excess requests no delay**: allows bursts of requests to be processed immediately while there is space in the buffer.
   1. **Throttle excess requests**: enables two-stage rate limiting.
1. (Optional) Set a custom error return code to use when the rate limit is exceeded.
1. Select **Add** to apply the Rate Limit policy to the Proxy.
1. Select **Save & Publish** to deploy the configuration to the API Proxy.

{{%/tab%}}

{{
      }} - ---- - -## Common Use Cases -The following articles describe common use cases for rate limiting: - -1. [Rate Limiting with NGINX and NGINX Plus](https://www.nginx.com/blog/rate-limiting-nginx/) -1. [Deploying NGINX as an API Gateway, Part 2: Protecting Backend Services](https://www.nginx.com/blog/deploying-nginx-plus-as-an-api-gateway-part-2-protecting-backend-services/) diff --git a/content/nms/acm/how-to/policies/request-body-size-limit.md b/content/nms/acm/how-to/policies/request-body-size-limit.md deleted file mode 100644 index ca2c93888..000000000 --- a/content/nms/acm/how-to/policies/request-body-size-limit.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -description: Learn how to configure the Request Policy Size Limit policy to prevent - Denial of Service (DoS) and other types of attacks. -nd-docs: DOCS-1122 -title: Request Body Size Limit -toc: true -weight: 1200 -type: -- how-to ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## About the Policy - -The *Request Body Size Limit* policy, which by default is set to 1 MB, is applied to all API gateway proxy requests. If the request exceeds this limit, it will be blocked and an error code will be returned. You can adjust the limit to meet your requirements, or you can disable the policy completely by setting the max size to 0. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here's what you need to do: - -- [Edit an existing environment or create a new one]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). -- Check the advanced settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each environment. Save and publish the changes. - ---- - -## Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default | -|--------------|----------|-----------------------|----------------------------------------------------------------------------------------------------------------------------|----------|---------------------| -| `size` | string | Example:
`1M` or `1K` | Sets the maximum body size for client requests. Megabytes (`M`) and kilobytes (`K`) are the accepted units. | No | `1M` |
| `returnCode` | integer | In range: `400-599` | The error code that is returned to the client when the size of a request exceeds the configured value. The default error code is `413: Request Entity Too Large`. | No | `413` |

{{< /bootstrap-table >}}


---

## Applying the Policy

You can apply this policy using either the web interface or the REST API.
      - -{{}} - -{{%tab name="API"%}} - -{{< call-out "note" >}}{{< include "acm/how-to/access-acm-api.md" >}}{{< /call-out>}} - -To add the Request Body Size Limit policy using the REST API, send an HTTP `POST` request to the Environments endpoint. - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Method | Endpoint | -|--------|-------------------------------------------------------------------------------------| -| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | - -{{}} - - -
      -JSON request - -``` json -{ - "policies": { - "request-body-size-limit": [ - { - "action": { - "returnCode": 413, - "size": "1M" - } - } - ] - } -} -``` - -
      - -This example Request Body Size Limit policy rejects requests exceeding one megabyte and returns error code `413`. - -
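
As noted in the About the Policy section, you can effectively disable the size check by setting the maximum size to 0. A minimal sketch of that configuration is shown below; whether a unit suffix is required with a zero value is not specified here, so adjust as needed.

```json
{
  "policies": {
    "request-body-size-limit": [
      {
        "action": {
          "size": "0"
        }
      }
    ]
  }
}
```
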
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To add a Request Body Size Limit policy using the web interface: - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. On the left menu, select **Infrastructure**. -3. Choose the workspace that contains your cluster's environment from the list of workspaces. -4. In the **Environments** section, select the environment name for your cluster. -5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. -6. On the left menu, select **Global Policies**. -7. Locate the **Request Body Size Limit** policy in the list of policies. On the **Actions** menu (represented by an ellipsis, `...`), select **Add Policy**. -8. On the **Request Body Size Limit** form, complete the necessary fields: - - - **Error code**: Specify the error code to return when a request exceeds the maximum size. The default is `413`. - - **Request body size**: Enter the maximum body size in megabytes or kilobytes. The default is 1 megabyte. -9. Select **Add** to apply the policy to the cluster. -10. Select **Save and Submit** to deploy the configuration. - -{{%/tab%}} - -{{
      }} diff --git a/content/nms/acm/how-to/policies/request-correlation-id.md b/content/nms/acm/how-to/policies/request-correlation-id.md deleted file mode 100644 index a99b787b3..000000000 --- a/content/nms/acm/how-to/policies/request-correlation-id.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -description: Learn how to use API Connectivity Manager's Request Correlation ID policy - to add a unique identifier to each request entering your app, which you can use - to trace end-to-end transactions in a distributed system. -nd-docs: DOCS-1120 -title: Request Correlation ID -toc: true -weight: 1300 -type: -- how-to ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## About the Policy - -Use the Request Correlation ID policy to add a unique identifier to each request that enters an application. With the Correlation ID policy, you can trace end-to-end transactions moving through components in a distributed system. This policy is applied by default and usually uses `x-correlation-id` as the default HTTP header name. However, you can also provide a custom header value if needed. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here's what you need to do: - -- [Edit an existing environment or create a new one]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). -- Check the advanced settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each environment. Save and publish the changes. - ---- - -## Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default Value | -|------------------|--------|--------------------------------|--------------------------------------------------------------|----------|--------------------| -| `httpHeaderName` | string | Example:
      `x-correlation-id` | The HTTP header name to use when passing the correlation ID. | YES | `x-correlation-id` | - -{{< /bootstrap-table >}} - - ---- - -## Applying the Policy - -You can apply this policy using either the web interface or the REST API. The policy uses `x-correlation-id` as the default HTTP header name, or you can provide a custom header value. - -
      - -{{}} - -{{%tab name="API"%}} - -To create a Request Correlation ID policy using the REST API, send an HTTP `POST` request to the Environment endpoint. - - -{{}} - -| Method | Endpoint | -|--------|---------------------------------------------------------------------| -| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` | - -{{}} - - -
      -JSON request - -```json -{ - "policies": { - "request-correlation-id": [ - { - "action": { - "httpHeaderName": "x-correlation-id" - } - } - ] - } -} -``` - -This JSON example defines a Request Correlation ID policy, which specifies that an HTTP header called `x-correlation-id` should be used when passing the correlation ID. - -
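
If you prefer a custom header name over the default, the same policy can name it explicitly. The header name `x-request-id` below is only an example.

```json
{
  "policies": {
    "request-correlation-id": [
      {
        "action": {
          "httpHeaderName": "x-request-id"
        }
      }
    ]
  }
}
```
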
{{%/tab%}}

{{%tab name="UI"%}}

To add a Request Correlation ID policy using the web interface:

1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**.
2. On the left menu, select **Infrastructure**.
3. Choose the workspace that includes the environment for the cluster you want to add the policy to.
4. Select the environment for your cluster.
5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**.
6. On the left menu, select **Global Policies**.
7. From the list of policies, locate the **Request Correlation ID** policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, `...`).
8. On the **Request Correlation ID** form, complete the necessary fields:

   - **HTTP Header Name**: The HTTP header name to use when passing the correlation ID. The default is `x-correlation-id`.

9. Select **Add** to apply the policy to the cluster.
10. Select **Save and Submit** to deploy the configuration.

{{%/tab%}}

{{
      }} diff --git a/content/nms/acm/how-to/policies/request-header-specification.md b/content/nms/acm/how-to/policies/request-header-specification.md deleted file mode 100644 index d87ca260c..000000000 --- a/content/nms/acm/how-to/policies/request-header-specification.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -description: Learn how to set up the Request Header Specification policy in API Connectivity - Manager to process headers with invalid characters. -nd-docs: DOCS-1263 -title: Request Header Specification -toc: true -weight: 1300 -type: -- how-to ---- - -## Overview - -{{< include "acm/how-to/policies-intro" >}} - ---- - -## About the Policy - -Use the Request Header Specification policy to allow headers that would normally be considered invalid. This can be used to treat underscores as valid or allow all special header characters. - -### Intended Audience - -{{< include "acm/how-to/policies/infra-admin-persona.md">}} - ---- - -## Workflow for Applying Policy - -To apply the policy or make changes to it, here's what you need to do: - -- [Edit an existing environment or create a new one]({{< ref "/nms/acm/how-to/infrastructure/manage-api-infrastructure.md#add-environment" >}}). -- Check the advanced settings for the environment to see if the policy has been applied. -- Edit the policy to make changes for each environment. Save and publish the changes. - ---- - -## Policy Settings - - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Field | Type | Possible Values | Description | Required | Default Value | -|------------------|--------|--------------------------------|--------------------------------------------------------------|----------|--------------------| -| `invalidHeadersBehaviour` | string | Example:
      `ALLOW_ALL` | This can be set to `IGNORE_ALL` (the default behavior for NGINX), `ALLOW_UNDERSCORE`, or `ALLOW_ALL` | YES | `ALLOW_ALL` | - -{{< /bootstrap-table >}} - - ---- - -## Applying the Policy - -You can apply this policy using either the web interface or the REST API. Configuring the policy to `invalidHeadersBehaviour: IGNORE_ALL` will result in the same behavior as not applying the policy. - -
{{}}

{{%tab name="API"%}}

To create a Request Header Specification policy using the REST API, send an HTTP `POST` request to the Environment endpoint.


{{}}

| Method | Endpoint |
|--------|---------------------------------------------------------------------|
| `POST` | `/infrastructure/workspaces/{workspace}/environments/{environment}` |

{{}}

JSON request

```json
{
  "policies": {
    "request-header-specification": [
      {
        "action": {
          "invalidHeadersBehaviour": "ALLOW_ALL"
        }
      }
    ]
  }
}
```

This JSON example defines a Request Header Specification policy that allows all special characters in request headers.
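
To accept headers that contain underscores without allowing every special character, set the behavior to `ALLOW_UNDERSCORE` instead, as in the sketch below (assuming the policy key matches this document's naming, `request-header-specification`).

```json
{
  "policies": {
    "request-header-specification": [
      {
        "action": {
          "invalidHeadersBehaviour": "ALLOW_UNDERSCORE"
        }
      }
    ]
  }
}
```
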
      - -{{%/tab%}} - -{{%tab name="UI"%}} - -To add a Request Header Specification policy using the web interface: - -1. In a web browser, go to the FQDN for your F5 NGINX Management Suite host and log in. Then, from the Launchpad menu, select **API Connectivity Manager**. -2. On the left menu, select **Infrastructure**. -3. Choose the workspace that includes the environment for the cluster you want to add the policy to. -4. Select the environment for your cluster. -5. In the list of clusters, locate the cluster you want to add the policy to. On the **Actions** menu (represented by an ellipsis, `...`), select **Edit Advanced Config**. -6. On the left menu, select **Global Policies**. -7. From the list of policies, locate the **Request Header Specification** policy, then select **Add Policy** from the **Actions** menu (represented by an ellipsis, `...`). -8. On the **Request Header Specification** form, choose which configuration is appropriate for your environment. -9. Select **Add** to apply the policy to the cluster. -10. Select **Save and Submit** to deploy the configuration. - -{{%/tab%}} - -{{
      }} diff --git a/content/nms/acm/how-to/policies/tls-policies.md b/content/nms/acm/how-to/policies/tls-policies.md deleted file mode 100644 index eb69cb412..000000000 --- a/content/nms/acm/how-to/policies/tls-policies.md +++ /dev/null @@ -1,391 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - secure communications by applying TLS policies. -nd-docs: DOCS-926 -toc: true -weight: 1400 -title: TLS -type: -- how-to ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -{{< include "acm/how-to/policies-intro.md" >}} - -The types of communication you can apply TLS policies to includes: - -- ingress traffic to an API or Dev Portal proxy; -- communications between an API proxy and a backend API service; and -- communications between the API Connectivity Manager management plane and the Dev Portal data plane. - ---- - -### Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with [API Gateway]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) or [Dev Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) clusters. - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -### How to Access the REST API - -{{< include "acm/how-to/access-acm-api" >}} - ---- - -## Secure Ingress Traffic - -Take the steps in this section to secure the traffic coming into your API Proxies. - -### Add TLS Listener - -{{}} - {{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Workspaces > Environments > \**, where "your environment" is the Environment that contains the Developer Portal. -1. Select **Edit Advanced Config** from the **Actions** menu for the desired Developer Portal. -1. On the **Listeners** tab, select **Add Listener**. -1. Provide the desired **Protocol** and **Port** (for example, `443`) and select the **TLS** checkbox. - - {{%/tab%}} - {{%tab name="API"%}} - -```json -"listeners": [ - { - "transportProtocol": "HTTP", - "port": 443, - "tlsEnabled": true, - "ipv6": false - } -``` - - {{%/tab%}} -{{}} - -### Add TLS Inbound Policy {#add-tls-inbound} - -{{}} -{{%tab name="UI"%}} - -1. Select the **Global Policies** tab. -1. Select **Add Policy** from the **Actions** menu for the **TLS Inbound** policy. -1. On the **TLS Inbound** policy page, provide the requested information. - - - **Protocols:** The TLS and SSL protocols that will be used for securing communication. - - **Cipher:** The set of algorithms or a set of instructions/steps that helps to establish the secure connection. - - **Session Timeout:** Specifies the time during which a client may reuse the session parameters. - - **Session Cache:** Sets whether a session can be re-used. When off, a full negotiation is performed for every connection. - - **Session Type:** Determines the cache type for re-using sessions. - - **Session Size:** Sets the shared cache size. - -1. Upload a Server Certificate, Certificate Key, and CA Certificate. - - - Select the upload icon in the **Server Certificate** field and browse for the desired certificate on your file system. - - Select the upload icon in the **Certificate Key** field and browse for the desired key file on your file system. - - Select the upload icon in the **CA Certificates** field and browse for the desired Root CA certificate on your file system. - -1. 
(Optional) Select the **Verify Client Certificate** toggle and complete the configurations as appropriate. -1. Select **Add** to save and add the policy. -1. Select **Save and Submit**. - -{{%/tab%}} -{{%tab name="API"%}} - -```json -"policies": { - "tls-inbound": [ - { - "data": { - "serverCerts": [ - { - "key": {{tlsKey}}, - "cert": {{tlsCert}} - } - ], - "trustedRootCACerts":{{caCert}} - } - } - ] -} -``` - -{{%/tab%}} -{{}} - -### Verify HTTPS Connection - -Once the Environment configuration has been submitted and applied, the **Job Status** for the Environment will change to **Success**. -You can then navigate to the Dev Portal user interface to verify that your connection is secured using HTTPS. - ---- - -## Secure Communications between API Proxy and Backend Service - -Take the steps in this section to secure the communications between your Proxies and the associated API backend services. When mTLS is enabled, the API Gateway identifies itself to the backend service using an SSL client certificate. - -### Add TLS Backend Policy {#add-tls-backend} - -{{}} -{{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Workspaces > Environments > \**, where "your environment" is the Environment that contains the API Gateway to be updated. -1. Select **Edit Advanced Config** from the **Actions** menu for the desired API Gateway. -1. Select the **Global Policies** tab, then select **Add Policy** from the **Actions** menu for the **TLS Backend** policy. -1. On the **TLS Backend** policy page, provide the requested information. - - - **Protocols:** The TLS and SSL protocols that will be used for securing communication to the proxied server. - - **Cipher:** The set of algorithms or a set of instructions/steps that helps to establish the secure connection to the proxied server. - - **Verify Certificate Chain Depth:** Sets the verification depth in the client certificates chain. - -1. Upload a Client Certificate, Certificate Key, and CA Certificate. - - - Select the upload icon in the **Client Certificate** field and browse for the desired certificate on your file system. - - Select the upload icon in the **Certificate Key** field and browse for the desired key file on your file system. - - (Optional) Provide the Client ID and select the upload icon to upload a Trusted Root CA, then browse for the desired Root CA certificate on your file system. - -1. Select **Add** to save and add the policy. -1. Select **Save and Submit**. - -{{%/tab%}} -{{%tab name="API"%}} - -```json -"policies": { - "tls-backend": [ - { "action": { - "cipher": "HIGH:!aNULL:!MD5", - "protocols": [ - "TLSv1.2" - ] - }, - "data": { - "trustedRootCACerts":"{{caCert}}", - "clientCerts": [ - { - "cert": "{{clientCert}}", - "key": "{{clientKey}}" - - } - ] - } - } - ] -} -``` - -{{%/tab%}} -{{}} - -Once the Environment configuration has been submitted and applied, the **Job Status** for the Environment will change to **Success**. - ---- - -## Secure Communications Between API Connectivity Manager and Dev Portal Hosts - -Take the steps in this section to secure communications between the API Connectivity Manager management plane and Dev Portal hosts. - -### Add TLS Policies to External Developer Portal {#tls-external-cluster} - -{{}} -{{%tab name="UI"%}} - -1. In the API Connectivity Manager user interface, go to **Workspaces > Environments > \**, where "your environment" is the Environment that contains the Developer Portal. -1. 
Add the [TLS Inbound](#add-tls-inbound) and [TLS Backend](#add-tls-backend) policies to your Developer Portal. -1. Save and submit your changes. - -{{%/tab%}} -{{%tab name="API"%}} - -```json -{ - "name": "{{environmentName}}", - "functions": [ - "DEVPORTAL" - ], - "proxies": [ - { - "proxyClusterName": "{{portalInstanceGName}}", - "hostnames": [ - "{{portalEnvironmentHostname}}" - ], - "runtime": "PORTAL-PROXY", - "policies": { - "tls-inbound": [ - { - "action": { - "cipher": "ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5", - "protocols": [ - "TLSv1.2" - ], - "sessionCache": { - "enable": "on", - "size": "10M", - "type": "shared" - }, - "sessionTimeout": "5m" - }, - "data": { - "trustedRootCACerts": { - "clientId": "clientId1", - "cert": "{{}}" - }, - "serverCerts": [ - { - "key": {{tlsServerKey}}, - "cert": {{tlsServerCert}} - } - ] - } - } - ], - "tls-backend": [ - { - "action": { - "cipher": "HIGH:!aNULL:!MD5", - "sessionReuse": false, - "proxyServerName": false, - "protocols": [ - "TLSv1.2" - ] - }, - "data": { - "trustedRootCACerts":"{{caCert}}", - "clientCerts": [ - { - "key": {{tlsClientKey}}, - "cert": {{tlsClientCert}} - } - ] - } - } - ] - } - } - ] -} -``` - -{{%/tab%}} -{{}} - -### Secure Communication between Portal and API Connectivity Manager using TLS Policies {#tls-internal-cluster} - -{{}} -{{%tab name="UI"%}} - -1. Select **Edit Portal <-> API Connectivity Manager Connectivity** from the **Actions** menu for your desired developer portal. -1. [Add TLS Listener(s)](#add-a-tls-listener). -1. Add the [TLS Inbound](#add-tls-inbound) policy. - - - Complete the fields as desired. - - Upload the Server Certificate and Certificate Key. - - On the same **TLS Inbound** policy page, select the **Verify Client Certificate** option. - - Provide the Certificate Authority (CA) certificates and a Client ID. - - Select **Add**. - -1. Add the [TLS Backend](#add-tls-backend) policy. - - - Complete the fields as desired. - - Upload the Client Certificate and Certificate Key. - - Select **Add**. - -1. Save and submit your changes. - -{{%/tab%}} -{{%tab name="API"%}} - -```json -{ - "name": "{{environmentName}}", - "functions": [ - "DEVPORTAL" - ], - "proxies": [ - { - "proxyClusterName": "{{portalInstanceGName}}", - "hostnames": [ - "acm.{{portalEnvironmentHostname}}" - ], - "runtime": "PORTAL-PROXY", - "listeners": [ - { - "transportProtocol": "HTTP", - "port": 443, - "tlsEnabled": true, - "ipv6": false - } - ], - "policies": { - "tls-inbound": [ - { - "action": { - "cipher": "ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5", - "protocols": [ - "TLSv1.2" - ], - "sessionCache": { - "enable": "on", - "size": "10M", - "type": "shared" - }, - "sessionTimeout": "5m", - "enableMTLS": { - "certVerify": true, - "certChainVerifyDepth": 2 - } - }, - "data": { - "serverCerts": [ - { - "key": {{tlsServerKey}}, - "cert": {{tlsServerCert}} - } - ], - "clientCerts": [ - { - "clientId": "client-1", - "cert": {{caCert}}, - } - ] - } - } - ], - "tls-backend": [ - { - "action": { - "cipher": "HIGH:!aNULL:!MD5", - "sessionReuse": false, - "proxyServerName": false, - "protocols": [ - "TLSv1.2" - ] - }, - "data": { - "clientCerts": [ - { - "key": {{tlsClientKey}}, - "cert": {{tlsClientCert}} - } - ] - } - } - ] - } - } - ] -} -``` - -{{%/tab%}} -{{}} - -Once the Environment configuration has been submitted and applied, the **Job Status** for the Environment will change to **Success**. 
- diff --git a/content/nms/acm/how-to/services/_index.md b/content/nms/acm/how-to/services/_index.md deleted file mode 100644 index acaac100f..000000000 --- a/content/nms/acm/how-to/services/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Services -weight: 200 -url: /nginx-management-suite/acm/how-to/services/ ---- \ No newline at end of file diff --git a/content/nms/acm/how-to/services/publish-api.md b/content/nms/acm/how-to/services/publish-api.md deleted file mode 100644 index 010f85f6b..000000000 --- a/content/nms/acm/how-to/services/publish-api.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - publish APIs to your API Gateway. -nd-docs: DOCS-927 -title: Publish an HTTP API -toc: true -weight: 100 -type: -- how-to ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -API Connectivity Manager lets you manage your API infrastructure by using a set of hierarchical resources. The top-level resource, called a **Workspace**, provides a logical grouping for resources called **Environments**. Environments contain **Clusters** that assign NGINX instances for use as API Gateways and Developer Portals. - -This topic describes how to publish an API config to a cluster. - -### Before You Begin - -Complete the following prerequisites before proceeding with this guide: - -- API Connectivity Manager is installed, licensed, and running. -- You have one or more Environments with [API Gateway]({{}}) or [Dev Portal]({{< ref "/nms/acm/getting-started/add-devportal" >}}) clusters. - -### How to Access the User Interface - -{{< include "acm/how-to/access-acm-ui" >}} - -## Create a Service Workspace - -{{< call-out "note" >}} -The API Connectivity Manager admin must verify that the user (API Onwer) has CRUD [RBAC](https://en.wikipedia.org/wiki/Role-based_access_control) permissions for the services feature. -{{< /call-out >}} - -Service Workspaces let you group API configurations. Publishing an API requires at least one Service Workspace. - -To create a Service Workspace you need to do the following: - -1. On the sidebar, select **Services**. If this is your first time on the page, you'll see a prompt for creating a new workspace. If not, select the **+Add** button in the top-right corner. -2. Enter a name, description, and any other information you want to provide. -3. Select **Create**. - -## Next Steps - -After creating a Service Workspace, two options are displayed: - -**Publish API Proxy** and **Publish API Doc** - -## Publish an API Proxy {#publish-api-proxy} - -1. Enter the required information in the fields provided. -2. **Service Target Hostname** should point to the backend service you want this API to resolve to. -3. If you choose not to use an OpenAPI spec then you need to add some extra information. -4. For **Gateway Proxy Hostname**, select the hostname of the environment you want to associate with the API. -5. **Base Path** and **Version** build up the URI, for example **/api/v1/**. -6. Select **Publish** to save and publish your API Proxy. - -{{< call-out "note" >}} -If you choose to use an OpenAPI spec, it will get processed into a config and published. -{{< /call-out >}} - -## Advanced Configurations {#advanced-configurations} - -After publishing the API Proxy, a link to **Edit Advanced Configurations** is displayed. -If you want to create more advanced routing configurations, select this option. 
-You can upload an OpenAPI spec here too, which has all the necessary API and routing information. - -{{< include "acm/openapi-support" >}} - -To add an Advanced Routing configuration, select the **Ingress** menu item in the advanced section of the menu. - -1. Select **Add Route** in the **Advanced Routes** section. -2. Fill out the required information in the form presented to you. -3. **Match URI** is the value you want to match on for queries. -4. Choose the required **HTTP Method** you want to use for this route match. -5. Change the **Target Backend Service Label** if required to target a specific backend based on the label value. -6. Select **Add Parameter** to add a parameter to the Path, Query, or Header that's used to match on the route. - {{< call-out "note" >}}If you choose a path parameter then you must have a placeholder for that parameter in **Match URI**.{{< /call-out >}} -7. Select **Add** to finish adding the route. -8. Select **Next** to move to the **Backend** configuration page. - -### Backends - -Backends tell your API where to resolve the queries to, for example your backend server. - -You can add, edit, or delete Backends. - -You can also set [DNS](https://en.wikipedia.org/wiki/Domain_Name_System) resolvers and [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) on the backend. - -### Policies - -This section ensures you can set policies at the individual API level. - -Check the [Manage Policies]({{< ref "/nms/acm/how-to/policies/manage-policies.md" >}}) documentation for more information. - -## Publish an API Proxy using an OpenAPI Spec - -In the [**Publish an API Proxy**](#publish-api-proxy) form, select the option to use an OpenAPI spec and choose one from the list of existing specs. - -You may also upload a new OpenAPI spec in this form by selecting **+Add API Spec** and uploading the new spec in the file input. - -{{< include "acm/openapi-support" >}} - -## Update a Published API Proxy - -1. On the sidebar, select **Services**. Then on the Services Workspaces page, select the workspace containing the API proxy you want to edit. -2. Select **Edit Proxy** from the **Actions** menu of the Proxy you want to delete. -3. Edit as needed. -4. Select **Save and Publish**. - -{{< call-out "note" >}} -Certain sections can't be modified for API Proxies created with OpenAPI Specs, for example, **Advanced Routing** in the **Ingress** step. -{{< /call-out >}} - -## Delete a Published API Proxy - -1. On the sidebar, select **Services**. Then on the Services Workspaces page, select the name of the workspace containing the API proxy you want to delete. -2. Select **Delete Proxy** from the **Actions** menu of the Proxy you want to delete. - -## What's Next - -- [Manage Policies]({{< ref "/nms/acm/how-to/policies/manage-policies.md" >}}) -- [Publish a Developer Portal]({{< ref "/nms/acm/getting-started/add-devportal.md" >}}) diff --git a/content/nms/acm/how-to/services/publish-gRPC-proxy.md b/content/nms/acm/how-to/services/publish-gRPC-proxy.md deleted file mode 100644 index 0ca95cd7e..000000000 --- a/content/nms/acm/how-to/services/publish-gRPC-proxy.md +++ /dev/null @@ -1,411 +0,0 @@ ---- -description: Learn how to use F5 NGINX Management Suite API Connectivity Manager to - publish a gRPC Proxy and manage traffic to gRPC services. 
-nd-docs: DOCS-997 -title: Publish a gRPC API Proxy -toc: true -weight: 300 -type: -- how-to ---- - -{{< shortversions "1.2.0" "latest" "acmvers" >}} - -## Overview - -gRPC has emerged as an alternative approach to building distributed applications, particularly microservice applications. API Connectivity Manager supports publishing gRPC services. -The following document describes how to publish a gRPC API proxy using the API Connectivity Manager API or UI. Additionally, this guide outlines the process of setting up a gRPC Echo Server to validate the functionality of the published proxy. - - -## Publish a gRPC API Proxy with Package-level Routing - -{{}} -{{%tab name="API"%}} - -Send a POST request to publish the gRPC API proxy. - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - -```json -{ - "name": "dev-grpc-hello", - "version": "v1", - "proxyConfig": { - "hostname": "example.com", - "grpcIngress": { - "service": "helloworld." - }, - "backends": [ - { - "serviceName": "hello-grpc-service-name", - "serviceTargets": [ - { - "hostname": "grpc-backend.example.com", - "listener": { - "enableTLS": false, - "port": 50051, - "transportProtocol": "GRPC" - } - } - ] - } - ] - } -} -``` - - - - -{{%/tab%}} -{{%tab name="UI"%}} - -1. Open a service workspace. -1. Select **Publish to proxy**. -1. Type a **Backend Service** name. -1. Enter a **Service Target Hostname**. -1. Select GRPC in the **Service Target Transport Protocol** menu. -1. Enter the **Service Target Port**. -1. Enter an **API Proxy** name. -1. Select a **Gateway Proxy Hostname** in the menu. -1. Enter the **Service name** and **Version**; for this example, we use "helloworld" and "v1". -1. Select **Publish**. - -You should now have a published gRPC API proxy with a Lifecycle Status of success. - -{{%/tab%}} -{{}} - -## Publish a gRPC API Proxy with Service-Level Routing - -{{}} -{{%tab name="API"%}} - -Send a POST request to publish the gRPC proxy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "name": "dev-grpc-hello", - "version": "v1", - "proxyConfig": { - "hostname": "example.com", - "grpcIngress": { - "service": "helloworld.Greeter" - }, - "backends": [ - { - "serviceName": "hello-grpc-service-name", - "serviceTargets": [ - { - "hostname": "grpc-backend.example.com", - "listener": { - "enableTLS": false, - "port": 50051, - "transportProtocol": "GRPC" - } - } - ] - } - ] - } -} -``` - - - - -{{%/tab%}} -{{%tab name="UI"%}} - -To configure the proxy to route by service: - -1. Open the proxy and select **Ingress**. -1. Type "helloWorld.Greeter" in the **Service Name** field. -1. Select **Save and Publish**. - -{{%/tab%}} -{{}} - - -## Publish a gRPC API Proxy with Advanced Routes with a gRPC Method - -{{}} -{{%tab name="API"%}} - -Send a POST request to publish the gRPC proxy. 
- - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "name": "dev-grpc-hello", - "version": "v1", - "proxyConfig": { - "hostname": "example.com", - "grpcIngress": { - "service": "helloworld.Greeter", - "routes": [ - { - "method": "SayGoodbye", - "targetBackendServiceLabel": "default" - }, - { - "method": "SayHello", - "targetBackendServiceLabel": "default" - } - ] - }, - "backends": [ - { - "serviceName": "hello-grpc-service-name", - "serviceTargets": [ - { - "hostname": "grpc-backend.example.com", - "listener": { - "enableTLS": false, - "port": 50051, - "transportProtocol": "GRPC" - } - } - ] - } - ] - } -} -``` - - - - -{{%/tab%}} -{{%tab name="UI"%}} - - -To configure the proxy with an advanced route - -1. Open the proxy and select the **Ingress**. -1. Select **Add route** and enter the **GRPC Method**; for example, "SayGoodbye". -1. Select **Save and Publish**. -1. Proceed to [Set Up gRPC Echo Server and Test gRPC API Proxy](#setup-grpc-echo-server-optional) for the next steps. - -{{%/tab%}} -{{}} - - -## Service-Level Routing using Labels -{{}} -{{%tab name="API"%}} - -Send a POST request to publish the gRPC proxy. - - -{{}} - -| Method | Endpoint | -|----------|---------------------------------------------------------| -| `POST` | `/services/workspaces//proxies` | - -{{}} - - -```json -{ - "name": "dev-grpc-hello", - "version": "v1", - "proxyConfig": { - "hostname": "example.com", - "grpcIngress": { - "service": "helloworld.Greeter", - "routes": [ - { - "method": "SayGoodbye", - "targetBackendServiceLabel": "custom" - }, - { - "method": "SayHello", - "targetBackendServiceLabel": "default" - } - ] - }, - "backends": [ - {"label": { - "targetName": "default" - }, - "serviceName": "hello-grpc-service-name", - "serviceTargets": [ - { - "hostname": "grpc-backend.example.com.1", - "listener": { - "enableTLS": false, - "port": 50051, - "transportProtocol": "GRPC" - } - } - ] - }, - { - "label": { - "targetName": "custom" - }, - "serviceName": "hello-grpc-service-name", - "serviceTargets": [ - { - "hostname": "grpc-backend.example.com.2", - "listener": { - "enableTLS": false, - "port": 50051, - "transportProtocol": "GRPC" - } - } - ] - } - ] - } -} -``` - - - - - -{{%/tab%}} -{{%tab name="UI"%}} - -If you have multiple backend servers and want to route to a specific backend server, you can use labels. - -1. Open the proxy and select **Backend**. -1. Enter a **Service Name** and **Service Version**. -1. Add a label for the backend service, "custom2". -1. Type the **Service Target Hostname**. -1. Select **Add**. -1. Select **Save and Publish**. -1. [Setup gRPC Echo Server and Test gRPC API Proxy](#setup-grpc-echo-server-optional). - -{{%/tab%}} -{{}} - -## Backends - -Backends specify where your API should send queries, such as to your backend server. - -You can add, edit, or delete backends. - -You can also set [DNS](https://en.wikipedia.org/wiki/Domain_Name_System) resolvers and [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) on the backend. - -## Policies - -This section ensures you can set policies at the individual API level. - -For more information, refer to the [Manage Policies]({{< ref "/nms/acm/how-to/policies/manage-policies.md" >}}) documentation. - -## Update a Published API Proxy - -1. On the sidebar, select **Services**. Then on the Services Workspaces page, select the workspace containing the API proxy you want to edit. -2. 
Select **Edit Proxy** from the **Actions** menu of the Proxy you want to delete. -3. Edit as needed. -4. Select **Save and Publish**. - -## Delete a Published API Proxy - -1. On the sidebar, select **Services**. Then on the Services Workspaces page, select the name of the workspace containing the API proxy you want to delete. -2. Select **Delete Proxy** from the **Actions** menu of the Proxy you want to delete. - -## Set Up gRPC Echo Server (Optional) {#setup-grpc-echo-server-optional} - -This section explains how to set up a gRPC echo server to verify that the gRPC API works as expected. - -From a command line terminal: - -1. Create a virtual environment and install the required packages: - - ```shell - virtualenv echo-servers - source echo-servers/bin/activate - pip install grpcio protobuf grpcio-tools - ``` - -1. Create a file named `helloworld.proto` and add the following content: - - ```shell - syntax = "proto3"; - - package helloworld; - - service Greeter { - rpc SayHello (HelloRequest) returns (HelloReply) {} - rpc SayGoodbye (GoodbyeRequest) returns (GoodbyeReply) {} - } - - message HelloRequest { - string name = 1; - } - - message HelloReply { - string message = 1; - } - - message GoodbyeRequest { - string name = 1; - } - - message GoodbyeReply { - string message = 1; - } - ``` - -1. Run the following command to generate the python code: `python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. helloworld.proto` -1. Create `server.py` Add the following to the file: - - ```shell - import grpc - import helloworld_pb2 - import helloworld_pb2_grpc - from concurrent import futures - - class GreeterServicer(helloworld_pb2_grpc.GreeterServicer): - def SayHello(self, request, context): - response = helloworld_pb2.HelloReply(message='Hello, ' + request.name) - return response - - def SayGoodbye(self, request, context): - response = helloworld_pb2.GoodbyeReply(message='Goodbye, ' + request.name) - return response - - def serve(): - server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - helloworld_pb2_grpc.add_GreeterServicer_to_server(GreeterServicer(), server) - server.add_insecure_port('[::]:50051') - server.start() - server.wait_for_termination() - - if __name__ == '__main__': - serve() - ``` - -1. Run `python server.py`. -1. To confirm the server is running, run the command `netstat -tulpn | grep 50051`. -1. For step-by-step instructions on how to set up gRPC testing using Postman, refer to [Testing gRPC APIs with Postman](https://blog.postman.com/testing-grpc-apis-with-postman/). This guide will help you test and validate your gRPC APIs effectively using Postman. diff --git a/content/nms/acm/releases/_index.md b/content/nms/acm/releases/_index.md deleted file mode 100644 index 2cf3243ea..000000000 --- a/content/nms/acm/releases/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Releases -description: "Stay up-to-date with the latest F5 NGINX Management Suite API Connectivity Manager releases." -weight: 800 -url: /nginx-management-suite/acm/releases/ ---- \ No newline at end of file diff --git a/content/nms/acm/releases/known-issues.md b/content/nms/acm/releases/known-issues.md deleted file mode 100644 index 6730f887e..000000000 --- a/content/nms/acm/releases/known-issues.md +++ /dev/null @@ -1,846 +0,0 @@ ---- -description: This document is a summary of the known issues in F5 NGINX Management - Suite API Connectivity Manager. Fixed issues are removed after **45 days**.

      We - recommend upgrading to the latest version of API Connectivity Manager to take advantage - of new features, improvements, and bug fixes.

      -nd-docs: DOCS-930 -title: Known Issues -toc: true -weight: 200 -type: -- reference ---- - - ---- - -## 1.9.2 -March 14, 2024 - -### {{% icon-bug %}} Helm chart backup and restore is broken {#44766} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 44766 | Open | - -{{}} -#### Description -Helm backup and restore will not run in ACM-1.9.1 on NMS-2.15.x due to an underlying change in the dqlite client. - -#### Workaround - -None - ---- - -## 1.9.1 -October 05, 2023 - -### {{% icon-resolved %}} JWT tokens are overwritten when multiple proxies are assigned to one gateway {#44636} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 44636 | Fixed in API Connectivity Manager 1.9.2 | - -{{}} -#### Description -When multiple API proxies, each with its own JSON Web Token Assertion policy, are assigned to one gateway, the directives are overwritten by one another. - -#### Workaround - -None - ---- - -## 1.9.0 -September 07, 2023 - -### {{% icon-resolved %}} Module crashes when an OpenAPI spec is uploaded with a global security requirement that contains an empty security requirement object {#44393} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 44393 | Fixed in API Connectivity Manager 1.9.1 | - -{{}} -#### Description -API Connectivity Manager crashes when an OpenAPI specification file is uploaded with a global security requirement block containing an empty `security` object. - -Example OpenAPI security requirement with empty security object: - -```none -"security": [{}] -``` - ---- - -## 1.8.0 -July 27, 2023 - -### {{% icon-resolved %}} Cannot use TLS enabled backend with HTTP backend-config policy {#44212} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 44212 | Fixed in API Connectivity Manager 1.9.0 | - -{{}} -#### Description -When configuring a backend-config policy with the transport protocol set to HTTP for an API, if TLS is enabled on that APIs backend, then the configuration will fail with the following error in the API Connectivity Manager log file: - "Backend Config policy failed when checking transport protocol match because of: the backend-config policy transport protocol http does not match the proxy backend transport protocol https" - ---- - -### {{% icon-resolved %}} Deployment fails due to duplicate locations {#43673} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 43673 | Fixed in API Connectivity Manager 1.9.0 | - -{{}} -#### Description -When more than one version of an API is published and Append Rule is set to "None", the deployment fails due to duplicate locations. - ---- - -### {{% icon-resolved %}} Certificates associated with empty instance groups can be deleted, resulting in a broken reference in the API Connectivity Manager module {#43671} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 43671 | Fixed in API Connectivity Manager 1.9.0 | - -{{}} -#### Description -In the Instance Manager **Certificates and Keys** web interface, you can delete API Connectivity Manager TLS Policy certificates associated with empty instance groups. However, this action may lead to a broken reference problem in the API Connectivity Manager module, resulting in the inability to modify or delete the broken Environment from the web interface. - -#### Workaround - -You can delete the Environment using the API if it cannot be modified or deleted using the web interface. 
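For reference, here is a minimal sketch of that API call (the hostname, credentials, workspace, and environment names are placeholders, and basic authentication is assumed):

```bash
# Hypothetical example: delete the environment with the broken reference
# directly through the ACM REST API when the web interface cannot remove it.
# Replace <NMS-FQDN>, the credentials, <workspaceName>, and <environmentName>
# with values from your own deployment.
curl -k -X DELETE \
  -u admin:<password> \
  "https://<NMS-FQDN>/api/acm/v1/infrastructure/workspaces/<workspaceName>/environments/<environmentName>"
```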
- ---- - -## 1.7.0 -June 21, 2023 - -### {{% icon-resolved %}} Environments with WAF enabled may transition to a Failed status when a Developer Portal cluster is added. {#43231} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 43231 | Fixed in API Connectivity Manager 1.8.0 | - -{{}} -#### Description -If you add a Developer Portal cluster to an environment that has WAF enabled, the environment may transition to a `Failed` status. If this happens, follow the steps in the workaround. - -#### Workaround - -On the Developer Portal: - -1. Open an SSH connection to the Developer Portal and log in. -2. [Install F5 NGINX App Protect]({{< ref "/nap-waf/v4/admin-guide/install.md" >}}). -3. Stop the NGINX Agent: - - ```bash - sudo systemctl stop nginx-agent - ``` - -4. Run the onboarding command to add the Developer Cluster: - - ```bash - curl -k https:///install/nginx-agent > install.sh && sudo sh install.sh -g -m precompiled-publication --nap-monitoring true && sudo systemctl start nginx-agent - ``` - - Replace `` with the fully qualified domain name of your NGINX Management Suite, and `` with the name of the Developer Cluster. - - -5. Confirm the NGINX Agent is started and restart if necessary: - - ```bash - sudo systemctl status nginx-agent - sudo systemctl start nginx-agent - ``` - ---- - -### {{% icon-resolved %}} Resources deployed to a Developer Portal which has had its database reset cannot be updated or removed {#43140} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 43140 | Fixed in API Connectivity Manager 1.9.0 | - -{{}} -#### Description -It is not possible to remove resources from API Connectivity Manager which have been published to a Developer Portal if the Developer Portal database is cleared. - ---- - -## 1.6.0 -May 11, 2023 - -### {{% icon-resolved %}} Multiple entries selected when gateway proxy hostnames are the same {#42515} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 42515 | Fixed in API Connectivity Manager 1.7.0 | - -{{}} -#### Description -Multiple entries are selected when gateway proxy hostnames are the same. - -#### Workaround - -There is no impact to functionality. - ---- - -### {{% icon-resolved %}} The routes filter under the proxy metrics page won’t work with params {#42471} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 42471 | Fixed in API Connectivity Manager 1.7.0 | - -{{}} -#### Description -The routes filter under the proxy metrics page won’t work with params currently. - -For example, `/api/v1/shops/{shopID}` - -The API won’t match on the above route. - ---- - -## 1.5.0 -March 28, 2023 - -### {{% icon-bug %}} Using policies with targetPolicyName set to anything other than the default value can cause unexpected results. {#42682} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 42682 | Open | - -{{}} -#### Description -Creating a policy with metadata of “targetPolicyName” set to anything but default can cause issues with secrets being duplicated if more than one policy is created. Setting this value to anything but the default value will also cause the policy to not be applied. The policy may be shown as applied in the UI when it is not. - -#### Workaround - -Do not modify the “targetPolicyName” to be anything but the default value. 
- ---- - -### {{% icon-resolved %}} Array values in token claims are treated as string values {#42388} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 42388 | Fixed in API Connectivity Manager 1.6.0 | - -{{}} -#### Description -When an Access Control Routing match rule targeted a token value that contained an array, the array was collapsed into a comma-separated string. However, the expected behavior is for rules targeting arrays to pass if any value within the array matches the condition, rather than requiring the entire array to match. - ---- - -### {{% icon-resolved %}} Developer Portal: When typing the links to use for the footer, the text boxes keep losing focus {#41626} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 41626 | Fixed in API Connectivity Manager 1.6.0 | - -{{}} -#### Description -The **Text to Display** and **URL** boxes on the Developer Portal's _Configure Footer_ page lose focus when text is being typed. - -#### Workaround - -You may need to click back into the boxes several times while typing to regain focus. - ---- - -### {{% icon-resolved %}} TLS setting on listener is not reset when TLS policy is removed {#41426} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 41426 | Fixed in API Connectivity Manager 1.6.0 | - -{{}} -#### Description -When a TLS policy is removed from an environment, the web interface will not automatically adjust the TLS setting on the listener. As a result, the listener will remain in the `TLS enabled` state, leading to an unsuccessful attempt to save and publish the environment. - -#### Workaround - -Toggle the TLS setting in the web interface when removing the TLS policy from an environment. - ---- - -## 1.4.0 -January 23, 2023 - -### {{% icon-resolved %}} Cluster and Environment deletion issues when Portal Docs are published {#40163} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 40163 | Fixed in API Connectivity Manager 1.4.1 | - -{{}} -#### Description -When a developer portal proxy is hosting API documentation, the infrastructure admin is, in some cases, unable to delete clusters in other unrelated Environments and, therefore, unable to delete those same Environments. - ---- - -### {{% icon-resolved %}} The Proxy Cluster API isn't ready to be used {#40097} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 40097 | Fixed in API Connectivity Manager 1.5.0 | - -{{}} -#### Description -The API Connectivity Manager API documentation has inadvertently released details of Proxy Cluster endpoints and related policies before their public launch. Consequently, the following Proxy Cluster endpoints and global policies should not be used yet. - -The following Proxy Cluster endpoints are not ready for use: - -- `/infrastructure/workspaces/{workspaceName}/proxy-clusters` -- `/infrastructure/workspaces/{workspaceName}/proxy-clusters/{name}` - -The following global policies are not yet ready for use: - -- cluster-zone-sync -- cluster-wide-config - -A later version of the release notes will inform you when these endpoints and policies are ready. - ---- - -### {{% icon-resolved %}} Configurations aren't pushed to newly onboarded instances if another instance is offline {#40035} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 40035 | Fixed in API Connectivity Manager 1.5.0 | - -{{}} -#### Description -When a new instance is onboarded, it will not be configured if any other instances are offline. 
- -#### Workaround - -After onboarding the instance as usual, push the existing configuration again to the new instance, without making any changes. - ---- - -## 1.3.0 -December 12, 2022 - -### {{% icon-resolved %}} OIDC policy cannot be applied alongside a proxy authentication policy {#39604} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 39604 | Fixed in API Connectivity Manager 1.4.0 | - -{{}} -#### Description -It is not possible to use both an OpenID Connect (OIDC) policy and a proxy authentication policy concurrently. - ---- - -### {{% icon-resolved %}} The web interface doesn't pass the `enableSNI` property for the TLS backend policy {#39445} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 39445 | Fixed in API Connectivity Manager 1.3.1 | - -{{}} -#### Description -When configuring a TLS backend policy in the web interface, the new `enableSNI` property does not match the value of the deprecated `proxyServerName` property, resulting in an API error. The `enableSNI` value must be the same as `proxyServerName` value. - -#### Workaround - -Use the NGINX Management Suite API Connectivity Manager REST API to send a PUT request to the following endpoint, providing the correct values for `enableSNI` and `proxyServerName`. Both values must match. - -{{< raw-html>}}
      {{}} -{{}} -| Method | Endpoint | -|--------|--------------------------------------------------------------------------------------| -| PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}` | -{{}} -{{< raw-html>}}
      {{}} - ---- - -### {{% icon-resolved %}} A JWT token present in a query parameter is not proxied to the backend for advanced routes {#39328} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 39328 | Fixed in API Connectivity Manager 1.4.0 | - -{{}} -#### Description -When using JWT authentication with advanced routes, a JWT token that is provided as a query parameter will not be proxied to the backend service. - -#### Workaround - -Pass the JWT token as a header instead of providing the JWT token as a query parameter. - ---- - -## 1.2.0 -October 18, 2022 - -### {{% icon-resolved %}} Developer Portal backend information is unintentionally updated when editing clusters within an environment {#39409} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 39409 | Fixed in API Connectivity Manager 1.3.1 | - -{{}} -#### Description -The Developer Portal backend information may be inadvertently updated in the following circumstances: - -1. If you have multiple Developer Portal clusters and update the backend information (for example, enable TLS or change the host or port, etc. ) for any of those clusters, the update is applied to all of the clusters. - -2. If you have one or more Developer Portal clusters and update any other cluster in the environment (for example, the API Gateway or Developer Portal Internal cluster), the backend settings for the Developer Clusters are reset to their defaults (127.0.0.1/8080/no TSL). - -#### Workaround - -- Workaround for scenario #1 - - Use the NGINX Management Suite API Connectivity Manager REST API to send a PUT request to the following endpoint with the correct backend settings for each Developer Portal cluster: - - {{< raw-html>}}
      {{}} - {{}} - | Method | Endpoint | - |--------|--------------------------------------------------------------------------------------| - | PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}` | - {{}} - {{< raw-html>}}
      {{}} - -- Workaround for scenario #2 - - If you have just one Developer Portal cluster, you can use the web interface to update the backend settings for the cluster if you're not using the default settings. - - If you have more than one Developer Portal cluster, use the NGINX Management Suite API Connectivity Manager REST API to send a PUT request to the following endpoint with the correct backend settings for each cluster: - - {{< raw-html>}}
      {{}} - {{}} - | Method | Endpoint | - |--------|--------------------------------------------------------------------------------------| - | PUT | `/infrastructure/workspaces/{{infraWorkspaceName}}/environments/{{environmentName}}` | - {{}} - {{< raw-html>}}
      {{}} - ---- - -### {{% icon-resolved %}} The user interface is erroneously including irrelevant information on the TLS inbound policy workflow {#38046} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 38046 | Fixed in API Connectivity Manager 1.3.0 | - -{{}} -#### Description -On the TLS inbound policy, toggling `Enable Client Verification` On/Off results in the user interface adding irrelevant information that causes the publish to fail due to validation error. - -#### Workaround - -Dismiss the policy without saving and restart the UI workflow to add the TLS inbound policy. - ---- - -### {{% icon-resolved %}} Portals secured with TLS policy require additional environment configuration prior to publishing API docs {#38028} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 38028 | Fixed in API Connectivity Manager 1.3.0 | - -{{}} -#### Description -When the `tls-backend` policy is applied on a developer portal cluster, the communication between the portal UI and portal backend service is secured. By default, when the portal cluster is created, and the backend is not explicitly specified in the payload, it defaults to HTTP. Adding the tls-backend policy does not automatically upgrade the protocol to HTTPS. If the protocol is not set to HTTPS, publishing API docs to the portal will fail. The user has to explicitly change the backend protocol to HTTPS. - -#### Workaround - -In the user interface, navigate to Workspace > Environment > Developer Portal Clusters > Edit Advanced Config. Select "edit the Backend" and toggle the Enable TLS switch to enabled. - ---- - -### {{% icon-resolved %}} A proxy deployed with a `specRef` field (OAS) and `basePathVersionAppendRule` set to other than `NONE` may cause versions to appear twice in the deployed location block {#36666} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 36666 | Fixed in API Connectivity Manager 1.9.0 | - -{{}} -#### Description -If you add an API doc and reference it with the `specRef` field in the proxy object, the OAS (API doc) is used as the source of truth for the base path. If the OAS (API doc) contains the full correct base path, and you use any `basePathVersionAppendRule` value other than `NONE`, the base path will be corrupted by appending/prepending the version in the deployment (e.g. `/api/v3/v3`). - -#### Workaround - -If you are using an API doc with a proxy: - - 1. Put the entire true base path of the API in the server section of the API doc: - - ```nginx - Servers: - - url: https://(API-address)/api/v3 - ``` - - or - - ```nginx - Servers: - - url: /api/v3 - ``` - - {{< call-out "note" >}}In the example above only `/api/v3` is relevant for this issue, and it should be the full base path to which the individual paths in the API document can be appended directly. {{< /call-out >}} - - 2. Set the value of the base path version append rule (`basePathVersionAppendRule`) in the proxy to `NONE`. - ---- - -### {{% icon-resolved %}} New users are unable to see pages even though they have been given access. {#36607} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 36607 | Fixed in API Connectivity Manager 1.3.0 | - -{{}} -#### Description -A newly created role needs a minimum of READ access on the LICENSING feature. Without this, the users will not have access to the pages even though they have been granted permission. They will see 403 errors surfacing as license errors while accessing the pages. 
- -#### Workaround - -Assign a minimum of READ access on the LICENSING feature to all new roles - ---- - -## 1.1.0 -August 18, 2022 - -### {{% icon-resolved %}} To see updates to the Listener's table, forced refresh of the cluster details page is required. {#36540} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 36540 | Fixed in API Connectivity Manager 1.2.0 | - -{{}} -#### Description -When trying to update the Advance Config for Environment cluster, changes are not reflected on the cluster details page after saving and submitting successfully. - -#### Workaround - -Refresh or reload the browser page to see changes on the cluster details page. - ---- - -### {{% icon-resolved %}} Using labels to specify the backend is partially available {#36317} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 36317 | Fixed in API Connectivity Manager 1.2.0 | - -{{}} -#### Description -The `targetBackendServiceLabel` label is not editable through the web interface. `targetBackendServiceLabel` is not configurable at the URI level in the spec. - -#### Workaround - -`targetBackendServiceLabel` label can be updated by sending a PUT command to the API. - ---- - -### {{% icon-resolved %}} Ratelimit policy cannot be applied with OAuth2 JWT Assertion policy. {#36095} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 36095 | Fixed in API Connectivity Manager 1.2.0 | - -{{}} -#### Description -Rate limit policy cannot be applied with the OAuth2 JWT assertion policy. - ---- - -### {{% icon-resolved %}} Enums are not supported in Advanced Routing. {#34854} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 34854 | Fixed in API Connectivity Manager 1.2.0 | - -{{}} -#### Description -Enums cannot be set for path or query parameters while applying advanced routing. A list of specific values cannot be specified for their advanced routing parameters. - ---- - -## 1.0.0 -July 19, 2022 - -### {{% icon-resolved %}} The API Connectivity Manager module won't load if the Security Monitoring module is enabled {#39943} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 39943 | Fixed in Instance Manager 2.8.0 | - -{{}} -#### Description -If you have Instance Manager 2.7 or earlier installed and attempt to enable both the API Connectivity Manager and Security Monitoring modules on the same NGINX Management Suite management plane, the API Connectivity Manager module will not load because of incompatibility issues with the Security Monitoring module. - -#### Workaround - -Before enabling the API Connectivity Manager and Security Monitoring modules, ensure that your Instance Manager is upgraded to version 2.8 or later. Be sure to read the release notes for each module carefully, as they may contain important information about version dependencies. - -To see which version of Instance Manager you have installed, run the following command: - -- CentOS, RHEL, RPM-based: - - ```bash - yum info nms-instance-manager - ``` - -- Debian, Ubuntu, Deb-based: - - ```bash - dpkg -s nms-instance-manager - ``` - ---- - -### {{% icon-resolved %}} Credentials endpoint is disabled by default {#35630} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 35630 | Fixed in API Connectivity Manager 1.2.0 | - -{{}} -#### Description -For security reasons, the credentials endpoint on API Connectivity Manager(ACM) is disabled by default. To use the developer portal credentials workflow, configuration changes need to be made on the ACM host to enable credentials endpoints. 
Also, communication between ACM and the developer portal can be secured by providing certificates. - -#### Workaround - -To enable the credentials endpoints on ACM host - -1. SSH to the ACM host -1. Enable resource credentials endpoint - In {{/etc/nms/nginx/locations/nms-acm.conf}}, uncomment the location block - - ```nginx - #Deployment of resource credentials from the devportal - # Uncomment this block when using devportal. Authentication is disabled - # for this location. This location block will mutually - # verify the client trying to access the credentials API. - # location = /api/v1/devportal/credentials { - # OIDC authentication (uncomment to disable) - # auth_jwt off; - # auth_basic off; - # error_page 401 /401_certs.json; - # if ($ssl_client_verify != SUCCESS) { - # return 401; - # } - # proxy_pass http://apim-service/api/v1/devportal/credentials; - #} - ``` - -1. Save the changes. -1. Reload NGINX on the ACM host: `nginx -s reload` - ---- - -### {{% icon-resolved %}} Unable to delete an environment that is stuck in a Configuring state. {#35546} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 35546 | Fixed in API Connectivity Manager 1.2.0 | - -{{}} -#### Description -In the web interface, after deleting all of the proxy clusters is an environment that's in a `FAIL` state, the environment may transition to a `CONFIGURING` state and cannot be deleted. - -#### Workaround - -Add back the deleted proxy clusters using the web interface. The environment will transition to a `Fail` state. At this point, you can use the API to delete the proxy by sending a `DELETE` request to: - -``` text -https:///api/acm/v1/infrastructure/workspaces//environments/ -``` - ---- - -### {{% icon-resolved %}} Installing NGINX Agent on Ubuntu 22.04 LTS fails with `404 Not Found` error {#35339} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 35339 | Fixed in API Connectivity Manager 1.3.0 | - -{{}} -#### Description -When installing the NGINX Agent on Ubuntu 22.04 LTS, the installation script fails with a `404 Not Found` error similar to the following: - -```text -404 Not found [IP: ] -Reading package lists... -E: The repository 'https://192.0.2.0/packages-repository/deb/ubuntu jammy Release' does not have a Release file. -E: The repository 'https://pkgs.nginx.com/app-protect/ubuntu jammy Release' does not have a Release file. -E: The repository 'https://pkgs.nginx.com/app-protect-security-updates/ubuntu jammy Release' does not have a Release file. -``` - -#### Workaround - -Edit the NGINX Agent install script to use the codename `focal` for Ubuntu 20.04. - -1. Download the installation script: - - ```bash - curl -k https:///install/nginx-agent > install.sh - ``` - -2. Open the `install.sh` file for editing. -3. Make the following changes: - - On **lines 256-258**, change the following: - - ```text - codename=$(cat /etc/*-release | grep '^DISTRIB_CODENAME' | - sed 's/^[^=]*=\([^=]*\)/\1/' | - tr '[:upper:]' '[:lower:]') - ``` - - to: - - ```text - codename=focal - ``` - -
      - - **—OR—** - - Alternatively, on **line 454**, change the following: - - ```text - deb ${PACKAGES_URL}/deb/${os}/ ${codename} agent - ``` - - to: - - ```text - deb ${PACKAGES_URL}/deb/${os}/ focal agent - ``` - -4. Save the changes. -5. Run the `install.sh` script. - ---- - -### {{% icon-bug %}} OIDC policy cannot be applied on a shared proxy cluster {#35337} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 35337 | Open | - -{{}} -#### Description -If the same proxy cluster is used for both the Developer Portal and API Gateway, the OIDC Policy is not applied. - -#### Workaround - -Within an environment, use separate proxy clusters for the Developer Portal and API Gateway when applying an OIDC policy. - ---- - -### {{% icon-resolved %}} No validation when conflicting policies are added {#34531} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 34531 | Fixed in API Connectivity Manager 1.3.0 | - -{{}} -#### Description -When securing the API Proxy with policies like basic authentication or APIKey authentication, the user is not warned if a duplicate or conflicting policy is already added. Conflicting policies are not validated. - -#### Workaround - -Secure the API proxy with only one policy. - ---- - -### {{% icon-resolved %}} CORS policy doesn't support proxying preflight requests to the backend when combined with an authentication policy {#34449} - -{{}} - -| Issue ID | Status | -|----------------|--------| -| 34449 | Fixed in API Connectivity Manager 1.6.0 | - -{{}} -#### Description -On an API Proxy with an authentication policy, applying a CORS policy with `preflightContinue=true` is not supported. - -#### Workaround - -Apply CORS policy and set `preflightContinue=false`. diff --git a/content/nms/acm/releases/release-notes.md b/content/nms/acm/releases/release-notes.md deleted file mode 100644 index cae372091..000000000 --- a/content/nms/acm/releases/release-notes.md +++ /dev/null @@ -1,986 +0,0 @@ ---- -description: These release notes list and describe the new features, enhancements, - and resolved issues in NGINX Management Suite API Connectivity Manager. -nd-docs: DOCS-931 -title: Release Notes -toc: true -weight: 100 ---- - ---- - -## 1.9.3 - -November 06, 2024 - -### Upgrade Paths {#1-9-3-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.6.0 - 1.9.2 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-9-3-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Stability and performance improvements** - - This release includes stability and performance improvements. - - -### Known Issues{#1-9-3-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.9.2 - -March 14, 2024 - -### Upgrade Paths {#1-9-2-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.6.0 - 1.9.1 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-9-2-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Stability and performance improvements** - - This release includes stability and performance improvements. - - -### Resolved Issues{#1-9-2-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} JWT tokens are overwritten when multiple proxies are assigned to one gateway [(44636)]({{< ref "/nms/acm/releases/known-issues.md#44636" >}}) - -### Known Issues{#1-9-2-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.9.1 - -October 05, 2023 - -### Upgrade Paths {#1-9-1-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.6.0 - 1.9.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-9-1-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Stability and performance improvements** - - This release includes stability and performance improvements. - - -### Resolved Issues{#1-9-1-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} Module crashes when an OpenAPI spec is uploaded with a global security requirement that contains an empty security requirement object [(44393)]({{< ref "/nms/acm/releases/known-issues.md#44393" >}}) - -### Known Issues{#1-9-1-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.9.0 - -September 07, 2023 - -### Upgrade Paths {#1-9-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.6.0 - 1.8.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-9-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Server URL templating in OpenAPI specification file** - - Now you can use templating for the server URL in a supplied OpenAPI specification. You must supply the full explicit `basePath` as part of the server URL in the OpenAPI specification file. - - When creating an API proxy using an OAS file, the following values will not be editable in the web interface if they are provided via the OAS spec file: - - ```json - servers: - url: http://{server}.hostname.com/api/{version} - variables: - server: - default: customers - version: - default: v1 - basePathVersionAppendRule: - default : none - stripBasePathVersion: - default : false - ``` - -- {{% icon-feature %}} **OpenAPI specification support for OAuth2 JWT assertion policy** - - You can now specify an OAuth2 JWT assertion policy to apply to the API Proxy being created using an OpenAPI specification file. - -- {{% icon-feature %}} **Backend server configuration from OpenAPI specification file** - - You can provide the backend server configuration for upstream servers in an OpenAPI specification file using extensions specific to API Connectivity Manager. See the [Publish an API Proxy]({{< ref "/nms/acm/getting-started/publish-api-proxy.md#publish-api-proxy-with-spec" >}}) documentation. - - -### Resolved Issues{#1-9-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} A proxy deployed with a `specRef` field (OAS) and `basePathVersionAppendRule` set to other than `NONE` may cause versions to appear twice in the deployed location block [(36666)]({{< ref "/nms/acm/releases/known-issues.md#36666" >}}) -- {{% icon-resolved %}} Resources deployed to a Developer Portal which has had its database reset cannot be updated or removed [(43140)]({{< ref "/nms/acm/releases/known-issues.md#43140" >}}) -- {{% icon-resolved %}} Certificates associated with empty instance groups can be deleted, resulting in a broken reference in the API Connectivity Manager module [(43671)]({{< ref "/nms/acm/releases/known-issues.md#43671" >}}) -- {{% icon-resolved %}} Deployment fails due to duplicate locations [(43673)]({{< ref "/nms/acm/releases/known-issues.md#43673" >}}) -- {{% icon-resolved %}} Cannot use TLS enabled backend with HTTP backend-config policy [(44212)]({{< ref "/nms/acm/releases/known-issues.md#44212" >}}) - -### Known Issues{#1-9-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.8.0 - -July 27, 2023 - -### Upgrade Paths {#1-8-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.5.0 - 1.7.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-8-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Advanced security policy for proxies** - - You can use the [Advanced Security policy]({{< ref "/nms/acm/how-to/policies/advanced-security.md" >}}) to add a pre-defined NGINX App Protect to your deployment. This enhancement allows you to specify the rules for each API. - -- {{% icon-feature %}} **Publish APIs using OpenAPI Specification version 3.0 or 3.1** - - Now, you can publish APIs using OpenAPI Specification version 3.0 or 3.1 - -- {{% icon-feature %}} **Added `matchRule` field to the `route` items in `proxyConfig.ingress`** - - The `matchRule` field is now available in the `route` items in `proxyConfig.ingress`. This field is optional and allows you to define a path matching rule for advanced routes. - - The OpenAPI Specification now supports the `x-acm-match-rule` extension for defining match rules for paths within routes. If you don't specify a value for this extension, it will default to `EXACT`. The only allowed values for `matchRule` are the strings `EXACT` and `PREFIX`. - - -### Changes in Default Behavior{#1-8-0-changes-in-behavior} -This release has the following changes in default behavior: - -- {{% icon-feature %}} **Proxy labels removed** - - Labels on proxies were added with future use cases in mind although without a current need. The proxy labels have been removed to avoid confusion as to their purpose. - - -### Resolved Issues{#1-8-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} Environments with WAF enabled may transition to a Failed status when a Developer Portal cluster is added. [(43231)]({{< ref "/nms/acm/releases/known-issues.md#43231" >}}) - -### Known Issues{#1-8-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.7.0 - -June 21, 2023 - -### Upgrade Paths {#1-7-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.4.0 - 1.6.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-7-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Advanced Security Policy** - - The new [Advanced Security policy]({{< ref "/nms/acm/how-to/policies/advanced-security.md" >}}) can be used to add a pre-defined NGINX App Protect configuration to your deployment. Doing so will apply the rules specified in the policy to your APIs. - -- {{% icon-feature %}} **Option added to allow API proxy to ignore invalid headers** - - The [Request Header Specification policy]({{< ref "/nms/acm/how-to/policies/request-header-specification.md" >}}) allows headers with (.) and (\_) characters to be proxied to backend services. - - By default, NGINX server will drop all headers that contain (.) and (\_) characters in the header name. Though not common, it is a legal character in headers. This feature will allow users to instruct NGINX to allow such headers to be proxied. - -- {{% icon-feature %}} **Regex support added to access control routing claims** - - Access control routing claims can be arrays. For example, roles and groups are typically represented as an array. You can now use a regular expression to match against claims embedded in arrays. - -- {{% icon-feature %}} **Ingress routing rules now allow using regular expressions** - - Regular expressions are now supported in routing rules. This will enable routing of requests that match against strings like `?wsdl`. - - -### Resolved Issues{#1-7-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} The routes filter under the proxy metrics page won’t work with params [(42471)]({{< ref "/nms/acm/releases/known-issues.md#42471" >}}) -- {{% icon-resolved %}} Multiple entries selected when gateway proxy hostnames are the same [(42515)]({{< ref "/nms/acm/releases/known-issues.md#42515" >}}) - -### Known Issues{#1-7-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.6.0 - -May 11, 2023 - -### Upgrade Paths {#1-6-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.3.0 - 1.5.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-6-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Create security policies using an OAS specification** - - With the latest update, you can now create APIKey and Basic Auth security policies using an OAS specification. This enhancement streamlines the process for creating policies, reduces errors, and improves system security. API Connectivity Manager and NGINX can be integrated into the build pipeline where you generate OpenAPI specs. - -- {{% icon-feature %}} **New buffer settings were added to the HTTP Backend Configuration Proxy policy to enhance performance** - - With the latest HTTP Backend Configuration Proxy policy update, you can now modify the size and location of buffer temporary files or turn off buffering altogether. This enhancement offers greater flexibility and control to API Connectivity Manager users, allowing them to optimize their system's performance and improve the overall end-user experience. - -- {{% icon-feature %}} **Gain deeper insights into your environments with enhanced analytics and metrics** - - With this release, you can view more information about your environments. This includes the number of clusters and runtimes, the number of APIs available, and the total amount of data transmitted in and out of each cluster. Additionally, you can view graphs displaying crucial analytics, including traffic metrics, which can help you better understand your system's performance. - - -### Resolved Issues{#1-6-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} CORS policy doesn't support proxying preflight requests to the backend when combined with an authentication policy [(34449)]({{< ref "/nms/acm/releases/known-issues.md#34449" >}}) -- {{% icon-resolved %}} TLS setting on listener is not reset when TLS policy is removed [(41426)]({{< ref "/nms/acm/releases/known-issues.md#41426" >}}) -- {{% icon-resolved %}} Developer Portal: When typing the links to use for the footer, the text boxes keep losing focus [(41626)]({{< ref "/nms/acm/releases/known-issues.md#41626" >}}) -- {{% icon-resolved %}} Array values in token claims are treated as string values [(42388)]({{< ref "/nms/acm/releases/known-issues.md#42388" >}}) - -### Known Issues{#1-6-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.5.0 - -March 28, 2023 - -### Upgrade Paths {#1-5-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.2.0 - 1.4.1 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-5-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Use role-based access control for enhanced security and governance** - - With new [built-in RBAC roles for API Connectivity Manager]({{< ref "/nim/admin-guide/rbac/overview-rbac.md#build-in-roles" >}}), administrators can grant or restrict user access to workspaces and features, empowering teams to manage their own workflows. - - {{< call-out "note" >}} - - [Set Up RBAC for API Owners]({{< ref "/nms/acm/tutorials/rbac-api-owners.md" >}}) - - [Set Up RBAC for Infra Admins]({{< ref "/nms/acm/tutorials/rbac-infra-admins.md" >}}) - {{< /call-out>}} - -- {{% icon-feature %}} **Multiple hostname support** - - Proxy clusters can be shared across multiple environments (hostnames). - -- {{% icon-feature %}} **Secure handling of sensitive data** - - API Connectivity Manager now provides enhanced security for sensitive data, including credentials used in APIKeys, Basic Auth, OAuth2, and JWT policies. All secrets are stored in a secure Vault and encrypted for added protection. - -- {{% icon-feature %}} **Runtime state sharing in an API gateway or Developer Portal** - - Administrators can use [cluster-wide policies]({{< ref "/nms/acm/how-to/policies/cluster-wide-config.md" >}}) to configure uniform settings across all instances in the cluster, such as worker connections, hash table size, and keepalive settings, to optimize performance. Furthermore, using the [Cluster Zone Sync policy]({{< ref "/nms/acm/how-to/policies/cluster-zone-sync.md" >}}), the cluster can be configured to share the runtime state and sync data across all instances, allowing for cluster-wide rate limits and sticky sessions. - -- {{% icon-feature %}} **Performance improvements for the web interface** - - A number of improvements have been made to how the web interface queries the backend services when fetching data. - -- {{% icon-feature %}} **Add a Health Check policy to your gRPC proxy to ensure optimal performance** - - The [gRPC proxy can be enabled with a Health Check policy]({{< ref "/nms/acm/how-to/policies/grpc-policies.md#health-check" >}}), allowing it to check the health status of backend gRPC services and route requests accordingly. - -- {{% icon-feature %}} **Improved certificate handling** - - API Connectivity Manager will not generate new certificates if any have already been specified in the TLS policy; instead, ACM will reference the existing certificates. In this way, wildcard certificates may be employed. - - -### Security Updates{#1-5-0-security-updates} - -{{< call-out "important" >}} -For the protection of our customers, NGINX doesn’t disclose security issues until an investigation has occurred and a fix is available. -{{< /call-out >}} - -This release includes the following security updates: - -- {{% icon-resolved %}} **Instance Manager vulnerability CVE-2023-1550** - - NGINX Agent inserts sensitive information into a log file ([CVE-2023-1550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1550)). An authenticated attacker with local access to read NGINX Agent log files may gain access to private keys. This issue is exposed only when the non-default trace-level logging is enabled. - - NGINX Agent is included with NGINX Instance Manager, and used in conjunction with API Connectivity Manager and the Security Monitoring module. - - This issue has been classified as [CWE-532: Insertion of Sensitive Information into Log File](https://cwe.mitre.org/data/definitions/532.html). 
- - - Mitigation: - - - Avoid configuring trace-level logging in the NGINX Agent configuration file. For more information, refer to the [Configuring the NGINX Agent]({{< ref "/nms/nginx-agent/install-nginx-agent.md#configuring-the-nginx-agent ">}}) section of NGINX Management Suite documentation. If trace-level logging is required, ensure only trusted users have access to the log files. - - - Fixed in: - - - NGINX Agent 2.23.3 - - Instance Manager 2.9.0 - - For more information, refer to the MyF5 article [K000133135](https://my.f5.com/manage/s/article/K000133135). - - -### Changes in Default Behavior{#1-5-0-changes-in-behavior} -This release has the following changes in default behavior: - -- {{% icon-feature %}} **ACL IP Policy denies IP addresses by default** - - Updates the ACL IP policy to deny IP addresses by default instead of allowing them by default. - - -### Resolved Issues{#1-5-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} Configurations aren't pushed to newly onboarded instances if another instance is offline [(40035)]({{< ref "/nms/acm/releases/known-issues.md#40035" >}}) -- {{% icon-resolved %}} The Proxy Cluster API isn't ready to be used [(40097)]({{< ref "/nms/acm/releases/known-issues.md#40097" >}}) - -### Known Issues{#1-5-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.4.1 - -February 02, 2023 - -### Upgrade Paths {#1-4-1-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.1.0 - 1.4.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-4-1-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Stability and performance improvements** - - This release includes stability and performance improvements. - - -### Resolved Issues{#1-4-1-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} Cluster and Environment deletion issues when Portal Docs are published [(40163)]({{< ref "/nms/acm/releases/known-issues.md#40163" >}}) - -### Known Issues{#1-4-1-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.4.0 - -January 23, 2023 - -### Upgrade Paths {#1-4-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.1.0 - 1.3.1 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-4-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Allow or deny access to APIs for specified consumers** - - Control access to APIs to prevent unauthorized requests from designated consumers. - -- {{% icon-feature %}} **OAuth2 Introspection policy now supports token claim verification** - - API admins can configure an OAuth2 Introspection policy with token claim verification. If the value of an introspected token claim matches the values in the policy configuration, the request will be allowed to proceed to the backend. If not, the request will be denied, and `403 Forbidden` will be returned. - -- {{% icon-feature %}} **Adds support for NGINX Plus R28** - - API Connectivity Manager 1.4.0 is compatible with NGINX Plus R28. For requirements related to NGINX Management Suite and API Connectivity Manager, please refer to the [Technical Specifications]({{< ref "/nim/fundamentals/tech-specs.md" >}}) guide. - - -### Resolved Issues{#1-4-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} A JWT token present in a query parameter is not proxied to the backend for advanced routes [(39328)]({{< ref "/nms/acm/releases/known-issues.md#39328" >}}) -- {{% icon-resolved %}} OIDC policy cannot be applied alongside a proxy authentication policy [(39604)]({{< ref "/nms/acm/releases/known-issues.md#39604" >}}) - -### Known Issues{#1-4-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.3.1 - -December 16, 2022 - -### Upgrade Paths {#1-3-1-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.0.0 - 1.3.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-3-1-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Stability and performance improvements** - - This release includes stability and performance improvements. - - -### Resolved Issues{#1-3-1-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} Developer Portal backend information is unintentionally updated when editing clusters within an environment [(39409)]({{< ref "/nms/acm/releases/known-issues.md#39409" >}}) -- {{% icon-resolved %}} The Inbound TLS policy breaks when upgrading from API Connectivity Manager 1.2.0 to 1.3.0. [(39426)]({{< ref "/nms/acm/releases/known-issues.md#39426" >}}) -- {{% icon-resolved %}} The web interface doesn't pass the `enableSNI` property for the TLS backend policy [(39445)]({{< ref "/nms/acm/releases/known-issues.md#39445" >}}) - -### Known Issues{#1-3-1-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.3.0 - -December 12, 2022 - -### Upgrade Paths {#1-3-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.0.0 - 1.2.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-3-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Configure access-controlled routing** - - API lifecycle management requires routing API traffic with fine-level control, which is something that token-based authentication schemes that leverage JWT claims do well. Permissions can be encoded as custom claims in the token. Then, once the API proxy validates the token (JWT), it can access all the fields in the token as variables. Decisions can be made based on matching the claims. - - - Applying Fine-Grained Access Control - - API Owners can apply fine-grained access control and restrict access to their APIs based on specific claims in the token. The policy can be configured to enforce fine-grained control for specific routes or be fine-tuned to support particular methods per route. - - - Header-Based Routing - - Routing decisions can be made based on headers in the incoming requests. API owners can configure rules and conditions that must be matched before routing requests. - - See [Configure Access Control Routing]({{< ref "/nms/acm/how-to/policies/access-control-routing.md" >}}) to learn how to restrict access to your application servers based on JWT claims or header values. - -- {{% icon-feature %}} **Use the web interface to publish and manage gRPC services** - - With API Connectivity Manager 1.2, we introduced support for [publishing and managing gRPC services]({{< ref "/nms/acm/how-to/policies/grpc-policies.md" >}}). Now, in this release, we extend that capability to the web interface. - - You can secure gRPC services with the following policies: - - - gRPC environment policies - - - Error Response Format - - Log Format - - Proxy Response Headers - - Request Body Size Limit - - Request Correlation ID - - TLS Backend - - TLS Inbound - - - gRPC proxy policies: - - - ACL IP Restriction - - APIKey Authentication - - Basic Authentication - - GRPC Backend Config - - JSON Web Token Assertion - - OAuth2 Introspection - - Proxy Request Headers - - Rate Limit - -- {{% icon-feature %}} **Secure communication between API Connectivity Manager and Developer Portal with mTLS** - - API Connectivity Manager communicates with the Developer Portal host to publish API docs and create API credentials. Now, PlatformOps can secure this communication channel by enabling mTLS between the hosts. - - Previously, mTLS required a TLS backend policy on the internal portal proxy cluster. API Connectivity Manager 1.3 removes that restriction. The TLS inbound policy on the internal portal allows providing a client certificate for API Connectivity Manager when mTLS is enabled. API Connectivity Manager presents this client certificate when connecting to the Developer Portal, identifying itself as a trusted client. - -- {{% icon-feature %}} **Other Enhancements** - - - **Improved policy layout** - - The Policy user interface has been improved with highlights for the different policy sections. - - - **NGINX Management Suite config changes are preserved during upgrade** - - Upgrades no longer overwrite customized configurations unless instructed to by the user. - - - **Support for chained certificates** - - Infrastructure administrators can now upload public certificates in PEM format, along with an optional list of intermediate certificates for validating the public certificate. 
- - - **Support for SNI requirements from hosted services** - - API owners can now use the OAuth2 policy with hosted Identity Provider services that enforce Server Name Indication (SNI). - - -### Resolved Issues{#1-3-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} No validation when conflicting policies are added [(34531)]({{< ref "/nms/acm/releases/known-issues.md#34531" >}}) -- {{% icon-resolved %}} Installing NGINX Agent on Ubuntu 22.04 LTS fails with `404 Not Found` error [(35339)]({{< ref "/nms/acm/releases/known-issues.md#35339" >}}) -- {{% icon-resolved %}} New users are unable to see pages even though they have been given access. [(36607)]({{< ref "/nms/acm/releases/known-issues.md#36607" >}}) -- {{% icon-resolved %}} Portals secured with TLS policy require additional environment configuration prior to publishing API docs [(38028)]({{< ref "/nms/acm/releases/known-issues.md#38028" >}}) -- {{% icon-resolved %}} The user interface is erroneously including irrelevant information on the TLS inbound policy workflow [(38046)]({{< ref "/nms/acm/releases/known-issues.md#38046" >}}) - -### Known Issues{#1-3-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.2.0 - -October 18, 2022 - -### Upgrade Paths {#1-2-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.0.0 - 1.1.1 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-2-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Restrict access to APIs based on IP address** - - Using the [ACL-IP policy]({{< ref "/nms/acm/how-to/policies/api-access-control-lists.md" >}}), API owners can now restrict access to APIs based on IP addresses. APIs can be protected by quickly blocking rogue requests from certain IPs or allowing access to only known IPs. - -- {{% icon-feature %}} **Secure API access with OAuth2 tokens** - - API Owners can [restrict access to their APIs with OAuth2 tokens]({{< ref "/nms/acm/how-to/policies/introspection.md" >}}) by swapping an opaque token for claims or a JWT token to be proxied to the backend service. The policy can be configured to grant access to APIs after having the tokens introspected. In addition, the claims in the token can be extracted and forwarded to the backend service. - - {{< call-out "tip" >}}Learn how to [set up an OAuth2 Introspection policy with Keycloak]({{< ref "/nms/acm/tutorials/introspection-keycloak.md" >}}) as the authorization server.{{< /call-out >}} - -- {{% icon-feature %}} **Enhanced API documentation on developer portal** - - The API documentation published to the Developer Portal now displays detailed security schema information for each API. - -- {{% icon-feature %}} **Support for HTTP/2** - - To improve the performance and efficiency of client-server interactions, HTTP/2 can be enabled on the [API proxies]({{< ref "/nms/acm/getting-started/publish-api-proxy.md#set-up-api-proxy" >}}). With HTTP/2 enabled, API Proxies will continue to maintain backward compatibility with older browsers. - -- {{% icon-feature %}} **Improved visualizations for resource credentials** - - API owners can now view the origin of resource credentials. The source field indicates where the credentials were created. For security reasons, the credentials created on the Developer Portal will be masked, but the API owners can view the origin of the resource credentials. - -- {{% icon-feature %}} **Express API payload size with unit of measure** - - The maximum allowed size for the client request body can now be configured in bytes, kilobytes(K) or megabytes(M). - - The `maxRequestBodySizeLimit` attribute of the policy is deprecated and will be removed in API Connectivity Manager 1.3.0. `Size` is the new attribute that supports bytes, megabytes(M), and kilobytes(K). The default setting is 1M. - -- {{% icon-feature %}} **Database backup included in support packages** - - The [Developer Portal support package]({{< ref "/nms/support/support-package.md" >}}) now includes the option to back up the PostgreSQL database. - -- {{% icon-feature %}} **Publish and manage gRPC services - preview release** - - {{< call-out "important" >}}This is a **preview** feature for you to try out. You shouldn't use preview features for production purposes.{{< /call-out >}} - - To handle gRPC traffic, you can now [publish and manage gRPC proxies]({{< ref "/nms/acm/how-to/services/publish-grpc-proxy.md" >}}). 
- - Publish gRPC proxies and route gRPC traffic to support the following use cases: - - - Simple RPC (single request‑response) - - Response‑streaming RPC - - Request‑streaming RPC - - Bidirectional‑streaming RPC - - Route to all services in a gRPC service package - - Route to a single gRPC service - - Route to individual gRPC methods - - Route to multiple gRPC services - - Respond to errors with custom gRPC error response format policy - -- {{% icon-feature %}} **Out-of-the-box protection for Developer Portals** - - Developer Portals are now deployed with out-of-the-box protection against rapid requests/overuse and server fingerprinting: - - 1. Protection against server fingerprinting - - The proxy response header policy is now applied by default to a Developer Portal. The default policy disables server tokens from being returned in the proxy response. - - 2. Protection against rapid requests and over-use - - To protect the portal application, the default rate limit policy limits the number of requests a client can make in a time period. Platform admins can customize the policy to meet their SLAs. - -- {{% icon-feature %}} **Support for multi-host deployment pattern for Developer Portals** - - Developer Portals can support multiple deployment patterns. The portal backend API service can be scaled to multiple hosts and can be load-balanced using host IP addresses or internal DNS. - - To support the deployment patterns, `configs -> proxyConfig -> backends ` object has been introduced in the Portal Proxy runtime. The existing `backend` object in the `proxyCluster` object of the Portal Proxy runtime is being deprecated and will not be available in the next major release version. - - -### Resolved Issues{#1-2-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} Enums are not supported in Advanced Routing. [(34854)]({{< ref "/nms/acm/releases/known-issues.md#34854" >}}) -- {{% icon-resolved %}} Unable to delete an environment that is stuck in a Configuring state. [(35546)]({{< ref "/nms/acm/releases/known-issues.md#35546" >}}) -- {{% icon-resolved %}} Credentials endpoint is disabled by default [(35630)]({{< ref "/nms/acm/releases/known-issues.md#35630" >}}) -- {{% icon-resolved %}} Ratelimit policy cannot be applied with OAuth2 JWT Assertion policy. [(36095)]({{< ref "/nms/acm/releases/known-issues.md#36095" >}}) -- {{% icon-resolved %}} Using labels to specify the backend is partially available [(36317)]({{< ref "/nms/acm/releases/known-issues.md#36317" >}}) -- {{% icon-resolved %}} To see updates to the Listener's table, forced refresh of the cluster details page is required. [(36540)]({{< ref "/nms/acm/releases/known-issues.md#36540" >}}) - -### Known Issues{#1-2-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.1.1 - -August 31, 2022 - -### Upgrade Paths {#1-1-1-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.0.0 - 1.1.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-1-1-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Stability and performance improvements** - - This release includes stability and performance improvements. - - -### Resolved Issues{#1-1-1-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} OIDC policy doesn't work with Auth0 Identity Providers [(36058)]({{< ref "/nms/acm/releases/known-issues.md#36058" >}}) -- {{% icon-resolved %}} Traffic is not secured between the API Proxy and backend servers [(36714)]({{< ref "/nms/acm/releases/known-issues.md#36714" >}}) -- {{% icon-resolved %}} Advanced routing ignores the Context Root setting for backend proxies [(36775)]({{< ref "/nms/acm/releases/known-issues.md#36775" >}}) - -### Known Issues{#1-1-1-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.1.0 - -August 18, 2022 - -### Upgrade Paths {#1-1-0-upgrade-paths} - -API Connectivity Manager supports upgrades from these previous versions: - -- 1.0.0 - -If your installed version is older, you may need to upgrade to an intermediate version before upgrading to the target version. - - - -
      - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-1-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **Advanced Cluster Management** - - Including more than one proxy cluster with the same hostname in an environment replicates configuration across all clusters and assists with blue-green deployments. With advanced cluster management, you can use a load balancer in front of the clusters to slowly move to the newer version of the API gateway. For example, one cluster may belong to NGINX Plus version R26 and another to R27. See the [Technical Specifications]({{< ref "/nim/fundamentals/tech-specs.md#data-plane-dev-portal" >}}). - -- {{% icon-feature %}} **Advanced Routing feature is available now** - - Advanced routing feature is available now. You can use it to publish an API Proxy and route specific URIs/endpoints precisely to a backend service. Advanced routing with OAS Specification allows you to import a specification file, parse all the URIs/endpoints in the file and publish API proxy by routing each URI/endpoint precisely to a backend service. To use the advanced routing feature without an OAS specification file, add the URI/endpoints while publishing the API proxy. See the [Advanced Configurations]({{< ref "/nms/acm/how-to/services/publish-api.md#advanced-configurations" >}}) section. - -- {{% icon-feature %}} **SQLite is supported for Developer Portal** - - SQLite is now supported as a database for [Developer Portal installations]({{< ref "/nms/acm/getting-started/add-devportal.md" >}}). - -- {{% icon-feature %}} **Support for NGINX Plus Release 27 (R27)** - - This release supports NGINX Plus Release 27 (R27) version for Data Plane instances. See the [Technical Specifications]({{< ref "/nim/fundamentals/tech-specs.md" >}}). - - -### Resolved Issues{#1-1-0-resolved-issues} -This release fixes the following issues. Select an issue's ID link to view its details. - -- {{% icon-resolved %}} JWT Assertion policy accepts an empty string value for tokenName property [(35419)]({{< ref "/nms/acm/releases/known-issues.md#35419" >}}) -- {{% icon-resolved %}} Environment is in a premature success state even though all proxy clusters may not be onboarded [(35430)]({{< ref "/nms/acm/releases/known-issues.md#35430" >}}) -- {{% icon-resolved %}} Cannot add, remove, or edit proxy clusters from an environment that has a published API proxy [(35463)]({{< ref "/nms/acm/releases/known-issues.md#35463" >}}) -- {{% icon-resolved %}} Features in the web interface are not displayed after uploading license [(35525)]({{< ref "/nms/acm/releases/known-issues.md#35525" >}}) -- {{% icon-resolved %}} DEVPORTAL_OPTS in /etc/{default,sysconfig}/nginx-devportal does not work if value has multiple words [(36040)]({{< ref "/nms/acm/releases/known-issues.md#36040" >}}) - -### Known Issues{#1-1-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - ---- - -## 1.0.0 - -July 19, 2022 - - -
      - Dependencies with Instance Manager - -{{< include "tech-specs/acm-nim-dependencies.md" >}} - -
      - - -### What's New{#1-0-0-whats-new} -This release includes the following updates: - -- {{% icon-feature %}} **API Connectivity Manager is now available** - - - Create and manage isolated workspaces for business units, development teams, etc., so each team can develop and deploy at its own pace without affecting other teams. - - Create and manage API infrastructure in isolated workspaces. - - Create and manage production and non-production environments within team workspaces and control who can access APIs at various lifecycle stages. For example, keep APIs under development private and publish production-ready APIs for public access. - - Enforce uniform security policies across all workspaces by applying global policies. - - Create Developer Portals that align with your brand, with custom color themes, logos, and favicons. - - On-board your APIs, publish to an API gateway, and publish your API documentation to the Developer Portal. - - Let teams apply policies to their API proxies to provide custom quality of service for individual applications. - - On-board API documentation by uploading an OpenAPI spec. - - Publish your API docs to a Developer Portal without giving the public access to your API. - - Monitor system and traffic metrics at the instance level. - - Self-service credential issuance for API Keys and Basic Authentication. - - Test API calls to your system using the "Try it out" feature in the Developer Portal. - - -### Known Issues{#1-0-0-known-issues} - -You can find information about known issues in the [Known Issues]({{< ref "/nms/acm/releases/known-issues.md" >}}) topic. - diff --git a/content/nms/acm/troubleshooting.md b/content/nms/acm/troubleshooting.md deleted file mode 100644 index 4ae12365e..000000000 --- a/content/nms/acm/troubleshooting.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -description: This topic describes possible issues users might encounter when using - API Connectivity Manager. When possible, suggested workarounds are provided. -nd-docs: DOCS-1222 -title: Troubleshooting -toc: true -weight: 1000 -type: -- reference ---- - -## System returns `403 Forbidden` error for authorized resources - -### Description - -Users are unable to access API Connectivity Manager features that they've been granted permission for. - -The system returns errors similar to the following examples: - -- Web interface error: "ACM license not found." - -- API error: "Error accessing resource: forbidden. Please contact the system administrator. User has not been granted `READ` permission." - -### Resolution - -New roles require a minimum of `READ` access for the **Licensing** feature. Without `READ` access for **Licensing**, users will be unable to access pages for which they have been granted permission; instead, the system will return `403 Forbidden` errors as licensing errors. - ---- - -## API Connectivity Manager module doesn't show up in the web interface - -### Description - -After installing the API Connectivity Manager module, the module doesn't appear in the F5 NGINX Management Suite web interface. - -### Resolution - -- Force refresh the web page. -- Restart the API Connectivity Manager service: - - ```bash - sudo systemctl restart nms-acm - ``` - ---- - -## Routing traffic fails with `RangeError: Maximum call stack size exceeded` in the data plane error logs - -### Description - -After deploying an API Proxy using a large OpenAPI Specification or a large number of advanced routes, traffic fails to route to the backend service and instead returns a `404` error. 
Failed requests trigger `js exception: RangeError: Maximum call stack size exceeded` in the data plane logs. - -The number API proxy advanced routes which can be deployed to a single API proxy is dependent on the complexity of the configuration, so it is not possible to give an exact limit; however, the table below illustrates some limits based on example configurations. For example, if all of your routes support a single method and have two non-enum query parameters, your configuration should be able to support up to 440 routes per API proxy. Enum parameters are not illustrated in the table below but will reduce the number of supported routes more significantly than a non-enum parameter. - -{{< bootstrap-table "table table-striped table-bordered" >}} - -| Path/Route methods | Query parameters | Supported number of Paths/Advanced Routes | -| ------------------ | ---------------- | ----------------------------------------- | -| 1 | 0 | 1100 | -| 1 | 1+ | 440 | -| 2 | 0 | 550 | -| 2 | 1+ | 220 | -| 3 | 0 | 360 | -| 3 | 1+ | 140 | -| 4 | 0 | 270 | -| 4 | 1+ | 110 | -| | | | - -{{< /bootstrap-table >}} - -{{< call-out "note" >}} -The numbers in the above table are provided only as an example. Other factors may impact the total supported number of routes. -{{< /call-out >}} - -### Resolution - -- The limitations are for a single API proxy. Splitting your configuration and deploying it across multiple API proxies may resolve the issue. For example: - - Given an OpenAPI specification with contains 1500 routes with a single method and no parameters - - 800 paths in the specification begin with `/v1`, and 700 begin with `/v2` - - Splitting the definition into two definitions, with one containing all of the `/v1` paths and the other containing all of the `/v2` paths, should allow deployment of two API proxies which cover all of the paths defined, each one below the 1100 route limit -- Replacing enum parameters with non-enum parameters may increase the number of routes which can be deployed - ---- - -## Can't delete API Connectivity Manager objects after upgrading NGINX instances - -### Description - -After upgrading NGINX Plus instances to R27, you may not be able to delete Environments, Proxies, or Dev Portals in the API Connectivity Manager module. - -### Resolution - -Try restarting the NGINX Agent after upgrading NGINX. - -- To restart the NGINX Agent, run the following command: - - ``` bash - sudo systemctl restart nginx-agent - ``` - ---- - -## How to Get Support - -{{< include "support/how-to-get-support.md" >}} - diff --git a/content/nms/acm/tutorials/_index.md b/content/nms/acm/tutorials/_index.md deleted file mode 100644 index ced5d4727..000000000 --- a/content/nms/acm/tutorials/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -description: "The guides in this section feature end-to-end examples that will help you get the most out of F5 NGINX Management Suite API Connectivity Manager." -title: Tutorials -weight: 600 -url: /nginx-management-suite/acm/tutorials/ ---- diff --git a/content/nms/acm/tutorials/advanced-routing.md b/content/nms/acm/tutorials/advanced-routing.md deleted file mode 100644 index 18de629cf..000000000 --- a/content/nms/acm/tutorials/advanced-routing.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -description: Learn how to create dynamic routes for your deployments using F5 NGINX - Management Suite API Connectivity Manager. 
-nd-docs: DOCS-1218 -title: Set Up Advanced Routing -toc: true -weight: 110 -type: -- tutorial ---- - -## Overview - -This tutorial will show you how to create dynamic routes for your proxy deployments using the 'Advanced Routing' feature. -This allows routing to different backend services based on URI, HTTP method, etc. - -### Intended Audience - -This guide is meant for NGINX Management Suite users who can add/modify Proxy deployments and want to create dynamic route matching configurations. - -### How do I publish a Proxy with Advanced Routing? - -Follow the steps on the [Publish an HTTP API]({{< ref "/nms/acm/how-to/services/publish-api.md" >}}) section to publish a proxy. - ---- - -## Use Case - -Jane Smith has started a new job as an API developer for the Product-Search team in a hardware manufacturing company. -Jane needs to change the current catch-all route to more granular routes to support the new API endpoints she has added to the product. -These endpoints will take a mixture of `Query`, `Path`, and `Header` parameters. Jane would like to interact with different backend services based on the routes and parameters provided. - -### Workflow - -In the steps that follow, we will: - -- Create an API Gateway proxy to route the traffic to the backend services. -- Add Advanced Routing rules to allow granular control over the traffic based on the passed parameters. - ---- - -## Before You Begin - -To complete the instructions in this guide, you need the following: - -- [API Connectivity Manager is installed]({{< ref "/nms/acm/how-to/install-acm.md" >}}), [licensed]({{< ref "/nim/admin-guide/add-license.md" >}}), and running -- One or more [Service workspaces]({{< ref "/nms/acm/how-to/services/publish-api.md#create-a-service-workspace" >}}) -- One or more [Proxies]({{< ref "/nms/acm/how-to/services/publish-api.md" >}}) - ---- - -## Built-In Role - -API Connectivity Manager comes pre-configured with an [ACM API Owner]({{< ref "/nms/acm/tutorials/rbac-api-owners.md" >}}) role suitable for API Owners (The individuals or teams who are responsible for designing, creating, and maintaining APIs). - ---- - -## Example: Create An Advanced Route - -In our Proxy configuration form (found via a Proxy Create or a Proxy Edit), we will select the `Ingress` section in the left menu to see the options available to configure our proxy ingress. - -Select the **Next** button. On the next screen, we have the options related to `basepath` and `version`. At the bottom of this section, there is an expandable panel to add an `Advanced Route`; select the `Add Route` link to continue. - -This section shows several configuration options. For the purpose of this example, we will focus on the following: - -- `Match URI` -- `HTTP Method` -- `Parameters` - -We are going to create a route that can take two `integer` IDs in the path; for example, `/customer/123/order/1234`. We are going to do this by adding the following parameters: - -In the `Match URI` field add the following value: `/customer/{customerID}/order/{orderID}`. This configures our URI with placeholders for the path parameters `customerID` and `orderID`. - -Expand the `HTTP Method` menu, and select `GET` for this config. The `HTTP Method` parameter allows us to configure which HTTP Method this route will match for. So in this case, a `POST` to `/customer/123/order/1234` will not match and will return a `404` (or a `405` depending on your config). 
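Once the route is published, a quick way to confirm the method matching is to send both a `GET` and a `POST` to the new endpoint and compare the responses. The commands below are only a sketch: `<proxy-hostname>` is a placeholder for your own API proxy hostname and base path, not a value from this tutorial.

```bash
# GET matches the advanced route and should be proxied to the backend service
curl -i -X GET "https://<proxy-hostname>/customer/123/order/1234"

# POST does not match the GET-only route and should return a 404
# (or a 405, depending on your configuration)
curl -i -X POST "https://<proxy-hostname>/customer/123/order/1234"
```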
-You can route to different backend services for the same URI but different HTTP methods using the `TargetBackendServiceLabel` parameter, which will associate the config to a specific backend service and the `HTTP Method` parameter combination. - -In the `Parameters` section, select the `Add Parameter` button to see some new config options: - -- `Name` is the name of the parameter in the URI; this must match the placeholder value provided in `Match URI` (in the web interface, the validation will show an error if there is a mismatch). -We need to add one entry for `customerID` and another for `orderID` by selecting the `Add Parameter` button again. - -The `In` field indicates where the parameter will be passed; the options are `PATH`, `QUERY`, and `HEADER`. - -- `PATH` indicates that the parameter will be passed as a path parameter, for example, `/customer/{id}}`. -- `QUERY` indicates that the parameter will be passed as a query parameter, for example, `/customer?customerID=123`. -- `HEADER` indicates that it will be passed as a header with the `Name` field as the header key. - -For this example, we will use `PATH` parameters. - -`Schema Type` defines the type of parameter that will be passed, for example, `STRING`, `INTEGER`, and others which are supplied in a dropdown through the UI or in the API documentation if using the API. -For this example, we will be using `INTEGER`. - -The `Enums` option lets you limit the number of options to be allowed to match on; if anything else is passed, it doesn't match. -We won't be using `Enums` for this example. - -Now that we have added our route, we can select `Add` and `Save and Publish` on the next page. Our changes will be deployed, and we should now be able to resolve our new endpoint! - ---- diff --git a/content/nms/acm/tutorials/aws-deploy.md b/content/nms/acm/tutorials/aws-deploy.md deleted file mode 100644 index 1b4c403e8..000000000 --- a/content/nms/acm/tutorials/aws-deploy.md +++ /dev/null @@ -1,345 +0,0 @@ ---- -description: Learn how to set up the base infrastructure required to deploy NGINX - Management Suite API Connectivity Manager in Amazon Web Services (AWS). -nd-docs: DOCS-896 -title: Amazon Web Services Deployment Guide -toc: true -weight: 300 ---- - -{{< shortversions "1.1.0" "latest" "acmvers" >}} - -## Overview - -This guide walks you through the steps needed to set up the necessary infrastructure in Amazon Web Services (AWS) for a proof of concept environment for API Connectivity Manager. The options presented in this guide for creating AWS Instances keep cost in mind and prefer the minimum requirements for running a fully functional API Connectivity Manager environment. -Keep in mind that production environments may require larger instance sizes and incur greater costs. - -### Before You Begin - -- Make sure you have an AWS account. - -{{< call-out "important" >}}Because the [minimum requirement for the F5 NGINX Management Suite host]({{< ref "/nim/fundamentals/tech-specs#system-sizing" >}}) requires 2 CPU and 4GB RAM (NOT a free tier size), completing this deployment guide will incur charges from AWS according to their price plan.{{< /call-out >}} - -### Hosts Specs - -The AWS instance types and storage capacity used in this guide are based on the [NGINX Management Suite Technical Specs]({{< ref "/nim/fundamentals/tech-specs#system-sizing" >}}). 
- -{{}} - -| Hosts | AWS Instance Type | AWS Storage | -|---------------------------------|-------------------|--------------| -| NGINX Management Suite Host | t3.medium | 100GB | -| Data Plane Host | t2.micro | 10GB | -| Developer Portal Host | t2.micro | 10GB | - -{{}} -Table 1.1 Host Sizing - -## Provision AWS Instances - -Complete the tasks in this section to set up the following resources in AWS: - -1. [Virtual Private Cloud](https://docs.aws.amazon.com/vpc/) -1. [EC2 Instances](https://docs.aws.amazon.com/ec2/) - -The instances you create by the end of this guide are: - -1. NGINX Management Suite Host -1. Data Plane Host -1. Developer Portal Host - -### Configure VPC - -This section creates and configures the AWS Virtual Private Cloud (VPC) as described below. If your existing VPC is able to allow the following types of traffic, skip this section. - -1. Be able to access the internet (for install) -1. Be able to establish an SSH connection from your workstation to the EC2 Instances -1. Have HTTPS traffic enabled - - To allow NGINX Management Suite user interface and/or API access - - Communication between Data Plane or Developer Portal host and NGINX Management Suite host -1. Have HTTP traffic enabled - - To allow access to the Developer Portal from a workstation - - To allow traffic for gateway proxy from a workstation - -#### Create a New VPC - -Take the steps below to create a new VPC: - -1. Go to to the **VPC** Service. -1. Select **Create VPC**. -1. In the **VPC setting** section, provide the **Name** (optional) and **IPv4 CIDR**. -1. Select **Create VPC**. - -#### Create a New Subnet - -Take the steps below to create a new subnet: - -1. On the left menu, select **Virtual private cloud > Subnets**. -1. Select **Create subnet**. -1. In the **VPC** section, select the newly created VPC from above. -1. In the **Subnet settings**, provide the **Subnet name** (optional) and **IPv4 CIDR block**. -1. Select **Create subnet**. - -#### Create a New Internet Gateway - -Take the steps below to create a new internet gateway: - -1. On the left menu, select **Virtual private cloud > Internet Gateways**. -1. Select **Create internet gateway**. -1. On the main window of the newly created internet gateway, select **Actions > Attach to VPC**. -1. Select the VPC created from above. -1. Select **Attach internet gateway**. - -{{< call-out "note" >}}The Internet Gateway is what provides a public subnet internet access.{{< /call-out >}} - -#### Create a New Route Table - -Take the steps below to create a route table, add a route entry that defaults to the internet gateway created above, and associate a subnet with this route table: - -1. On the left menu, select **Virtual private cloud > Route tables**. -1. Select **Create route table**. -1. Associate this route table to the VPC created from above. -1. Select **Create route table**. -1. Scroll down on the main window of the newly created route table then select **Edit routes**. -1. Select **Add route**. - 1. Provide `0.0.0.0/0` for the **Destination**. - 1. Select the **Internet Gateway** created from above. - 1. Select **Save changes**. -1. Scroll down on the main window on the same route table then select the **Subnet associations** tab. -1. Select **Edit subnet associations**. -1. Select the subnet created from above. -1. Select **Save changes**. - -### Create EC2 Instances - -At this point, the VPC created above is available when creating EC2 Instances. 
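If you prefer to script the VPC setup from the previous section instead of clicking through the console, the AWS CLI can create the same resources. The following is a minimal sketch only: the CIDR blocks are example values, no name tags are applied, and you should substitute values that match your environment.

```shell
# Create the VPC and capture its ID
VPC_ID=$(aws ec2 create-vpc --cidr-block 10.0.0.0/16 \
  --query 'Vpc.VpcId' --output text)

# Create a public subnet inside the VPC
SUBNET_ID=$(aws ec2 create-subnet --vpc-id "$VPC_ID" --cidr-block 10.0.1.0/24 \
  --query 'Subnet.SubnetId' --output text)

# Create an internet gateway and attach it to the VPC
IGW_ID=$(aws ec2 create-internet-gateway \
  --query 'InternetGateway.InternetGatewayId' --output text)
aws ec2 attach-internet-gateway --internet-gateway-id "$IGW_ID" --vpc-id "$VPC_ID"

# Create a route table, add a default route through the internet gateway,
# and associate the route table with the subnet
RTB_ID=$(aws ec2 create-route-table --vpc-id "$VPC_ID" \
  --query 'RouteTable.RouteTableId' --output text)
aws ec2 create-route --route-table-id "$RTB_ID" \
  --destination-cidr-block 0.0.0.0/0 --gateway-id "$IGW_ID"
aws ec2 associate-route-table --route-table-id "$RTB_ID" --subnet-id "$SUBNET_ID"
```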
- -Before creating the EC2 instances, create your **Key Pair** and **Security Groups** if they do not already exist. The reason why they are required is described below. -{{}} - -| AWS Object | Reason | -|----------------------|--------------------------------------------------------------------------------------------| -| Key Pair | This is used to allow SSH connections in to EC2 Instances. | -| Security Groups | The security group needs to enable HTTP/S traffic and allow SSH traffic from your IP. | - -{{}} -Table 1.2 Key Pair and Security Groups Reasoning - -#### Create a Key Pair - -Take the steps below to create a **Key Pair**. - -1. Go to the **EC2** Service. -1. On the left menu, select **Network & Security > Key Pairs**. -1. You can either create a new Key Pair or import your own. - - To create a new Key Pair: - 1. Select **Create key pair**. - 1. Provide the **Name**. **Key pair type**, and **Private key file format**. - - To import your existing Key Pair: - 1. Select **Actions > Import key pair**. - 1. Provide the key pair **Name** and your public key content. - -#### Create a Security Group - -The table below summarizes the two security groups that you should create. - -{{}} - -| Security Group Name | HTTP | HTTPS | SSH | -|------------------------------|---------------|----------------|-------------| -| sg-controller | NA | Anywhere-IPv4 | My IP | -| sg-data | Anywhere-IPv4 | Anywhere-IPv4 | My IP | - -{{}} -Table 1.3 AWS Inbound Security Group Source - -{{< call-out "warning" >}}Selecting **Anywhere-IPv4** as the _Source_ for **HTTP** and **HTTPS** will cause the instances placed inside your Security Group to be publicly accessible. If this is not suitable for you or your organization, please ensure that appropriate restrictions are in place. {{< /call-out >}} - -{{< call-out "note" >}}Select **My IP** as the _Source_ for **SSH** to prevent SSH connection attempts by anyone other than yourself. - -If you are not allowed to do this, refer to the [Terminal Access Using Session Manager](#session-manager) section below.{{< /call-out >}} - -
      - -Each host needs to be associated to a security group. The mapping of each host to the correct security group is shown below. -{{}} - -| Host | Security Group | -|------------------------------|----------------| -| NGINX Management Suite Host | sg-controller | -| Data Plane Host | sg-data | -| Developer Portal Host | sg-data | - -{{}} -Table 1.4 Host to Security Group Mapping - -
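As an alternative to the console steps that follow, you can create the key pair and both security groups with the AWS CLI. This is a rough sketch under a few assumptions: `$VPC_ID` is the ID of the VPC created earlier, `203.0.113.10/32` stands in for your own workstation IP, and the ingress rules mirror Table 1.3.

```shell
# Import an existing public key to use for SSH access to the instances
aws ec2 import-key-pair --key-name acm-demo-key \
  --public-key-material fileb://~/.ssh/id_rsa.pub

# sg-controller: HTTPS from anywhere, SSH only from your workstation IP
SG_CTRL=$(aws ec2 create-security-group --group-name sg-controller \
  --description "NGINX Management Suite host" --vpc-id "$VPC_ID" \
  --query 'GroupId' --output text)
aws ec2 authorize-security-group-ingress --group-id "$SG_CTRL" \
  --protocol tcp --port 443 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id "$SG_CTRL" \
  --protocol tcp --port 22 --cidr 203.0.113.10/32

# sg-data: HTTP and HTTPS from anywhere, SSH only from your workstation IP
SG_DATA=$(aws ec2 create-security-group --group-name sg-data \
  --description "Data plane and Developer Portal hosts" --vpc-id "$VPC_ID" \
  --query 'GroupId' --output text)
aws ec2 authorize-security-group-ingress --group-id "$SG_DATA" \
  --protocol tcp --port 80 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id "$SG_DATA" \
  --protocol tcp --port 443 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id "$SG_DATA" \
  --protocol tcp --port 22 --cidr 203.0.113.10/32
```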
      - -Take the steps below to create a security group for access. Repeat these steps twice, once for **sg-controller** and once for **sg-data**. - -1. Go to the **EC2** Service. -1. On the left menu, select **Network & Security > Security Groups**. -1. Select **Create security group**. -1. In the **Basic details** section, provide the **Security group name**, **Description**, and select the **VPC** created from above. -1. In the **Inbound rules** section, refer to each traffic **Type** that corresponds to the security group being created from Table 1.2 above. -1. The **Outbound rules** should already allow all traffic by default. If it isn't, modify the rules so that it allows all traffic. -1. Select **Create security group**. - -#### Create EC2 Instance - -Take the steps below to create an EC2 Instance. Repeat these steps three times, once for each host shown in [Table 1.1]({{< relref "./aws-deploy.md#hosts-specs" >}}). - -1. Go to the **EC2** Service. -1. On the left menu, select **Instances > Instances**. -1. Select **Launch Instances**. -1. Provide the **Name** of your instance. -1. In the **Application and OS Images** section, select your [supported OS of choice]({{< ref "/nim/fundamentals/tech-specs#distributions" >}}). -1. Select your instance size in the **Instance Type** section. Refer to [Table 1.1]({{< relref "./aws-deploy.md#hosts-specs" >}}) for the suggested size of your host. Refer to [Technical Specifications]({{< ref "/nim/fundamentals/tech-specs#system-sizing" >}}) for additional information. -1. In the **Key pair (login)** section, select the key pair that was created above. -1. In the **Network settings** section, select the **Edit** button. - - Provide your **VPC** and **Subnet** information. - - Select **Enable** for **Auto-assign public IP**. - - Select **Select existing security group**. - - Provide the security group created above shown in Table 1.4 that corresponds to your host for **Common security groups**. -1. In the **Configure Storage** section, select the storage amount required by your host. Refer to [Table 1.1]({{< relref "./aws-deploy.md#hosts-specs" >}}) for guidance to determine the suggested size. GP2 storage is suitable. Refer to [Technical Specifications]({{< ref "/nim/fundamentals/tech-specs#system-sizing" >}}) for additional information. - -#### Access EC2 Instance - -Take the steps below to obtain the public IP so you can access the instance through an SSH connection. - -1. Select **Instances > Instances** on the left menu. -1. Select your instance. -1. Select the **Details** tab. -1. The public IP address is shown in the **Public IPv4 address** section. This is the IP that allows external access (such as from your workstation) to the selected EC2 Instance. - {{< call-out "note" >}}It takes about a minute for the instance to become available for SSH connections.{{< /call-out >}} - -## NGINX Management Suite Host Installation - -Follow the [NGINX Management Suite Installation Guide]({{< ref "/nim/deploy/_index.md" >}}) to install both the **Instance Manager Module** and the **API Connectivity Manager Module**. The **Security Module** is not required for this demo. - -## NGINX Data Plane host - -Follow the steps in the [Set Up an API Gateway Environment]({{< ref "/nms/acm/getting-started/add-api-gateway" >}}) guide to create an API Gateway and deploy it to your NGINX data plane host. 
- -## NGINX Developer Portal host - -Follow the steps in the [Set Up a Developer Portal Environment]({{< ref "/nms/acm/getting-started/add-devportal" >}}) guide to create a Developer Portal and deploy it to your NGINX Dev Portal host. - -## Terminal Access Using Session Manager (Optional) {#session-manager} - -AWS allows you to enable SSH traffic to a specific Source IP Address which is much safer than exposing it to everyone on the internet. Even though exposing it to one IP may be good enough, it might not be sufficient for your company policy. It is possible to completely disable SSH traffic yet still have terminal access to your EC2 Instances. There are different ways of doing this, and one way covered here is using [AWS System Manager Session Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). - -There are two methods of gaining terminal access via Session Manager: - -1. AWS Management Console -2. AWS Command Line Interface Tool - -Whichever method you decide, you need to take the following steps to properly configure your instances to allow connections from AWS Session Manager. Before continuing, ensure the [Session Manager Prerequisites](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-prerequisites.html) are met. - -### IAM Role - -You must create a new IAM Role that grants Session Manager access to EC2 Instances. This will be associated with the EC2 Instances needing terminal access. Take the instructions below to create an IAM Role for Session Manager. - -1. Log in to your AWS Account on your web browser. -1. Go to the **IAM** service. -1. On the left menu, select **Access management > Roles**. -1. Select **Create role**. -1. In the **Trusted entity type** section, select **AWS service**. -1. In the **Use case** section, select **EC2**. -1. Select **Next**. -1. In the **Permissions policies** section, select **AmazonSSMManagedInstanceCore**. You can filter for this name in the filter box. -1. Select **Next**. -1. Provide the **Role name** and **Tag** (optional) for this IAM Role specifically allowing Session Manager access to EC2 Instances. -1. Select **Create role**. - -{{< call-out "note" >}}Creating an IAM Role from the AWS Management Console and choosing EC2 as the AWS Service also creates an AWS Instance Profile associated with EC2 Instances. Additional details can be found in [the AWS knowledge article](https://aws.amazon.com/premiumsupport/knowledge-center/attach-replace-ec2-instance-profile/).{{< /call-out >}} - -### Associating IAM Instance Profile to EC2 Instance - -When you associate an _IAM Role_ created from the IAM service to an EC2 Instance, you are really associating an _IAM Instance Profile_. Again, when you create an _IAM Role_ from AWS Management Console and choose EC2 as the AWS Service, it also creates an _IAM Instance Profile_. Take the steps in this section to associate an _IAM Instance Profile_ to an _EC2 Instance_. - -There are two situations that can happen here: - -1. Associating IAM Instance Profile to an existing instance -1. Associating an IAM Instance Profile to a new instance - -#### Associating IAM Instance Profile to Existing Instance - -Take the steps below to associate an IAM Instance Profile to an existing EC2 Instance: - -1. Go to the **EC2** Service. -1. On the left menu, select **Instances > Instances**. -1. Right-click on the instance of interest. -1. Select **Security > Modify IAM role**. -1. Select the **IAM Instance Profile** from the list. 
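If you prefer the AWS CLI, a single command performs the same association. The instance ID and profile name below are placeholders; use the IAM Instance Profile created in the previous section.

```shell
# Attach the Session Manager instance profile to an existing EC2 instance
aws ec2 associate-iam-instance-profile \
  --instance-id i-0123456789abcdef0 \
  --iam-instance-profile Name=ssm-session-manager-profile
```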
- -#### Associating IAM Instance Profile on New Instance - -Associating an IAM Instance Profile to a new instance happens before the instance is created. The steps below assume you know how to get to the page where you provide information for the new instance you are about to create. You see this page after selecting **Launch instances** from **Instances > Instances** on the **EC2** Service. - -1. In the **Advanced details** section, expand the entire section. -1. Select your IAM Instance Profile for **IAM instance profile**. - -### Accessing EC2 Instance Terminal - -You can access the terminal of your instance by either: - -- AWS Management Console -- AWS Command Line Interface Tool - -#### AWS Management Console - -Take the steps below to get terminal access using **Session Manager**. - -1. Go to the **System Manager** Service. -1. On the left menu, select **Node Management > Session Manager**. -1. Verify you are on the **Sessions** tab. -1. Select **Create session**. -1. In the **Target Instances** section, select the instance of interest. -1. Select **Start session**. This takes you to the terminal where you are logged in as `ssm-user`. -1. When you are done, select **Terminate** at the top. - -{{< call-out "note" >}} If you do not see your instance in the **Target Instances** section: - -- Verify the IAM Instance Profile is associated to your instance. -- Verify the IAM Role has SSM permissions properly configured. -- The instance allows outbound HTTPS traffic to the endpoints shown in the **Connectivity to endpoints** row from the [Session Manager Prerequisites](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-prerequisites.html) page. -- Wait about 15 minutes if you attached the IAM Instance Profile to an existing instance. -{{< /call-out >}} - -### AWS Command Line Interface Tool - -Another way to get terminal access to instances is through AWS's CLI Tool. - -Take the steps below to fulfill prerequisites for using Session Manager on the command line interface: - -1. Install [AWS CLI Tool](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). -1. You must also install the [Session Manager Plugin](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). -1. You need **AWS Access Key ID** and **AWS Secret Access Key**, which you can set up by referring to the [AWS CLI Prerequisite](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-prereqs.html) page. - -Take the steps below to get terminal access on an instance: - -1. Run `aws configure` to set up access to your AWS account. - - ```shell - $ aws configure - AWS Access Key ID []: ****************DLVT - AWS Secret Access Key []: ****************666r - Default region name []: - Default output format []: json - ``` - - {{< call-out "note" >}} If your AWS account is configured to use temporary credentials, you need to provide the `aws_session_token` by running the command below: - - aws configure set aws_session_token {{< /call-out >}} - -1. Run `aws ssm start-session --target ""` to start a session which provides terminal access. - - ```shell - $ aws ssm start-session --target "" - - Starting session with SessionId: aaaaaaaa-0538f063ab275aeed - $ - ``` - -1. To exit out of the session, type `exit` as if you were going to close a normal terminal screen. 
diff --git a/content/nms/acm/tutorials/enable-metrics.md b/content/nms/acm/tutorials/enable-metrics.md deleted file mode 100644 index 0a9471c9c..000000000 --- a/content/nms/acm/tutorials/enable-metrics.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -description: Learn how to enable and use metrics for F5 NGINX Management Suite API - Connectivity Manager. -nd-docs: DOCS-1055 -title: Enable Metrics -toc: true -weight: 100 ---- - -{{< shortversions "1.3.0" "latest" "acmvers" >}} - -## Overview - -This guide walks through setting up and using metrics in API Connectivity Manager. - -{{< call-out "important" >}}The configuration presented in this guide is for demonstration purposes only. Securely configuring environments and proxies in API Connectivity Manager is not in scope for this tutorial but should be given full attention when planning for production use.{{< /call-out >}} - -Currently, only the following metric is available: - -- [Count of proxies in an environment](#count-proxies-in-env) - -As we add new metrics, we'll let you know in the [API Connectivity Manager release notes]({{< ref "/nms/acm/releases/release-notes.md" >}}) and update this topic accordingly. - ---- - -## Before You Begin - -To complete the instructions in this guide, you need the following: - -- Access to a virtual environment -- Four virtual hosts with Linux installed — this guide uses [Ubuntu 20.04 LTS](https://releases.ubuntu.com/focal/). - -
      - Supported Linux distributions - - {{< include "nim/tech-specs/supported-distros.md" >}} - -
      - - ---- - -## Host Setup - -This section configures the hosts used in this tutorial. In the following table, you'll find the details of the test environment used in this tutorial's examples. The options presented are the minimum host requirements for running a fully functional test environment. Remember that production environments may need more resources and incur greater costs. - -{{}} - -| Hosts | Virtual Cores | Memory | Storage | IP Address | Hostname | -|-----------------------------|---------------|--------|---------|-------------|-------------| -| F5 NGINX Management Suite Host | 2 vCPUs | 4GB | 100GB | `192.0.2.2` | `acm-ctrl` | -| Data Plane Host | 1 vCPU | 1GB | 10GB | `192.0.2.3` | `data-host` | -| Echo Server | 1 vCPU | 1GB | 10GB | `192.0.2.4` | `echo-host` | - -{{}} - -
      - -### Install NGINX Management Suite & API Connectivity Manager {#install-nsm-acm} - -Follow the steps in the [Installation Guide]({{< ref "/nim/deploy/_index.md" >}}) to set up NGINX Management Suite and API Connectivity Manager. You do not need to configure a Developer Portal for this tutorial. - -### Enable Metrics for API Connectivity Manager - -In `/etc/nms/acm.conf`, uncomment and set the `enable_metrics` property to `true`. - -``` bash -# set to true to enable metrics markers from the acm code -enable_metrics = true -``` - -Run the following command to restart the API Connectivity Manager service: - -```bash -sudo systemctl restart nms-acm -``` - -### Install NGINX Agent on Data Plane Host {#install-agent} - -Run the following commands to install the NGINX Agent on the data plane host, create a new Instance Group called `test-ig`, and add the host to it: - -``` shell -curl --insecure https://192.0.2.2/install/nginx-agent > install.sh \ -&& sudo sh install.sh -g test-ig \ -&& sudo systemctl start nginx-agent -``` - -To ensure that the advanced metrics modules are installed across all data plane hosts, please follow the steps in the [Install NGINX Plus Metrics Module]({{< ref "/nms/nginx-agent/install-nginx-plus-advanced-metrics.md" >}}) guide. - ---- - -### Install Echo Server {#install-echo-server} - -{{< call-out "note" >}} The server is designed for testing HTTP proxies and clients. It echoes information about HTTP request headers and bodies back to the client. {{< /call-out >}} - -1. [Download and install the latest version of Go](https://go.dev/doc/install) by following the instructions on the official Go website. -2. Run the following commands to install and start [Echo Server](https://github.com/jmalloc/echo-server): - - ```shell - go env -w GO111MODULE=off - go get -u github.com/jmalloc/echo-server/... - PORT=10000 LOG_HTTP_BODY=true LOG_HTTP_HEADERS=true echo-server - ``` - ---- - -## Configure API Connectivity Manager {#amc-config} - -In this section, we use the API Connectivity Manager REST API to set up a proxy in API Connectivity Manager. You need to pass the NGINX Management Suite user credentials in the Basic Authentication header for each REST request. - -### Create Workspaces & Environment {#create-workspace-environment} - -1. To create an Infrastructure Workspace with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces` endpoint: - - ```bash - POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces - ``` - -
      - - **JSON Request** - - ```json - { - "name": "infra-ws" - } - ``` - -1. To create an environment with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces/infra-ws/environments` endpoint. The `proxyClusterName`: `test-ig` is the name of the Instance Group that the data plane host was added to when you [installed the NGINX Agent](#install-agent) above. The `hostnames` array should contain the hostname of the data plane host. - - ```bash - POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces/infra-ws/environments - ``` - -
      - - **JSON Request** - - ```json - { - "name": "demo-env", - "proxies": [ - { - "proxyClusterName": "test-ig", - "hostnames": [ - "data-host" - ] - } - ] - } - ``` - -2. To create a Service Workspace with a minimum configuration, send the following JSON request to the `/services/workspaces` endpoint. - - ```bash - POST https://192.0.2.2/api/acm/v1/services/workspaces - ``` - -
      - - **JSON Request** - - ```json - { - "name": "service-ws" - } - ``` - -### Create a Basic API Proxy {#create-basic-api-proxy} - -1. To create an API proxy with a minimum configuration and the default policies, send the following JSON request to the `/services/workspaces/service-ws/proxies` endpoint. The Proxy service target is our Echo Server. - - ```bash - POST https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies - ``` - - **JSON Request** - - ```json - { - "name": "test-proxy", - "version": "v1", - "proxyConfig": { - "hostname": "data-host", - "ingress": { - "basePath": "/", - "basePathVersionAppendRule": "NONE" - }, - "backends": [ - { - "serviceName": "backend-echo-svc", - "serviceTargets": [ - { - "hostname": "192.0.2.4", - "listener": { - "enableTLS": false, - "port": 10000, - "transportProtocol": "HTTP" - } - } - ] - } - ] - } - } - ``` - -2. To test whether the API Proxy and backend Echo Server are working correctly, send a custom header and dummy JSON body to show these proxied values in the Echo Server response: - - ```bash - POST https://192.0.2.4/my/test/api - HEADERS: - X-NGINX-Test: true - ``` - -
      - - **JSON Request** - - ```json - { - "testKey": "testValue" - } - ``` - -
      - - **Verification** - - If everything is configured correctly in API Connectivity Manager and the Echo Server, the response should be similar to the following example: - - ```bash - Request served by echo-host - - HTTP/1.0 POST /my/test/api - - Host: 192.0.2.4 - Accept: */* - Cache-Control: no-cache - Content-Length: 30 - Content-Type: application/json - X-Correlation-Id: c241b72519e71cf7bce9262910ffbe40 - X-Real-Ip: 192.0.2.1 - X-NGINX-Test: true - - {"testKey": "testValue"} - ``` - ---- - -## Get Count of Proxies in an Environment {#count-proxies-in-env} - -To get the count of active proxies, send the following REST request to the `/infrastructure/workspaces/infra-ws/environments/demo-env/api-count` endpoint: - -```bash -GET https://192.0.2.2/api/acm/v1/infrastructure/workspaces/infra-ws/environments/demo-env/api-count -``` - -If you've successfully configured a proxy the following count is returned. - -Response: - -```json - 1 -``` - ---- - -## View Environment Metrics {#view-env-metrics} - -1. On the left menu, select **Infrastructure**. -2. Select a workspace from the table. -3. Select the Actions menu (represented by an ellipsis, `...`) next to your environment on the **Actions** column. -4. Select **Metrics**. -5. Update the start and end time of the metrics with the **time range selection** on the dashboard overview. -6. To view metrics broken down by cluster in the environment, select the **API Gateway Clusters** tab. - ---- - -## View Proxy Metrics {#view-proxy-metrics} - -1. On the left menu, select **Services**. -2. Select a workspace from the table. -3. Select the Actions menu (represented by an ellipsis, `...`) next to your environment on the **Actions** column. -4. Select **Metrics**. -5. Update the start and end time of the metrics with the **time range selection** on the dashboard overview. -6. Filter by advanced routes with the **advanced route selection** on the dashboard overview. -7. To view metrics broken down by status code in the proxy, select the **API Gateway Clusters** tab. diff --git a/content/nms/acm/tutorials/introspection-keycloak.md b/content/nms/acm/tutorials/introspection-keycloak.md deleted file mode 100644 index 83b150da7..000000000 --- a/content/nms/acm/tutorials/introspection-keycloak.md +++ /dev/null @@ -1,1550 +0,0 @@ ---- -description: Learn how to set up an F5 NGINX Management Suite API Connectivity Manager - OAuth2 Introspection policy with Keycloak as the authorization server. -nd-docs: DOCS-954 -title: OAuth2 Introspection with Keycloak -toc: true -weight: 400 -type: -- tutorial ---- - -## Overview - -This tutorial walks through configuring an OAuth2 Introspection policy on an API Proxy in API Connectivity Manager with Keycloak as the authorization server. - -{{< call-out "important" >}}The configuration presented in this guide is for demonstration purposes only. The secure configuration of Environments and Proxies in API Connectivity Manager, or the secure configuration of Keycloak as the authorization server, is not in scope for this tutorial and should be given full attention when planning for production use.{{< /call-out >}} - -{{< call-out "note" >}}See the [OAuth2 Introspection Policy]({{< ref "/nms/acm/how-to/policies/introspection.md" >}}) reference guide for a detailed overview of the policy.{{< /call-out>}} - ---- - -## What is OAuth2? 

{{< include "acm/tutorials/what-is-OAuth2.md" >}}

---

## Before You Begin

To complete the instructions in this guide, you need the following:

- Access to a virtual environment
- Four virtual hosts with Linux installed - this guide uses [Ubuntu 20.04 LTS](https://releases.ubuntu.com/focal/).

  Supported Linux distributions

  {{< include "nim/tech-specs/supported-distros.md" >}}

---

## Host Setup

This section configures the hosts used in this tutorial. The following table lists the details of the test environment used in the examples. The options presented are the minimum host requirements for running a fully functional test environment. Remember that production environments may need more resources and incur greater costs.

{{}}

| Hosts                          | Virtual Cores | Memory | Storage | IP Address  | Hostname    |
|--------------------------------|---------------|--------|---------|-------------|-------------|
| F5 NGINX Management Suite Host | 2 vCPUs       | 4GB    | 100GB   | `192.0.2.2` | `acm-ctrl`  |
| Data Plane Host                | 1 vCPU        | 1GB    | 10GB    | `192.0.2.3` | `data-host` |
| Echo Server                    | 1 vCPU        | 1GB    | 10GB    | `192.0.2.4` | `echo-host` |
| Authorization Server           | 1 vCPU        | 1GB    | 10GB    | `192.0.2.5` | `auth-host` |

{{}}

### Install NGINX Management Suite & API Connectivity Manager {#install-nsm-acm}

1. Follow the steps in the [Installation Guide]({{< ref "/nim/deploy/_index.md" >}}) to set up NGINX Management Suite and API Connectivity Manager. You do not need to configure a Developer Portal for this tutorial.

### Install NGINX Agent on Data Plane Host {#install-agent}

1. Run the following commands to install the NGINX Agent on the data plane host, create a new Instance Group called `test-ig`, and add the host to it:

    ```shell
    curl --insecure https://192.0.2.2/install/nginx-agent > install.sh \
      && sudo sh install.sh -g test-ig \
      && sudo systemctl start nginx-agent
    ```

### Install Echo Server {#install-echo-server}

1. [Download and install the latest version of Go](https://go.dev/doc/install) by following the instructions on the official Go website.
2. Run the following commands to install and start [Echo Server](https://github.com/jmalloc/echo-server):

    ```shell
    go env -w GO111MODULE=off
    go get -u github.com/jmalloc/echo-server/...
    PORT=10000 LOG_HTTP_BODY=true LOG_HTTP_HEADERS=true echo-server
    ```

### Install Authorization Server {#install-auth-server}

This tutorial uses Keycloak in **Development mode**. Development mode is suitable for people trying out Keycloak for the first time who want to get it up and running quickly.

Development mode sets the following default configuration:

- HTTP is enabled
- Strict hostname resolution is disabled
- The cache is set to local (no distributed cache mechanism is used for high availability)
- Theme-caching and template-caching are disabled

(Optional) Production mode default configuration

For all conventional and production use cases, we recommend starting Keycloak in **Production mode**, which follows a "secure by default" principle.

Production mode sets the following default configuration:

- HTTP is disabled as transport layer security (HTTPS) is essential
- Hostname configuration is expected
- HTTPS/TLS configuration is expected
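
For reference only: a production-mode start supplies the hostname and TLS material up front. The following is a hedged sketch with a hypothetical hostname and certificate paths; it is not used anywhere else in this tutorial, which stays in development mode.

```bash
# Sketch only - hypothetical hostname and certificate paths, not used in this tutorial.
bin/kc.sh start \
  --hostname=auth.example.com \
  --https-certificate-file=/etc/keycloak/tls/server.crt \
  --https-certificate-key-file=/etc/keycloak/tls/server.key
```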

1. To install the Keycloak prerequisites, run the following commands:

    ```shell
    sudo apt-get update
    sudo apt-get -y install openjdk-11-jre
    ```

2. Download and extract the Keycloak tarball:

    ```shell
    KEYCLOAK_VERSION=19.0.3

    curl -L -o keycloak-${KEYCLOAK_VERSION}.tar.gz \
      https://github.com/keycloak/keycloak/releases/download/${KEYCLOAK_VERSION}/keycloak-${KEYCLOAK_VERSION}.tar.gz
    tar -zxf keycloak-${KEYCLOAK_VERSION}.tar.gz
    rm -rf keycloak-${KEYCLOAK_VERSION}.tar.gz
    ```

3. Create environment variables for the Keycloak admin username and password:

    {{< call-out "important" >}}Do not use the example `admin/password` combination in any scenario. Replace the username and password with strong alternatives.{{< /call-out >}}

    ```shell
    export KEYCLOAK_ADMIN=
    export KEYCLOAK_ADMIN_PASSWORD=
    ```

4. Start Keycloak in **Development Mode**:

    ```shell
    cd keycloak-${KEYCLOAK_VERSION}/
    bin/kc.sh start-dev
    ```

---

## Configure Keycloak {#configure-keycloak}

In this section, we'll configure Keycloak as our OAuth2 authorization server.

### Accessing the Keycloak UI

Using the Keycloak admin credentials that you configured in the preceding [Install Authorization Server](#install-auth-server) section, you can access and log in to the Keycloak web interface at:

- `http://192.0.2.5:8080/admin`

### Configure a Realm {#configure-realm}

A _Realm_ manages a set of users, credentials, roles, and groups. A user belongs to and logs in to a Realm. Realms are isolated from one another and can manage and authenticate only the users they control.

1. To create a Realm, select **Master** in the left navigation bar, then select **Add realm** from the list.
2. Enter the Realm details. For the purposes of this demonstration, our Realm is called `nginx`.
3. Select **Create** to create the Realm.
4. The **Realm** list in the left navigation bar should now be set to `nginx`.

### Configure a User {#configure-user}

_Users_ are entities that can log in to your system. User attributes include an email, username, address, phone number, and birthday. Users can be assigned a group membership and have specific roles assigned to them.

1. To create a user, select **Users**, then select **Create new User**.
2. Enter the user's details. For the purposes of this demonstration, only the required field **Username** is set, with the value `nginx-user`.
3. Select **Create** to create the user.
4. To set the user's password, select the **Credentials** tab.
5. Select **Set Password**. Enter the desired password in the **Password** and **Password Confirmation** boxes. Set **Temporary** to **OFF**.
6. Select **Save**.
7. Select **Save Password** to confirm the password change.

### Configure a Client {#configure-client}

_Clients_ are entities that can ask Keycloak to authenticate a user. Most often, clients are applications and services that use Keycloak to secure themselves and provide a single sign-on solution. Clients can also be entities that request identity information or an access token to invoke other services on the network that are secured by Keycloak.

To configure a client, take the following steps:

1. Select **Clients**. You will see a list of pre-created Keycloak clients. To create a new one, select **Create Client**.
2. Enter the details for the client. For the purposes of this demonstration, type `nginx-plus` for the **Client ID**. Leave **Client Type** set to the default value, **OpenID Connect**.
3. Select **Next** to continue.
4. In the **Capability Config** section of the client configuration, set **Client Authentication** to **On**.
5. Select **Save** to create the client.
6. Select the **Credentials** tab. In the **Client Authenticator** list, choose **Client ID and Secret**.
7. Copy the **Client Secret**. You will need this secret for authenticating the `nginx-plus` client with Keycloak.

### Configure a Custom Role {#configure-custom-role}

_Roles_ identify a user type or category. Typical roles in an organization include admin, user, manager, and employee. Applications often assign access and permissions to specific roles rather than individual users, as dealing with individual users can be too fine-grained and challenging to manage.

To configure a custom role, take the following steps:

1. Select **Realm Roles**. You will see a list of pre-created Keycloak roles. To create a new role, select **Create Role**.
2. Type the **Role Name**. For the purposes of this demonstration, use `nginx-keycloak-role` for the role name.
3. Select **Save**.
4. Once the role has been created, you need to assign it to users. Select **Users**, then select the `nginx-user` user you created in the preceding [Configure a User](#configure-user) steps.
5. Select the **Role Mapping** tab, then select **Assign Role**.
6. Select the checkbox beside the `nginx-keycloak-role` role, then select **Assign**.

---

## Test OAuth2 Token Introspection {#test-oauth2-token-introspection}

Follow the steps in this section to test the OAuth2 functionality of Keycloak, token issuing, and token introspection.

### Get the Keycloak Token Introspection Endpoints {#get-keycloak-introspection-endpoints}

An introspection endpoint is needed to configure the Introspection policy in API Connectivity Manager. Additionally, a token endpoint is required for users to authenticate and obtain access tokens for introspection. You can retrieve these endpoints using a REST API call to Keycloak.

#### Structure

```bash
curl -L -X GET http://{HOST/IP_ADDRESS}:{PORT}/realms/{REALM}/.well-known/openid-configuration
```

#### Example

{{< call-out "note" >}}`jq` is used in the following examples to format the JSON response from Keycloak in a legible way. For more information about `jq`, see the [jq GitHub page](https://github.com/stedolan/jq).{{< /call-out >}}

```bash
curl -L -X GET http://192.0.2.5:8080/realms/nginx/.well-known/openid-configuration | jq
```

JSON Response:

```json
{
  "issuer": "http://192.0.2.5:8080/realms/nginx",
  "authorization_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/auth",
  "token_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token",
  "introspection_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect",
  "userinfo_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/userinfo",
  "end_session_endpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/logout",
  "jwks_uri": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/certs",
  "check_session_iframe": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/login-status-iframe.html",
  "grant_types_supported": [
    "authorization_code",
    "implicit",
    "refresh_token",
    "password",
    "client_credentials",
    "urn:ietf:params:oauth:grant-type:device_code",
    "urn:openid:params:grant-type:ciba"
  ]
}
```
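
If you only need the two endpoints used later in this tutorial, a short filter such as the following sketch saves scrolling through the full discovery document (it assumes `jq` is installed on the host making the request):

```bash
# Pull out just the token and introspection endpoints from the discovery document.
curl -s http://192.0.2.5:8080/realms/nginx/.well-known/openid-configuration \
  | jq -r '"token_endpoint:         " + .token_endpoint,
           "introspection_endpoint: " + .introspection_endpoint'
```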

### Generate a User Access Token {#generate-user-access-token}

To generate an access token, use the following request structure:

#### Structure

```bash
curl -L -X POST 'http://{HOST/IP_ADDRESS}:{PORT}/realms/{REALM}/protocol/openid-connect/token' \
  -H 'Content-Type: application/x-www-form-urlencoded' \
  --data-urlencode 'client_id=' \
  --data-urlencode 'grant_type=password' \
  --data-urlencode 'client_secret=' \
  --data-urlencode 'scope=openid' \
  --data-urlencode 'username=' \
  --data-urlencode 'password='
```

#### Example

```bash
curl -L -X POST 'http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token' \
  -H 'Content-Type: application/x-www-form-urlencoded' \
  --data-urlencode 'client_id=nginx-plus' \
  --data-urlencode 'grant_type=password' \
  --data-urlencode 'client_secret=' \
  --data-urlencode 'scope=openid' \
  --data-urlencode 'username=nginx-user' \
  --data-urlencode 'password=password' \
  | jq
```

JSON Response:

Keycloak responds with a JSON object containing an `access_token` for the user `nginx-user`:

```json
{
  "access_token": "",
  "expires_in": 300,
  "refresh_expires_in": 1800,
  "refresh_token": "",
  "token_type": "Bearer",
  "id_token": "",
  "not-before-policy": 0,
  "session_state": "9836f5fd-987f-4875-ac75-f7dd5325047c",
  "scope": "openid profile email"
}
```

Typically, the `access_token` is passed in requests to a **Resource Server** (API Proxy) as a `Bearer` token in the `Authorization` header. This is the default OAuth2 Introspection policy behavior in API Connectivity Manager.
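
For the curl-based tests later in this tutorial, it can be convenient to capture the token in a shell variable rather than copying it by hand. The following is a minimal sketch that assumes `jq` is installed and uses the hypothetical variable names `CLIENT_SECRET` and `ACCESS_TOKEN`:

```bash
# Hypothetical helper: store the client secret once, then fetch a token into a variable.
export CLIENT_SECRET="<client secret copied from the Keycloak Credentials tab>"

ACCESS_TOKEN=$(curl -s -X POST 'http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token' \
  -H 'Content-Type: application/x-www-form-urlencoded' \
  --data-urlencode 'client_id=nginx-plus' \
  --data-urlencode 'grant_type=password' \
  --data-urlencode "client_secret=${CLIENT_SECRET}" \
  --data-urlencode 'scope=openid' \
  --data-urlencode 'username=nginx-user' \
  --data-urlencode 'password=<nginx-user password>' \
  | jq -r '.access_token')

echo "${ACCESS_TOKEN}" | cut -c1-20   # quick sanity check that a token was returned
```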

### Introspecting a User Access Token {#introspect-token-test}

You can mimic the process by which an NGINX client introspects an incoming user `access_token` with Keycloak.

{{< call-out "note" >}}Keycloak is configured to accept basic auth credentials from the `nginx-plus` client; in this case, the credentials are formatted as `CLIENT_ID:CLIENT_SECRET`. This combination must be [base64 url encoded](https://www.base64url.com/) before it is passed in the `Authorization` header.{{< /call-out >}}

#### Structure

```shell
curl -L -X POST 'http://{HOST/IP_ADDRESS}:{PORT}/realms/{REALM}/protocol/openid-connect/token/introspect' \
  -H "Authorization: Bearer " \
  -H "Accept: application/json" \
  -H "Content-Type: application/x-www-form-urlencoded" \
  --data-urlencode 'token=' \
  | jq
```

#### Example

```shell
curl -L -X POST 'http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect' \
  -H "Authorization: Bearer " \
  -H "Accept: application/json" \
  -H "Content-Type: application/x-www-form-urlencoded" \
  --data-urlencode 'token=' \
  | jq
```

JSON Response:

Keycloak responds with a token introspection JSON response with associated claims that NGINX can extract and forward to backend services.

```json
{
  "active": true,
  "exp": 1665585794,
  "iat": 1665585494,
  "jti": "c8723771-2474-4c94-b155-f78a4583419f",
  "iss": "http://192.0.2.5:8080/realms/nginx",
  "aud": "account",
  "sub": "a95117bf-1a2e-4d46-9c44-5fdee8dddd11",
  "typ": "Bearer",
  "azp": "nginx-plus",
  "session_state": "b7ca9271-02ce-453f-b491-61ec4e648d5d",
  "given_name": "",
  "family_name": "",
  "preferred_username": "nginx-user",
  "email_verified": false,
  "acr": "1",
  "scope": "openid profile email",
  "sid": "b7ca9271-02ce-453f-b491-61ec4e648d5d",
  "client_id": "nginx-plus",
  "username": "nginx-user",
  "realm_access": {
    "roles": [
      "default-roles-nginx",
      "offline_access",
      "nginx-keycloak-role",
      "uma_authorization"
    ]
  },
  "resource_access": {
    "account": {
      "roles": [
        "manage-account",
        "manage-account-links",
        "view-profile"
      ]
    }
  }
}
```
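
The empty placeholders above stand in for the client credentials and the token. As a convenience, curl can build the base64-encoded credentials for you with `-u`; the sketch below assumes the `CLIENT_SECRET` and `ACCESS_TOKEN` variables from the previous section, and assumes Keycloak authenticates the `nginx-plus` client with HTTP Basic credentials:

```bash
# Sketch: introspect the token captured earlier; curl's -u flag handles the
# base64 encoding of CLIENT_ID:CLIENT_SECRET for HTTP Basic authentication.
curl -s -X POST 'http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect' \
  -u "nginx-plus:${CLIENT_SECRET}" \
  -H 'Accept: application/json' \
  -H 'Content-Type: application/x-www-form-urlencoded' \
  --data-urlencode "token=${ACCESS_TOKEN}" \
  | jq '.active, .username, .exp'
```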

At this checkpoint in the tutorial, Keycloak is sufficiently configured for token introspection.

---

## Configure API Connectivity Manager {#amc-config}

In this section, we will use the API Connectivity Manager REST API to set up a proxy in API Connectivity Manager. You'll need to pass the NGINX Management Suite user credentials in the Basic Authentication header for each REST request.

### Creating Workspaces & Environment {#create-workspace-environment}

1. To create an Infrastructure Workspace with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces` endpoint:

    ```bash
    POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces
    ```

    **JSON Request**

    ```json
    {
      "name": "infra-ws"
    }
    ```

2. To create an Environment with a minimum configuration, send the following JSON request to the `/infrastructure/workspaces/infra-ws/environments` endpoint. The `proxyClusterName`, `test-ig`, is the name of the Instance Group that the data plane host was added to when you [installed the NGINX Agent](#install-agent) above. The `hostnames` array should contain the hostname of the data plane host.

    ```bash
    POST https://192.0.2.2/api/acm/v1/infrastructure/workspaces/infra-ws/environments
    ```

    **JSON Request**

    ```json
    {
      "name": "demo-env",
      "proxies": [
        {
          "proxyClusterName": "test-ig",
          "hostnames": [
            "data-host"
          ]
        }
      ]
    }
    ```

3. To create a Service Workspace with a minimum configuration, send the following JSON request to the `/services/workspaces` endpoint:

    ```bash
    POST https://192.0.2.2/api/acm/v1/services/workspaces
    ```

    **JSON Request**

    ```json
    {
      "name": "service-ws"
    }
    ```

### Create a Basic API Proxy {#create-basic-api-proxy}

1. To create an API proxy with a minimum configuration and no non-default policies, send the following JSON request to the `/services/workspaces/service-ws/proxies` endpoint. The Proxy service target is our echo server.

    ```bash
    POST https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies
    ```

    **JSON Request**

    ```json
    {
      "name": "test-proxy",
      "version": "v1",
      "proxyConfig": {
        "hostname": "data-host",
        "ingress": {
          "basePath": "/",
          "basePathVersionAppendRule": "NONE"
        },
        "backends": [
          {
            "serviceName": "backend-echo-svc",
            "serviceTargets": [
              {
                "hostname": "192.0.2.4",
                "listener": {
                  "enableTLS": false,
                  "port": 10000,
                  "transportProtocol": "HTTP"
                }
              }
            ]
          }
        ]
      }
    }
    ```

2. To test whether the API Proxy and backend echo server are working correctly, send a custom header and dummy JSON body to show these proxied values in the echo server response.

    ```bash
    POST https://192.0.2.4/my/test/api
    HEADERS:
      X-NGINX-Test: true
    ```

    **JSON Request**

    ```json
    {
      "testKey": "testValue"
    }
    ```

    **Expected Result**

    If everything is configured correctly in API Connectivity Manager and the echo server, the response should be similar to the following example:

    ```bash
    Request served by echo-host

    HTTP/1.0 POST /my/test/api

    Host: 192.0.2.4
    Accept: */*
    Cache-Control: no-cache
    Content-Length: 30
    Content-Type: application/json
    X-Correlation-Id: c241b72519e71cf7bce9262910ffbe40
    X-Real-Ip: 192.0.2.1
    X-NGINX-Test: true

    {"testKey": "testValue"}
    ```
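
The ACM requests in this section are shown in shorthand (`POST https://...` plus a JSON body). As a hedged sketch, any of them can be sent with curl by supplying the NGINX Management Suite credentials with `-u`; the example below replays the proxy creation request from step 1, assuming hypothetical credentials and that the JSON body has been saved as `test-proxy.json`:

```bash
# Sketch only: hypothetical NMS credentials; -k is for the self-signed test certificate.
curl -sk -u '<nms-user>:<nms-password>' \
  -X POST 'https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies' \
  -H 'Content-Type: application/json' \
  -d @test-proxy.json
```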

### Upsert OAuth2 Introspection Policy

1. Upsert the API proxy with an OAuth2 Introspection policy. The default `action.introspectionResponse` type, `application/json`, is used, so you don't need to define it in the API request body.

    {{< call-out "note" >}}This shortened request body removes all the default API proxy policies. To maintain these default policies, perform a `GET` request on the proxy before the upsert and copy the policy configuration from the response.{{< /call-out >}}

    ```bash
    PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy
    ```

    **JSON Request**

    ```json
    {
      "name": "test-proxy",
      "version": "v1",
      "proxyConfig": {
        "hostname": "data-host",
        "ingress": {
          "basePath": "/",
          "basePathVersionAppendRule": "NONE"
        },
        "backends": [
          {
            "serviceName": "backend-echo-svc",
            "serviceTargets": [
              {
                "hostname": "192.0.2.4",
                "listener": {
                  "enableTLS": false,
                  "port": 10000,
                  "transportProtocol": "HTTP"
                }
              }
            ]
          }
        ],
        "policies": {
          "oauth2-introspection": [
            {
              "action": {
                "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect"
              },
              "data": [
                {
                  "clientAppID": "nginx-plus",
                  "clientSecret": ""
                }
              ]
            }
          ]
        }
      }
    }
    ```

### Testing the Introspection Policy {#test-introspection-policy}

1. Using the same echo server request from the previous section, test the echo server again:

    ```bash
    POST https://192.0.2.4/my/test/api
    ```

    If you've successfully configured and applied the OAuth2 Introspection policy, the request is blocked from reaching the backend, and `401 Unauthorized` is returned.

    JSON Response:

    ```json
    {
      "message": "Unauthorized",
      "status": "401"
    }
    ```

2. Using the Keycloak user you created in the [Configure a User](#configure-user) section above, obtain an access token from Keycloak. Follow the steps you completed in the [Generate a User Access Token](#generate-user-access-token) section.
3. Copy the `access_token` in the JSON response that's returned from Keycloak.
4. In the next request to the echo server, add a request header with the following details:

    - key: `Authorization`
    - value: `Bearer `, where `` is the token you copied in step 3.

    The `` is prefixed with `Bearer` because it's passed as a [bearer token](https://www.rfc-editor.org/rfc/rfc6750) to the API proxy.

    If the OAuth2 Introspection policy has been configured and applied successfully, this request is now allowed to reach the backend.

    ```bash
    POST https://192.0.2.4/my/test/api
    HEADERS:
      Authorization: 'Bearer '
    ```

    The access token is taken from the `Authorization` header and introspected against the Keycloak introspection endpoint defined in the policy configuration. If the OAuth2 server responds with `"active": true` in the introspection response, the request proceeds to the backend. The response should look like the following example:

    ```bash
    Request served by echo-host

    HTTP/1.0 POST /my/test/api

    Host: default_http_a4334620-226b-491d-8503-e0724bdf5521
    Accept: */*
    Accept-Encoding: gzip, deflate, br
    Cache-Control: no-cache
    Connection: close
    Content-Length: 30
    Content-Type: application/json
    X-Correlation-Id: ffc5dc656e220a20fa57835e0653f19f
    X-Token-Exp: 1666003414
    X-Token-Scope: openid email profile
    X-Token-Username: nginx-user
    ```
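
    As before, the request can be reproduced with curl; this is a minimal sketch using the `ACCESS_TOKEN` shell variable captured earlier (`-k` skips certificate verification and is only for this test environment):

    ```bash
    # Sketch only: replay the authorized test request with the token captured earlier.
    curl -sk -X POST 'https://192.0.2.4/my/test/api' \
      -H "Authorization: Bearer ${ACCESS_TOKEN}" \
      -H 'Content-Type: application/json' \
      -d '{"testKey": "testValue"}'
    ```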

There are a few things to note here:

- The default headers changed because we removed the default request headers policy when upserting the Introspection policy. This won't happen in environments where the default policies are included in the upsert request; in that case, the default request headers are forwarded to the backend services.
- There are new headers proxied to the backend: `X-Token-Exp`, `X-Token-Scope`, and `X-Token-Username`. These are the default claims defined in the policy configuration value `action.forwardedClaimsInProxyHeader`, and their values are taken from the Identity Provider (IdP) introspection response to the in-flight request.
- There is no `Authorization` header in the request forwarded to the backend. This is because NGINX strips the incoming user access token from the header or query parameters, regardless of the key used.

If you pass an inactive or invalid token and perform the same request above, the request is blocked from reaching the backend, and `403 Forbidden` is returned.

```json
{
  "message": "Forbidden",
  "status": "403"
}
```

You can check the logs on the data host to determine the cause of the `403 Forbidden` response. There may be several reasons for a forbidden response; however, the user only sees `403 Forbidden` in all cases, except where no access token is provided. In that case, the response is `401 Unauthorized`.

```bash
cat /var/log/nginx/data-host-error.log
2022/10/17 10:23:11 [error] 35643#35643: *15 js: OAuth introspection access_token not provided.
2022/10/17 11:24:30 [error] 39542#39542: *49 js: OAuth token introspection found inactive token.
```

### Custom Token Placement & Key {#custom-token-placement-key}

You can configure the Introspection policy to let users pass their access token as a header or query parameter using any key name. By default, the access token is given in the `Authorization` header as a bearer token. The `Bearer` prefix is required when the access token is passed in the `Authorization` header. If the header is changed from this default `Authorization` value, passing a `Bearer` prefix renders the request invalid.

`action.clientTokenSuppliedIn` configures how the access token is passed in the user request; `action.clientTokenName` configures the key under which the access token is extracted from the user request.

1. Upsert the proxy with an updated Introspection policy configuration, where the access token is passed in the request headers as `apiAccessToken`. The default value for `action.clientTokenSuppliedIn` is `HEADER`, so you don't need to include it in the API request body.

    ```bash
    PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy
    ```

    **JSON Request**

    ```json
    {
      "name": "test-proxy",
      "version": "v1",
      "proxyConfig": {
        "hostname": "data-host",
        "ingress": {
          "basePath": "/",
          "basePathVersionAppendRule": "NONE"
        },
        "backends": [
          {
            "serviceName": "backend-echo-svc",
            "serviceTargets": [
              {
                "hostname": "192.0.2.4",
                "listener": {
                  "enableTLS": false,
                  "port": 10000,
                  "transportProtocol": "HTTP"
                }
              }
            ]
          }
        ],
        "policies": {
          "oauth2-introspection": [
            {
              "action": {
                "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect",
                "clientTokenName": "apiAccessToken"
              },
              "data": [
                {
                  "clientAppID": "nginx-plus",
                  "clientSecret": ""
                }
              ]
            }
          ]
        }
      }
    }
    ```

2. In the next request to the echo server, change the request header so the access token is passed as `apiAccessToken`:

    ```bash
    POST https://192.0.2.4/my/test/api
    HEADERS:
      apiAccessToken: ''
    ```

    The request should proceed to the backend service as expected, and the echo server should respond in turn.

3. Upsert the proxy with an updated Introspection policy configuration, where the access token is passed in the query arguments as `queryAuthz`, with `action.clientTokenSuppliedIn` set to `QUERY`.

    ```bash
    PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy
    ```

    ```json
    {
      "name": "test-proxy",
      "version": "v1",
      "proxyConfig": {
        "hostname": "data-host",
        "ingress": {
          "basePath": "/",
          "basePathVersionAppendRule": "NONE"
        },
        "backends": [
          {
            "serviceName": "backend-echo-svc",
            "serviceTargets": [
              {
                "hostname": "192.0.2.4",
                "listener": {
                  "enableTLS": false,
                  "port": 10000,
                  "transportProtocol": "HTTP"
                }
              }
            ]
          }
        ],
        "policies": {
          "oauth2-introspection": [
            {
              "action": {
                "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect",
                "clientTokenSuppliedIn": "QUERY",
                "clientTokenName": "queryAuthz"
              },
              "data": [
                {
                  "clientAppID": "nginx-plus",
                  "clientSecret": ""
                }
              ]
            }
          ]
        }
      }
    }
    ```

4. In the next request to the echo server, remove the custom request header `apiAccessToken` and pass the access token in the query argument `queryAuthz`:

    ```bash
    POST https://192.0.2.4/my/test/api?queryAuthz=
    ```

    The request should proceed to the backend service as expected, and the echo server should respond in turn. Similar to passing the access token as a header, the user's access token is stripped from the in-flight request before it's forwarded to the backend service. Curl sketches for both variants follow this list.
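
The following is a hedged curl sketch of both variants, using the `ACCESS_TOKEN` shell variable captured earlier (`-k` is only appropriate in this test environment); the header form matches the configuration in steps 1-2, and the query form matches steps 3-4:

```bash
# Custom header variant (policy with clientTokenName: apiAccessToken).
curl -sk -X POST 'https://192.0.2.4/my/test/api' \
  -H "apiAccessToken: ${ACCESS_TOKEN}"

# Query parameter variant (policy with clientTokenSuppliedIn: QUERY, clientTokenName: queryAuthz).
curl -sk -X POST "https://192.0.2.4/my/test/api?queryAuthz=${ACCESS_TOKEN}"
```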

### Token Caching {#token-caching}

OAuth2 token introspection is provided by the Identity Provider (IdP) at a JSON/REST endpoint, so the standard response is a JSON object with HTTP status 200. When this response is keyed against the access token, it becomes highly cacheable.

You can configure NGINX to cache a copy of the introspection response for each access token. Then, the next time the same access token is presented, NGINX serves the cached introspection response instead of making an API call to the IdP. Token caching vastly improves overall latency for subsequent requests. You can manage how long cached responses are used to mitigate the risk of accepting an expired or recently revoked access token. For example, suppose an API client typically makes a burst of several API calls over a short period. In that case, a cache validity of 10 seconds might be sufficient to provide a measurable improvement in user experience.

#### Security Considerations {#security-considerations}

{{< call-out "important" >}}There are some security considerations to keep in mind when enabling token caching. A shorter cache expiration time is more secure, since the resource servers must query the introspection endpoint more frequently; however, the increased number of queries may put a load on the endpoint. Longer expiration times, by comparison, open a window during which a token may actually be expired or revoked but can still be used at a resource server for the remaining cache time.

You can mitigate these situations by never caching the value beyond the token's expiration time. For example, in Keycloak, the default token duration is **300 seconds**. This should be the upper limit of token caching in the Introspection policy configuration.{{< /call-out >}}

#### Token Caching Setup {#token-caching-setup}

You can configure token caching in the Introspection policy by setting the `action.cacheIntrospectionResponse` value. An NGINX unit-of-time measurement is expected, in seconds, minutes, or hours. By default, token caching is enabled with a five-minute (`5m`) cache duration. Setting the value to `0s`, `0m`, or `0h` disables caching.

1. Upsert the proxy with an Introspection policy configuration that sets a token cache duration of ten seconds (`10s`).

    ```bash
    PUT https://192.0.2.2/api/acm/v1/services/workspaces/service-ws/proxies/test-proxy
    ```

    ```json
    {
      "name": "test-proxy",
      "version": "v1",
      "proxyConfig": {
        "hostname": "data-host",
        "ingress": {
          "basePath": "/",
          "basePathVersionAppendRule": "NONE"
        },
        "backends": [
          {
            "serviceName": "backend-echo-svc",
            "serviceTargets": [
              {
                "hostname": "192.0.2.4",
                "listener": {
                  "enableTLS": false,
                  "port": 10000,
                  "transportProtocol": "HTTP"
                }
              }
            ]
          }
        ],
        "policies": {
          "oauth2-introspection": [
            {
              "action": {
                "introspectionEndpoint": "http://192.0.2.5:8080/realms/nginx/protocol/openid-connect/token/introspect",
                "cacheIntrospectionResponse": "10s"
              },
              "data": [
                {
                  "clientAppID": "nginx-plus",
                  "clientSecret": ""
                }
              ]
            }
          ]
        }
      }
    }
    ```

2. Send a request to the echo server API proxy with the provided access token. The introspection token response will be cached on the data host.

3. To verify whether token caching is successful, locate the token cache Realm on the data host. Realms follow the pattern `tokens__