From c80753bda2e9b7a6a633c2ce5ae4f7b4b323977e Mon Sep 17 00:00:00 2001 From: rosie yohannan Date: Tue, 4 Nov 2025 11:20:13 +0000 Subject: [PATCH 01/10] set up server 4.9 docs --- antora-playbook.yml | 1 + docs/server-admin-4.9/antora.yml | 10 + docs/server-admin-4.9/modules/ROOT/nav.adoc | 48 + .../ROOT/partials/installation/phase-1.adoc | 994 +++++++++++ .../ROOT/partials/installation/phase-2.adoc | 1005 +++++++++++ .../ROOT/partials/installation/phase-3.adoc | 1176 +++++++++++++ .../ROOT/partials/installation/phase-4.adoc | 53 + .../pages/additional-considerations.adoc | 126 ++ .../pages/example-values.adoc | 120 ++ .../pages/phase-1-prerequisites.adoc | 150 ++ .../phase-2-configure-object-storage.adoc | 197 +++ .../phase-3-install-circleci-server.adoc | 169 ++ .../phase-4-configure-nomad-clients.adoc | 134 ++ .../pages/phase-5-test-your-installation.adoc | 48 + .../pages/hardening-your-cluster.adoc | 279 +++ .../pages/installation-reference.adoc | 1520 +++++++++++++++++ .../installing-server-behind-a-proxy.adoc | 37 + .../pages/phase-1-aws-prerequisites.adoc | 9 + .../pages/phase-1-gcp-prerequisites.adoc | 9 + .../pages/phase-2-aws-core-services.adoc | 9 + .../pages/phase-2-gcp-core-services.adoc | 9 + .../phase-3-aws-execution-environments.adoc | 9 + .../phase-3-gcp-execution-environments.adoc | 9 + .../pages/phase-4-aws-post-installation.adoc | 9 + .../pages/phase-4-gcp-post-installation.adoc | 9 + .../installation/pages/upgrade-server.adoc | 50 + .../operator/pages/application-lifecycle.adoc | 68 + .../operator/pages/backup-and-restore.adoc | 102 ++ .../circleci-server-security-features.adoc | 156 ++ .../pages/configuring-external-services.adoc | 362 ++++ .../operator/pages/data-retention.adoc | 90 + .../expanding-internal-database-volumes.adoc | 325 ++++ .../modules/operator/pages/faq.adoc | 13 + ...troduction-to-nomad-cluster-operation.adoc | 159 ++ ...ual-machines-with-machine-provisioner.adoc | 254 +++ .../pages/managing-build-artifacts.adoc | 100 ++ 
.../pages/managing-load-balancers.adoc | 30 + .../modules/operator/pages/managing-orbs.adoc | 67 + .../pages/managing-user-accounts.adoc | 63 + ...monitoring-stack-reference-helm-chart.adoc | 11 + .../operator/pages/operator-overview.adoc | 75 + .../pages/troubleshooting-and-support.adoc | 125 ++ .../modules/operator/pages/upgrade-mongo.adoc | 95 ++ .../operator/pages/usage-data-collection.adoc | 12 + .../operator/pages/user-authentication.adoc | 10 + .../pages/circleci-server-overview.adoc | 411 +++++ .../modules/overview/pages/release-notes.adoc | 19 + 47 files changed, 8736 insertions(+) create mode 100644 docs/server-admin-4.9/antora.yml create mode 100644 docs/server-admin-4.9/modules/ROOT/nav.adoc create mode 100644 docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc create mode 100644 docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc create mode 100644 docs/server-admin-4.9/modules/ROOT/partials/installation/phase-3.adoc create mode 100644 docs/server-admin-4.9/modules/ROOT/partials/installation/phase-4.adoc create mode 100644 docs/server-admin-4.9/modules/air-gapped-installation/pages/additional-considerations.adoc create mode 100644 docs/server-admin-4.9/modules/air-gapped-installation/pages/example-values.adoc create mode 100644 docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc create mode 100644 docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc create mode 100644 docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc create mode 100644 docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc create mode 100644 docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/hardening-your-cluster.adoc create mode 100644 
docs/server-admin-4.9/modules/installation/pages/installation-reference.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/installing-server-behind-a-proxy.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-1-aws-prerequisites.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-1-gcp-prerequisites.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-2-aws-core-services.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-2-gcp-core-services.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-3-aws-execution-environments.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-3-gcp-execution-environments.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-4-aws-post-installation.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/phase-4-gcp-post-installation.adoc create mode 100644 docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/application-lifecycle.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/circleci-server-security-features.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/data-retention.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/faq.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc create mode 100644 
docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/managing-load-balancers.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/operator-overview.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/troubleshooting-and-support.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/usage-data-collection.adoc create mode 100644 docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc create mode 100644 docs/server-admin-4.9/modules/overview/pages/circleci-server-overview.adoc create mode 100644 docs/server-admin-4.9/modules/overview/pages/release-notes.adoc diff --git a/antora-playbook.yml b/antora-playbook.yml index 36f326b3ca..f7a34ff46d 100644 --- a/antora-playbook.yml +++ b/antora-playbook.yml @@ -49,6 +49,7 @@ asciidoc: serverversion46: 4.6.6 serverversion47: 4.7.11 serverversion48: 4.8.4 + serverversion49: 4.9.0 serverversion: 3.4.8 terraformversion: 0.15.4 kubectlversion: 1.19 diff --git a/docs/server-admin-4.9/antora.yml b/docs/server-admin-4.9/antora.yml new file mode 100644 index 0000000000..cdae83c61e --- /dev/null +++ b/docs/server-admin-4.9/antora.yml @@ -0,0 +1,10 @@ +name: server-admin +title: Server +version: server-4.9 +display_version: Server 4.9 +asciidoc: + attributes: + kubernetesversions: 1.31 - 1.33 +nav: +- modules/ROOT/nav.adoc +start_page: overview:circleci-server-overview.adoc \ No newline at end of file diff --git a/docs/server-admin-4.9/modules/ROOT/nav.adoc b/docs/server-admin-4.9/modules/ROOT/nav.adoc new file mode 100644 index 
0000000000..eb1cc0a5f3 --- /dev/null +++ b/docs/server-admin-4.9/modules/ROOT/nav.adoc @@ -0,0 +1,48 @@ +* xref:overview:circleci-server-overview.adoc[CircleCI server Overview] +* xref:overview:release-notes.adoc[Release notes] + +* Installing CircleCI server +** Install on AWS +*** xref:installation:phase-1-aws-prerequisites.adoc[Phase 1: AWS prerequisites] +*** xref:installation:phase-2-aws-core-services.adoc[Phase 2: AWS Core services installation] +*** xref:installation:phase-3-aws-execution-environments.adoc[Phase 3: AWS execution environments installation] +*** xref:installation:phase-4-aws-post-installation.adoc[Phase 4: AWS post-installation] +** Install on GCP +*** xref:installation:phase-1-gcp-prerequisites.adoc[Phase 1: GCP prerequisites] +*** xref:installation:phase-2-gcp-core-services.adoc[Phase 2: GCP Core services installation] +*** xref:installation:phase-3-gcp-execution-environments.adoc[Phase 3: GCP execution environments installation] +*** xref:installation:phase-4-gcp-post-installation.adoc[Phase 4: GCP post-installation] +** Install in an air-gapped environment +*** xref:air-gapped-installation:phase-1-prerequisites.adoc[Phase 1 - Prerequisites] +*** xref:air-gapped-installation:phase-2-configure-object-storage.adoc[Phase 2 - Configure object storage] +*** xref:air-gapped-installation:phase-3-install-circleci-server.adoc[Phase 3 - Install CircleCI server] +*** xref:air-gapped-installation:phase-4-configure-nomad-clients.adoc[Phase 4 - Configure Nomad clients] +*** xref:air-gapped-installation:phase-5-test-your-installation.adoc[Phase 5 - Test installation] +*** xref:air-gapped-installation:additional-considerations.adoc[Additional considerations] +*** xref:air-gapped-installation:example-values.adoc[Example Values YAML] +** xref:installation:hardening-your-cluster.adoc[Hardening your cluster] +** xref:installation:installing-server-behind-a-proxy.adoc[Installing server behind a proxy] +** xref:installation:upgrade-server.adoc[Upgrading server] 
+** xref:installation:installation-reference.adoc[Installation reference] + + +* CircleCI server operator guide +** xref:operator:operator-overview.adoc[Operator overview] +** xref:operator:introduction-to-nomad-cluster-operation.adoc[Introduction to Nomad cluster operation] +** xref:operator:managing-user-accounts.adoc[Managing user accounts] +** xref:operator:managing-orbs.adoc[Managing orbs] +** xref:operator:manage-virtual-machines-with-machine-provisioner.adoc[Manage virtual machines with machine provisioner] +** xref:operator:configuring-external-services.adoc[Configuring external services] +** xref:operator:data-retention.adoc[Data retention in server] +** xref:operator:expanding-internal-database-volumes.adoc[Expanding internal database volumes] +** xref:operator:managing-load-balancers.adoc[Managing load balancers] +** xref:operator:user-authentication.adoc[User authentication] +** xref:operator:managing-build-artifacts.adoc[Managing build artifacts] +** xref:operator:usage-data-collection.adoc[Usage data collection] +** xref:operator:circleci-server-security-features.adoc[CircleCI server security features] +** xref:operator:application-lifecycle.adoc[Application lifecycle] +** xref:operator:troubleshooting-and-support.adoc[Troubleshooting and support] +** xref:operator:backup-and-restore.adoc[Backup and restore] +** xref:operator:upgrade-mongo.adoc[Upgrade MongoDB] +** xref:operator:monitoring-stack-reference-helm-chart.adoc[Monitoring stack reference Helm chart] +** xref:operator:faq.adoc[FAQs] diff --git a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc new file mode 100644 index 0000000000..c70eaf745d --- /dev/null +++ b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc @@ -0,0 +1,994 @@ +CircleCI server {serverversion49} is installed as a Helm chart. 
The installation process is broken down into four phases with a validation step at the end of each phase, allowing you to confirm success before moving to the next phase. Depending on your requirements, phases 3 and 4 may include multiple steps. This installation guide assumes you have already read the xref:server-admin:overview:circleci-server-overview.adoc#[CircleCI server 4.9 overview]. + +NOTE: In the following sections, replace any sections indicated by `< >` with your details. + +[#install-required-software] +== 1. Install required software +Download and install the following software before continuing: + +[.table.table-striped] +[cols=4*, options="header", stripes=even] +|=== +| Tool +| Version +| Used for +| Notes + +| link:https://kubernetes.io/docs/tasks/tools/install-kubectl/[kubectl] +| {kubectlversion} or greater +| Kubernetes CLI +| + +| link:https://helm.sh/[Helm] +| {helmversion} or greater +| Kubernetes Package Management +| + +| link:https://github.com/databus23/helm-diff[Helm Diff] +| {helmdiffversion} or greater +| Helping with `values.yaml` changes and updates +| Optional, but may help with troubleshooting between releases + +ifndef::env-gcp[] +| link:https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html[AWS CLI] +| Latest +| Managing AWS resources +| AWS installs only +endif::env-gcp[] + +ifndef::env-aws[] +| `gcloud` and `gsutil` as part of the link:https://cloud.google.com/sdk/docs/[Google Cloud SDK]. +| Latest +| Managing GCP resources +| GCP installs only +endif::env-aws[] + +| link:https://www.terraform.io/downloads.html[Terraform] +| {terraformversion} or greater +| Infrastructure Management +| Required for installations within GCP or AWS + +| Install and configure link:https://min.io/docs/minio/linux/reference/minio-mc.html[MinIO CLI] for your storage provider if you choose not to use AWS or GCP object storage. +| TBC +| TBC +| Required for installations outside AWS and GCP, for example, local installation. 
+ +|=== + +[#create-a-vpc] +== 2. Create a VPC + +NOTE: **Installing server locally?** you can skip this step, but you should ensure you follow the resourcing recommendation of using /18 CIDR blocks to ensure you have adequate capacity for both your Kubernetes Cluster and the provisioning of Nomad clients to run your jobs. + +If you are installing server in the cloud, either AWS or GCP, you will need to create a new virtual private cloud (VPC). + +ifndef::env-gcp[] +Refer to the link:https://docs.aws.amazon.com/eks/latest/userguide/creating-a-vpc.html[AWS guide to creating a VPC]. +endif::env-gcp[] + +ifndef::env-aws[] +Refer to thelink:https://cloud.google.com/vpc/docs/create-modify-vpc-networks#console[GCP guide to creating a VPC]. +endif::env-aws[] + +[#vpc-cluster-sizing-recommendations] +=== VPC and cluster sizing recommendations + +While there are no strict requirements for VPC (Virtual Private Cloud) setup or disk size, the following practices are recommended for optimal performance and stability in production environments. + +* **VPC Selection** ++ +We recommend you create a new VPC specifically for CircleCI server and its components, rather than using an existing VPC. + +* **Subnet Provisioning** ++ +For high availability, and to avoid potential outages, you should provision subnets using /18 CIDR blocks across multiple Availability Zones. + + +[#create-a-kubernetes-cluster] +== 2. Create a Kubernetes cluster +CircleCI server installs into an existing Kubernetes cluster. If you have not already created a cluster, you should do so next. Instructions for creating a cluster are provided below, or if you are installing locally, first consider the sizing, version, and permissions requirements. + +[#cluster-requirements] +=== Cluster requirements + +[#compute-resources] +==== Compute resources +The application uses a large number of resources. 
Depending on your usage, your Kubernetes cluster should meet the following requirements: + +[.table.table-striped] +[cols=5*, options="header", stripes=even] +|=== +| Number of daily active CircleCI users +| Minimum Nodes +| Total CPU +| Total RAM +| NIC speed + +| < 500 +| 4 +| 24 cores +| 90 GB +| 1 Gbps + +| 500+ +| 6 +| 48 cores +| 240 GB +| 10 Gbps +|=== + +[#supported-kubernetes-versions] +==== Supported Kubernetes versions + +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +| CircleCI Version +| Kubernetes Version + +| 4.9.x +| {kubernetesversions} +|=== + +[#minimum-permissions-requirments] +==== Minimum permissions requirements + +The installing user must have **at least** admin permissions for the namespace into which CircleCI is to be installed. + +// Don't include this section in the GCP page. +ifndef::env-gcp[] + +[#eks] +=== EKS +You can learn more about creating an Amazon EKS cluster in the link:https://aws.amazon.com/quickstart/architecture/amazon-eks/[EKS docs]. We recommend using link:https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html[`eksctl`] to create your cluster, which creates a VPC and selects the proper security groups for you. + +. link:https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html[Install] and link:https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html[configure] the AWS CLI for your AWS account. +. Install link:https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html[`eksctl`]. +. Create your cluster by running the following (Cloud formation with `eksctl` and EKS can take more than 20 minutes to complete): ++ +[source,console] +---- +$ eksctl create cluster --name=circleci-server --nodes 4 --node-type m5.2xlarge --with-oidc --region "" +---- ++ +`--with-oidc` as in the example above is used to add an IAM OIDC provider to your cluster. 
This provider is necessary for adding the link:https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html[EBS CSI driver] and enabling link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[IRSA roles] on your cluster. See below for more information. `eksctl` may be further extended with other flags to customize your EKS cluster following their link:https://eksctl.io/usage/creating-and-managing-clusters/[documentation] +. Once the cluster has been created, you can use the following command to configure `kubectl` access: ++ +[source,console] +---- +$ eksctl utils write-kubeconfig --cluster circleci-server +---- +. As of EKS 1.24, EKS will no longer have the EBS CSI Driver installed by default. This means that EKS cannot manage the EBS volumes for the persistent volumes of your cluster. CircleCI uses persistent volumes for services such as PostgreSQL, MongoDB, Redis, RabbitMQ and Vault. You may manually provision persistence volumes or you may install the EBS CSI Driver on your cluster following the link:https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html[AWS documentation]. + +NOTE: You may see the following error `AWS STS Access - cannot get role ARN for current session: InvalidClientTokenID`. This means your AWS credentials are invalid, or your IAM user does not have permission to create an EKS cluster. Proper IAM permissions are necessary to use `eksctl`. See the AWS documentation on link:https://aws.amazon.com/iam/features/manage-permissions/[IAM permissions]. + +endif::env-gcp[] + +// Don't include this section in the AWS page. +ifndef::env-aws[] + +[#gke] +=== GKE +You can learn more about creating a GKE cluster in the link:https://cloud.google.com/kubernetes-engine/docs/how-to#creating-clusters[GKE docs]. + +CAUTION: Do not use an Autopilot cluster. CircleCI requires functionality that is not supported by GKE Autopilot. + +. 
link:https://cloud.google.com/sdk/gcloud[Install] and link:https://cloud.google.com/kubernetes-engine/docs/quickstart#defaults[configure] the GCP CLI for your GCP account. This includes creating a Google Project, which will be required to create a cluster within your project. ++ +NOTE: When you create your project, make sure you also enable API access. If you do not enable API access, the command we will run next (to create your cluster) will fail. ++ +Setting the default `project id`, compute `zone` and `region` will make running subsequent commands easier: ++ +[source,shell] +---- +gcloud config set project +gcloud config set compute/zone +gcloud config set compute/region +---- +. Create your cluster ++ +TIP: CircleCI recommends using link:https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity[Workload Identity] to allow workloads/pods in your GKE clusters to impersonate Identity and Access Management (IAM) service accounts to access Google Cloud services. Use the following command to provision a simple cluster: ++ +[source,shell] +---- +gcloud container clusters create circleci-server \ + --num-nodes 5 \ + --machine-type n1-standard-8 \ + --workload-pool=.svc.id.goog \ + --network \ + --subnetwork +---- ++ +NOTE: Your kube-context should get updated with the new cluster credentials automatically. ++ +If you need to update your kube-context manually, you can by running the following: ++ +[source,shell] +---- +gcloud container clusters get-credentials circleci-server +---- +. Install the link:https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke[GKE authentication plugin] for `kubectl`: ++ +[source,shell] +---- +gcloud components install gke-gcloud-auth-plugin +---- +. 
Verify your cluster: ++ +[source,shell] +---- +kubectl cluster-info +---- + +[#enable-workload-identity-in-gke] +==== Enable Workload Identity in GKE (optional) +Follow these steps if you already have a GKE cluster and need to enable Workload Identity on the cluster and the node pools. + +. Enable Workload Identity on existing cluster: ++ +[source,shell] +---- + gcloud container clusters update "" \ + --workload-pool=".svc.id.goog" +---- +. Get node pools of existing GKE cluster: ++ +[source,shell] +---- + gcloud container node-pools list --cluster "" +---- + +. Update existing node pools: ++ +[source,shell] +---- + gcloud container node-pools update "" \ + --cluster="" \ + --workload-metadata="GKE_METADATA" +---- + +You must repeat Step 3 for all the existing node pools. Follow these links for steps to enable Workload Identity for your Kubernetes service accounts: + +* xref:server-admin:installation:phase-3-gcp-execution-environments.adoc#gcp[Nomad Autoscaler] +* xref:server-admin:installation:phase-3-gcp-execution-environments.adoc#gcp-authentication[VM] +* <> + +endif::env-aws[] + +[#create-a-new-github-oauth-app] +== 3. Create a new GitHub OAuth app + +CAUTION: If GitHub Enterprise and CircleCI server are not on the same domain, then images and icons from GHE will fail to load in the CircleCI web app. + +Registering and setting up a new GitHub OAuth app for CircleCI server allows for the following: + +* Authorization control to your server installation using GitHub OAuth . +* Updates to GitHub projects/repos using build status information. + +The following steps apply for both GitHub.com and GitHub Enterprise. + +. In your browser, navigate to menu:Your-GitHub-instance[User Settings>Developer Settings>OAuth Apps] and select **New OAuth App**. ++ +.New GitHub OAuth App +image::guides:ROOT:github-oauth-new.png[Screenshot showing setting up a new OAuth app] + +. 
Complete the following fields, based on your planned installation: +** *Homepage URL*: The URL of your planned CircleCI installation. +** *Authorization callback URL*: The authorization callback URL is the URL of your planned CircleCI installation followed by `/auth/github` + +. Once completed, you will be shown the *Client ID*. Select *Generate a new Client Secret* to generate a Client Secret for your new OAuth App. You will need these values when you configure CircleCI server. ++ +.Client ID and Secret +image::guides:ROOT:github-clientid.png[Screenshot showing GitHub Client ID] + +[#github-enterprise] +=== GitHub Enterprise + +If using GitHub Enterprise, you also need a personal access token and the domain name of your GitHub Enterprise instance. + +Create the `defaultToken` by navigating to **User Settings > Developer Settings > Personal access tokens**. The default token requires no scopes. You will need this value when you configure CircleCI server. + +[#static-ip-address] +== 4. Static IP addresses +It is recommended to provision a static IP address to assign to the load balancer created by the cluster. While this is not necessary, it does eliminate the need to update DNS records if the service-created load balancer is reprovisioned. + +// Don't include this section in the AWS page. +ifndef::env-aws[] +[#gcp-reserve-a-static-external-ip-address] +=== GCP: Reserve a static external IP address +The link:https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address#external-ip[Google Cloud docs] provide information on how reserve an IP address. + +Make note of the returned IPv4 address for use later in the `values.yaml` file. + +endif::env-aws[] + +// Don't include this section in the GCP page. +ifndef::env-gcp[] +[#aws-reserve-an-elastic-ip-address] +=== AWS: Reserve an elastic IP address +To reserve an elastic IP address in AWS, run the following AWS CLI commands in your desired environment. 
+ +This command needs to be run to generate an address for every subnet the load balancer deploys into - default 3. + +[source,console,role="nohljsln"] +---- +# Run x times per x subnets (default 3) +aws ec2 allocate-address + +# { +# "PublicIp": "10.0.0.1, +# "AllocationId": "eipalloc-12345", +# "PublicIpv4Pool": "amazon", +# "NetworkBorderGroup": "us-east-1", +# "Domain": "vpc" +#} +---- + +Make note of each of the returned `AllocationId` values from the CLI for use in the `values.yaml` file. + +endif::env-gcp[] + +[#frontend-tls-certificates] +== 5. Frontend TLS certificates +By default, CircleCI server creates self-signed certificates to get you started. In production, you should supply a certificate from a trusted certificate authority. The link:https://letsencrypt.org/[Let's Encrypt] certificate authority, for example, can issue a free certificate using their link:https://certbot.eff.org/[Certbot] tool. The sections below cover using Google Cloud DNS and AWS Route 53. + +CAUTION: It is important that your certificate contains both your domain and the `app.*` subdomain as subjects. For example, if you host your installation at `server.example.com`, your certificate must cover `app.server.example.com` and `server.example.com`. + +Create your certificates using one of the methods described below. Then use the following commands to retrieve the certificates later when you need them during this installation: + +[source,console] +---- +$ ls -l /etc/letsencrypt/live/ +---- + +[source,console] +---- +$ cat /etc/letsencrypt/live//fullchain.pem +---- + +[source,console] +---- +$ cat /etc/letsencrypt/live//privkey.pem +---- + +// Do not include in GCP page +ifndef::env-gcp[] + +[#aws-route-53] +=== AWS Route 53 + +. If you are using AWS Route 53 for DNS, you need the `certbot-route53` plugin installed. You can install the plugin with the following command: ++ +[source,console] +---- +$ python3 -m pip install certbot-dns-route53 +---- + +. 
Then execute this example to create a private key and certificate (including intermediate certificates) locally in `/etc/letsencrypt/live/`: ++ +[source,console] +---- +$ certbot certonly --dns-route53 -d "" -d "app." +---- + +endif::env-gcp[] + +// Do not include in AWS page +ifndef::env-aws[] + +[#google-cloud-dns] +=== Google Cloud DNS + +. If you host your DNS on Google Cloud, you need the `certbot-dns-google` plugin installed. You can install the plugin with the following command: ++ +[source,console] +---- +$ python3 -m pip install certbot-dns-google +---- +. The service account used to run `certbot` will need to have access to Cloud DNS in order to provision the necessary records used by Let's Encrypt for domain validation. +.. Create a service account: ++ +[source,console] +---- +$ gcloud iam service-accounts create --description="" \ + --display-name="" +---- +.. Retrieve credentials for the service account: ++ +[source,console] +---- +$ gcloud iam service-accounts keys create \ + --iam-account @.iam.gserviceaccount.com +---- +.. Create a custom role for Certbot: ++ +[source,console] +---- +$ gcloud iam roles create certbot --project= \ + --title="" --description="<DESCRIPTION>" \ + --permissions="dns.changes.create,dns.changes.get,dns.changes.list,dns.managedZones.get,dns.managedZones.list,dns.resourceRecordSets.create,dns.resourceRecordSets.delete,dns.resourceRecordSets.list,dns.resourceRecordSets.update" \ + --stage=ALPHA +---- +.. Bind the new role to the service account which we created earlier: ++ +[source,console] +---- +$ gcloud projects add-iam-policy-binding <PROJECT_ID> \ + --member="serviceAccount:<SERVICE_ACCOUNT_ID>@<PROJECT_ID>.iam.gserviceaccount.com" \ + --role="<ROLE_NAME>" +---- +. 
Finally, the following commands will provision a certification for your installation: ++ +[source,console] +---- +$ certbot certonly --dns-google --dns-google-credentials <KEY_FILE> -d "<CIRCLECI_SERVER_DOMAIN>" -d "app.<CIRCLECI_SERVER_DOMAIN>" +---- + +endif::env-aws[] + +// Do not include in GCP page +ifndef::env-gcp[] + +[#aws-certmanager] +=== AWS Certificate Manager +Instead of provisioning your own TLS certificates, if you are setting up CircleCI server in an AWS environment, you can have AWS provision TLS certificates using Certificate Manager. + +[source,console] +---- +$ aws acm request-certificate \ + --domain-name <CIRCLECI_SERVER_DOMAIN> \ + --subject-alternative-names app.<CIRCLECI_SERVER_DOMAIN> \ + --validation-method DNS \ + --idempotency-token circle +---- + +After running this command, navigate to the Certificate Manager AWS console and follow the wizard to provision the required DNS validation records with Route53. Take note of the ARN of the certificate once it is issued. + +endif::env-gcp[] + +[#upstream-tls] +=== Upstream TLS termination +You may have a requirement to terminate TLS for CircleCI server outside the application. This termination option is an alternate method to using ACM or supplying the certificate chain during Helm deployment. An example would be a proxy running in front of the CircleCI installation providing TLS termination for your domain name. In this case the CircleCI application acts as the backend for your load balancer or proxy. + +CircleCI server listens on the following port numbers, which need to be configured depending how you are routing the traffic: + +* Frontend / API Gateway [TCP 80, 443] +* Nomad server [TCP 4647] + +Depending on your requirements you may choose to terminate TLS for only the `frontend/api-gateway` or provide TLS for services listening on all the ports. + +[#encryption-signing-keys] +== 6. 
Encryption/signing keys +The keysets generated in this section are used to encrypt and sign artifacts generated by CircleCI. You will need these values to configure server. + +CAUTION: Store these values securely. If they are lost, job history and artifacts will not be recoverable. + +[#artifact-signing-key] +=== a. Artifact signing key +To generate an artifact signing key, run the following command: + +[source,console] +---- +$ docker run circleci/server-keysets:latest generate signing -a stdout +---- + +[#encryption-signing-key] +=== b. Encryption signing key +To generate an encryption signing key, run the following command: + +[source,console] +---- +$ docker run circleci/server-keysets:latest generate encryption -a stdout +---- + +[#object-storage-and-permissions] +== 7. Object storage and permissions +CircleCI server 4.9 hosts build artifacts, test results, and other state object storage. The following storage options are supported: + +* link:https://aws.amazon.com/s3/[AWS S3] + +* link:https://cloud.google.com/storage/[Google Cloud Storage] + +* link:https://min.io/[MinIO] + +While any S3 compatible object storage may work, we test and support AWS S3 and MinIO. Follow the instructions below to create a bucket and access method for AWS S3 or GCS. + +If you are installing locally rather than in AWS or GCP, follow the MinIO instructions in the xref:server-admin:air-gapped-installation:phase-2-configure-object-storage.adoc#[Air-gapped installation docs]. + +NOTE: If you are installing behind a proxy, object storage should be behind this proxy also. Otherwise, proxy details will need to be supplied at the job level within every project `.circleci/config.yml` to allow artifacts, test results, cache save and restore, and workspaces to work. For more information see the xref:server-admin:installation:installing-server-behind-a-proxy.adoc#[Installing server behind a proxy] guide. + +// Do not include in GCP page. 
+ifndef::env-gcp[] + +[#s3-storage] +=== AWS S3 storage + +[#create-aws-s3-bucket] +==== a. Create AWS S3 bucket + +[source,console] +---- +$ aws s3api create-bucket \ + --bucket <YOUR_BUCKET_NAME> \ + --region <YOUR_REGION> \ + --create-bucket-configuration LocationConstraint=<YOUR_REGION> +---- + +==== b. Enable bucket versioning + +To use the link:https://circleci.com/docs/docker-layer-caching/[Docker layer caching] (DLC) feature in CircleCI, link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html[bucket versioning] needs to be enabled. Run the following command to enable bucket versioning on the bucket created in the previous step: + +[source,console] +---- +$ aws s3api put-bucket-versioning \ + --bucket <YOUR_BUCKET_NAME> \ + --region <YOUR_REGION> \ + --versioning-configuration Status=Enabled +---- + +[#set-up-authentication-aws] +==== c. Set up authentication + +Authenticate CircleCI with S3 in one of two ways: + +* IAM Roles for Service Accounts (IRSA) - **recommended** +* IAM access keys + +[tabs] +==== +IRSA:: ++ +-- +**Option 1:** IRSA + +The following is a summary of link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[AWS's Documentation on IRSA] that is sufficient for installing CircleCI. + +. Create an IAM OIDC Identity Provider for your EKS Cluster. ++ +[source,console] +---- +$ eksctl utils associate-iam-oidc-provider --cluster <CLUSTER_NAME> --approve +---- + +. Get the OIDC provider ARN. You will need this in later steps. ++ +[source,console] +---- +$ aws iam list-open-id-connect-providers | grep $(aws eks describe-cluster --name <CLUSTER_NAME> --query "cluster.identity.oidc.issuer" --output text | awk -F'/' '{print $NF}') +---- + +. Get your OIDC provider URL. You will need this in later steps. ++ +[source,console] +---- +$ aws eks describe-cluster --name <CLUSTER_NAME> --query "cluster.identity.oidc.issuer" --output text | sed -e "s/^https:\/\///" +---- + +. 
Create the role using the command and trust policy template below. You will need the Role ARN and name in later steps. ++ +[source,console] +---- +$ aws iam create-role --role-name circleci-s3 --assume-role-policy-document file://<TRUST_POLICY_FILE> +---- ++ +[source, json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "<OIDC_PROVIDER_ARN>" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "<OIDC_PROVIDER_URL>:sub": "system:serviceaccount:<K8S_NAMESPACE>:object-storage" + } + } + } + ] +} +---- ++ +NOTE: If you wish to store artifacts which are larger than 5GB, you will need to disable presigned mode. To do this you will need your IRSA role to assume itself. Replace your trust policy above with the contents below. ++ +[source, json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "<OIDC_PROVIDER_ARN>" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "<OIDC_PROVIDER_URL>:sub": "system:serviceaccount:<K8S_NAMESPACE>:object-storage" + } + } + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "<ROLE_ARN>" + }, + "Action": "sts:AssumeRole" + } + ] +} +---- + +. Create the policy using the command and template below. Fill in the bucket name and the role ARN. 
++ +[source,console] +---- +$ aws iam create-policy --policy-name circleci-s3 --policy-document file://<POLICY_FILE> +---- ++ +[source, json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutAnalyticsConfiguration", + "s3:GetObjectVersionTagging", + "s3:CreateBucket", + "s3:GetObjectAcl", + "s3:GetBucketObjectLockConfiguration", + "s3:DeleteBucketWebsite", + "s3:PutLifecycleConfiguration", + "s3:GetObjectVersionAcl", + "s3:PutObjectTagging", + "s3:DeleteObject", + "s3:DeleteObjectTagging", + "s3:GetBucketPolicyStatus", + "s3:GetObjectRetention", + "s3:GetBucketWebsite", + "s3:GetJobTagging", + "s3:DeleteObjectVersionTagging", + "s3:PutObjectLegalHold", + "s3:GetObjectLegalHold", + "s3:GetBucketNotification", + "s3:PutBucketCORS", + "s3:GetReplicationConfiguration", + "s3:ListMultipartUploadParts", + "s3:PutObject", + "s3:GetObject", + "s3:PutBucketNotification", + "s3:DescribeJob", + "s3:PutBucketLogging", + "s3:GetAnalyticsConfiguration", + "s3:PutBucketObjectLockConfiguration", + "s3:GetObjectVersionForReplication", + "s3:GetLifecycleConfiguration", + "s3:GetInventoryConfiguration", + "s3:GetBucketTagging", + "s3:PutAccelerateConfiguration", + "s3:DeleteObjectVersion", + "s3:GetBucketLogging", + "s3:ListBucketVersions", + "s3:ReplicateTags", + "s3:RestoreObject", + "s3:ListBucket", + "s3:GetAccelerateConfiguration", + "s3:GetBucketPolicy", + "s3:PutEncryptionConfiguration", + "s3:GetEncryptionConfiguration", + "s3:GetObjectVersionTorrent", + "s3:AbortMultipartUpload", + "s3:PutBucketTagging", + "s3:GetBucketRequestPayment", + "s3:GetAccessPointPolicyStatus", + "s3:GetObjectTagging", + "s3:GetMetricsConfiguration", + "s3:PutBucketVersioning", + "s3:GetBucketPublicAccessBlock", + "s3:ListBucketMultipartUploads", + "s3:PutMetricsConfiguration", + "s3:PutObjectVersionTagging", + "s3:GetBucketVersioning", + "s3:GetBucketAcl", + "s3:PutInventoryConfiguration", + "s3:GetObjectTorrent", + "s3:PutBucketWebsite", + 
"s3:PutBucketRequestPayment", + "s3:PutObjectRetention", + "s3:GetBucketCORS", + "s3:GetBucketLocation", + "s3:GetAccessPointPolicy", + "s3:GetObjectVersion", + "s3:GetAccessPoint", + "s3:GetAccountPublicAccessBlock", + "s3:ListAllMyBuckets", + "s3:ListAccessPoints", + "s3:ListJobs" + ], + "Resource": [ + "arn:aws:s3:::<YOUR_BUCKET_NAME>", + "arn:aws:s3:::<YOUR_BUCKET_NAME>/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:GetRole", + "sts:AssumeRole" + ], + "Resource": "<OBJECT_STORAGE_ROLE_ARN>" + } + ] +} +---- + +. Attach the policy to the role: ++ +[source,console] +---- +$ aws iam attach-role-policy --role-name <OBJECT_STORAGE_ROLE_NAME> --policy-arn=<STORAGE_POLICY_ARN> +---- +-- +IAM access keys:: ++ +-- +**Option 2:** IAM access keys + +NOTE: If you wish to store artifacts which are larger than 5GB, you will need to disable presigned mode which requires an AWS role. Disabling presigned mode will enable the use of multi-part uploads to S3 which can support larger files and potentially faster transfers. We recommend you follow the instructions for creating an IRSA role in this case. + +. Create an IAM user for CircleCI server. ++ +[source,console] +---- +$ aws iam create-user --user-name circleci-server +---- + +. Create a policy document `policy.json`.
++ +[source, json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutAnalyticsConfiguration", + "s3:GetObjectVersionTagging", + "s3:CreateBucket", + "s3:GetObjectAcl", + "s3:GetBucketObjectLockConfiguration", + "s3:DeleteBucketWebsite", + "s3:PutLifecycleConfiguration", + "s3:GetObjectVersionAcl", + "s3:PutObjectTagging", + "s3:DeleteObject", + "s3:DeleteObjectTagging", + "s3:GetBucketPolicyStatus", + "s3:GetObjectRetention", + "s3:GetBucketWebsite", + "s3:GetJobTagging", + "s3:DeleteObjectVersionTagging", + "s3:PutObjectLegalHold", + "s3:GetObjectLegalHold", + "s3:GetBucketNotification", + "s3:PutBucketCORS", + "s3:GetReplicationConfiguration", + "s3:ListMultipartUploadParts", + "s3:PutObject", + "s3:GetObject", + "s3:PutBucketNotification", + "s3:DescribeJob", + "s3:PutBucketLogging", + "s3:GetAnalyticsConfiguration", + "s3:PutBucketObjectLockConfiguration", + "s3:GetObjectVersionForReplication", + "s3:GetLifecycleConfiguration", + "s3:GetInventoryConfiguration", + "s3:GetBucketTagging", + "s3:PutAccelerateConfiguration", + "s3:DeleteObjectVersion", + "s3:GetBucketLogging", + "s3:ListBucketVersions", + "s3:ReplicateTags", + "s3:RestoreObject", + "s3:ListBucket", + "s3:GetAccelerateConfiguration", + "s3:GetBucketPolicy", + "s3:PutEncryptionConfiguration", + "s3:GetEncryptionConfiguration", + "s3:GetObjectVersionTorrent", + "s3:AbortMultipartUpload", + "s3:PutBucketTagging", + "s3:GetBucketRequestPayment", + "s3:GetAccessPointPolicyStatus", + "s3:GetObjectTagging", + "s3:GetMetricsConfiguration", + "s3:PutBucketVersioning", + "s3:GetBucketPublicAccessBlock", + "s3:ListBucketMultipartUploads", + "s3:PutMetricsConfiguration", + "s3:PutObjectVersionTagging", + "s3:GetBucketVersioning", + "s3:GetBucketAcl", + "s3:PutInventoryConfiguration", + "s3:GetObjectTorrent", + "s3:PutBucketWebsite", + "s3:PutBucketRequestPayment", + "s3:PutObjectRetention", + "s3:GetBucketCORS", + "s3:GetBucketLocation", + 
"s3:GetAccessPointPolicy", + "s3:GetObjectVersion", + "s3:GetAccessPoint", + "s3:GetAccountPublicAccessBlock", + "s3:ListAllMyBuckets", + "s3:ListAccessPoints", + "s3:ListJobs" + ], + "Resource": [ + "arn:aws:s3:::<YOUR_BUCKET_NAME>", + "arn:aws:s3:::<YOUR_BUCKET_NAME>/*" + ] + } + ] +} +---- + +. Attach policy to user. ++ +[source,console] +---- +$ aws iam put-user-policy \ + --user-name circleci-server \ + --policy-name circleci-server \ + --policy-document file://policy.json +---- + +. Create Access Key for user `circleci-server`. ++ +NOTE: You will need this when you configure your server installation later. ++ +[source,console] +---- +$ aws iam create-access-key --user-name circleci-server +---- ++ +The result should look like this: ++ +[source, json] +---- +{ + "AccessKey": { + "UserName": "circleci-server", + "Status": "Active", + "CreateDate": "2017-07-31T22:24:41.576Z", + "SecretAccessKey": <AWS_SECRET_ACCESS_KEY>, + "AccessKeyId": <AWS_ACCESS_KEY_ID> + } +} +---- +-- +==== + +endif::env-gcp[] + +// Do not include in AWS page +ifndef::env-aws[] + +[#google-cloud-storage] +=== Google Cloud Storage + +[#create-a-gcp-bucket] +==== a. Create a GCP bucket +If your server installation runs in a GKE cluster, ensure that your current IAM user is cluster admin. RBAC (role-based access control) objects need to be created so admin permissions are required. More information can be found in the link:https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control[GKE documentation]. + +[source,console] +---- +$ gsutil mb gs://circleci-server-bucket +---- + +[#set-up-authentication-gcp] +==== b. Set up authentication +The recommended method for workload/pod authentication is to use link:https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity[Workload Identity]. However, you may also use static credentials (JSON key file). + +. Create a Service Account. 
++ +[source,console] +---- +$ gcloud iam service-accounts create circleci-storage --description="Service account for CircleCI object storage" --display-name="circleci-storage" +---- + +. Bind the `objectAdmin` role to the service account. ++ +[source,console] +---- +$ gcloud projects add-iam-policy-binding <PROJECT_ID> \ + --member="serviceAccount:circleci-storage@<PROJECT_ID>.iam.gserviceaccount.com" \ + --role="roles/storage.objectAdmin" \ + --condition='expression=resource.name.startsWith("projects/_/buckets/circleci-server-bucket"),title=restrict_bucket' +---- + +. Either enable Workload Identity or use static credentials. ++ +[tabs] +==== +Workload Identity:: ++ +-- +**Option 1:** Workload Identity + +When using Workload Identity you need to configure your account such that the workloads/pods can access the storage bucket from the cluster using the Kubernetes service account `"<K8S_NAMESPACE>/object-storage"`. + +[source,console] +---- +$ gcloud projects add-iam-policy-binding <PROJECT_ID> \ + --member serviceAccount:circleci-storage@<PROJECT_ID>.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --condition=None + +$ gcloud iam service-accounts add-iam-policy-binding circleci-storage@<PROJECT_ID>.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:<PROJECT_ID>.svc.id.goog[<K8S_NAMESPACE>/object-storage]" +---- + +[source,console] +---- +$ gcloud projects add-iam-policy-binding <PROJECT_ID> \ + --member serviceAccount:circleci-storage@<PROJECT_ID>.iam.gserviceaccount.com \ + --role roles/iam.serviceAccountTokenCreator \ + --condition=None +---- +-- +Static credentials:: ++ +-- +**Option 2:** Static credentials + +If you are not using Workload Identity, create a JSON file containing static credentials. 
+ +[source,console] +---- +$ gcloud iam service-accounts keys create <KEY_FILE> \ + --iam-account circleci-storage@<PROJECT_ID>.iam.gserviceaccount.com +---- +-- +==== + +endif::env-aws[] + +ifndef::env-gcp[] + +== 8. Externalize RabbitMQ + +We use RabbitMQ for workflow messaging, test results, usage, cron tasks, output, notifications, and scheduler. + +You can choose to disable the deployment of the internal RabbitMQ instance and instead use your own RabbitMQ instance or the link:https://aws.amazon.com/amazon-mq/[Amazon MQ message broker service]. + +To choose this option make sure you have your RabbitMQ or Amazon MQ instance available for the next phase of the installation. In the next phase you can then update your `values.yaml` file to use your own RabbitMQ or Amazon MQ instance instead of the internal RabbitMQ instance. The relevant configuration in this case is the following: + +[source,yaml] +---- +rabbitmq.host: <your-host-connection-url> +rabbitmq.internal: false +---- +endif::env-gcp[] + +ifndef::pdf[] +[#next-steps] +== Next steps +ifndef::env-aws[] +* xref:server-admin:installation:phase-2-gcp-core-services.adoc#[Phase 2 GCP: Core Services Installation]. +endif::env-aws[] + +ifndef::env-gcp[] +* xref:server-admin:installation:phase-2-aws-core-services.adoc#[Phase 2 AWS: Core Services Installation]. 
+endif::env-gcp[] + + +endif::[] diff --git a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc new file mode 100644 index 0000000000..ca9442ecce --- /dev/null +++ b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc @@ -0,0 +1,1005 @@ +Before you begin with the CircleCI server 4.9 core services installation phase, ensure all prerequisites are met, as described in the following page: + +ifndef::env-gcp[] +* xref:server-admin:installation:phase-1-aws-prerequisites.adoc#[Phase 1 - AWS Prerequisites] +endif::env-gcp[] + +ifndef::env-aws[] +* xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#[Phase 1 - GCP Prerequisites] +endif::env-aws[] + +NOTE: In the following sections, replace any sections indicated by `< >` with your details. + +[#create-a-namespace] +== 1. Create a namespace +Create a namespace to install the application into. + +[source,shell] +---- +kubectl create ns <namespace> +---- + +TIP: Once you have created your namespace, we recommend setting your `kubectl` context too, with the following command: `kubectl config set-context --current --namespace <namespace>` + +[#pull-images] +== 2. Pull images + +Credentials to pull the images from CircleCI's image registry will be provided to you as part of the onboarding process. A `docker-registry` Kubernetes Secret will be used to pull images from Azure Container Registry (ACR). You have two options, depending on whether your application has access to the public internet. + +[tabs] +==== +Public:: ++ +-- +**Option 1:** Your application has access to the public internet. + +This example creates a Kubernetes Secret to enable deployments to pull images from CircleCI's image registry. 
The `docker-registry` Kubernetes Secret takes the following form: + +[source,shell] +---- +kubectl -n <namespace> create secret docker-registry regcred \ + --docker-server=cciserver.azurecr.io \ + --docker-username=<your-username> \ + --docker-password="<provided-token>" \ + --docker-email=<your-contact-email> +---- +-- +Private:: ++ +-- +**Option 2:** Your application does NOT have access to the public internet. + +The credentials provided to you allow you to pull and store copies of our images locally. Pull and store the images in whichever Docker repository you have available. The `docker-registry` Kubernetes Secret takes the following form: + +[source,shell] +---- +kubectl -n <namespace> create secret docker-registry regcred \ + --docker-server=<your-docker-image-repo> \ + --docker-username=<your-username> \ + --docker-password=<your-access-token> \ + --docker-email=<your-email> +---- +-- +==== + +[#create-helm-values] +== 3. Create Helm values + +Before installing CircleCI, it is recommended to create a new `values.yaml` file unique to your installation. xref:server-admin:installation:installation-reference.adoc#example-manifests[The Installation Reference section] contains some example `values.yaml` files that are a good place to start. The following describes the minimum required values to include in `values.yaml`. Additional customizations are available, see the provided `values.yaml` for all available options. + +For sensitive data there are two options: + +* Add into the `values.yaml` file +* Add them as Kubernetes Secrets directly + +This flexibility allows you to manage Kubernetes Secrets using whichever process you prefer. Whichever option you choose, this sensitive information is stored as a Kubernetes Secret within CircleCI. + +NOTE: During the installation process, you may use the following command to generate a random alphanumeric value as required: `cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | head -c <num-of-chars>`. 
This command should work on any *nix based system. + +[#api-token] +=== a. API token + +The application requires a Kubernetes Secret containing an API token. This API token is used to facilitate internal API communication to api-service. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. There are two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic api-token \ + --from-literal=api-token=<your-super-secret-random-value> + +kubectl -n <namespace> annotate secret/api-token \ + meta.helm.sh/release-name=<helm-release-name> \ + meta.helm.sh/release-namespace=<namespace> \ + helm.sh/resource-policy=keep --overwrite + +kubectl -n <namespace> label secret/api-token \ + app.kubernetes.io/managed-by=Helm --overwrite +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret for you. + +CircleCI will create the Kubernetes Secret "api-token" automatically. + +-- +==== + +[#session-cookie] +=== b. Session cookie + +The application requires a session cookie key Kubernetes Secret, which CircleCI uses to sign session cookies. The Secret must be exactly 16 characters long. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. There are two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. 
+ +[source,shell] +---- +kubectl -n <namespace> create secret generic session-cookie \ +--from-literal=session-cookie-key=<your-secret-key-16-chars> + +kubectl -n <namespace> annotate secret/session-cookie \ + meta.helm.sh/release-name=<helm-release-name> \ + meta.helm.sh/release-namespace=<namespace> \ + helm.sh/resource-policy=keep --overwrite + +kubectl -n <namespace> label secret/session-cookie \ + app.kubernetes.io/managed-by=Helm --overwrite +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret for you. + +CircleCI will create the Kubernetes Secret "session-cookie" automatically. + +-- +==== + +[#encryption] +=== c. Encryption + +The application requires a Kubernetes Secret containing signing and encryption keysets. These keysets are used to encrypt and sign artifacts generated by CircleCI. These keys were created during the prerequisites phase (xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#encryption-signing-keys[GCP prerequisites], xref:server-admin:installation:phase-1-aws-prerequisites.adoc#encryption-signing-keys[AWS prerequisites]). CircleCI will not be able to recover the values if lost. Depending on how you prefer to manage Kubernetes Secrets, there are two options. + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic signing-keys \ + --from-literal=signing-key=<your-generated-signing-key> \ + --from-literal=encryption-key=<your-generated-encryption-key> +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +Add the value to `values.yaml`. CircleCI will create the Secret automatically. + +[source,yaml] +---- +keyset: + signing: '<your-generated-signing-key>' + encryption: '<your-generated-encryption-key>' +---- +-- +==== + +[#postgres] +=== d. PostgreSQL + +[#postgres-credentials] +==== i. 
Credentials +The application requires a Kubernetes Secret containing PostgreSQL credentials. This is true when using either the internal (default) or an externally hosted instance of PostgreSQL. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic postgresql \ + --from-literal=postgres-password=<postgres-password> +---- + +You must then provide the following to the `values.yaml` file: + +[source,yaml] +---- +postgresql: + auth: + existingSecret: postgresql +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +Add the credentials to `values.yaml`, and CircleCI will create the Secret automatically. + +[source,yaml] +---- +postgresql: + auth: + postgresPassword: "<postgres-password>" +---- +-- +==== + +[#postgres-tls] +==== ii. TLS +PostgreSQL may be extended to use TLS encrypted traffic. When deployed internally, this option is disabled by default but may be enabled by adding the following to your PostgreSQL block of your `values.yaml` + +[source,yaml] +---- +postgresql: + ... + tls: + enabled: true + autoGenerated: true # Generate automatically self-signed TLS certificates +---- + +Certificate files may also be provided, rather than autogenerated. In this case, create a Secret containing the TLS certs and keys needed. + +[source,shell] +---- +kubectl -n <namespace> create secret generic postgres-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem +---- + +Then the PostgreSQL block in your `values.yaml` will contain the contents below. + +[source,yaml] +---- +postgresql: + ...
+ tls: + enabled: true + certificatesSecret: "postgres-tls-secret" # Name of an existing secret that contains the certificates + certFilename: "cert.pem" # Certificate filename + certKeyFilename: "cert.key" # Certificate key filename + certCAFilename: "ca.pem" # CA Certificate filename +---- + +=== e. MongoDB credentials + +The application requires a Kubernetes Secret containing MongoDB credentials. This is true when using either the internal (default) or an externally hosted instance of MongoDB. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic mongodb-credentials \ + --from-literal=mongodb-root-password=<root-password> \ + --from-literal=mongodb-password=<user-password> +---- + +You must then provide the following to the `values.yaml` file: + +[source,yaml] +---- +mongodb: + auth: + existingSecret: mongodb-credentials +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +Add the credentials to `values.yaml`, and CircleCI will create the Secret automatically. + +[source,yaml] +---- +mongodb: + auth: + rootPassword: "<root-password>" + password: "<user-password>" +---- +-- +==== + +[#rabbinmq-configurations-and-auth-secrets] +=== f. RabbitMQ configurations and auth Secrets + +The RabbitMQ installation requires two random alphanumeric strings. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Secret yourself. 
+ +[source,shell] +---- +kubectl -n <namespace> create secret generic rabbitmq-key \ +--from-literal=rabbitmq-password=<secret-alphanumeric-password> \ +--from-literal=rabbitmq-erlang-cookie=<secret-alphanumeric-key> +---- + +You must then provide the following to the `values.yaml` file: + +[source,yaml] +---- +rabbitmq: + auth: + existingPasswordSecret: rabbitmq-key + existingErlangSecret: rabbitmq-key +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +Add the value to `values.yaml`, and CircleCI will create the Kubernetes Secret automatically. + +[source,yaml] +---- +rabbitmq: + auth: + password: "<secret-alphanumeric-password>" + erlangCookie: "<secret-alphanumeric-key>" +---- +-- +==== + +[#pusher-kubernetes-secret] +=== g. Pusher Kubernetes Secret +The application requires a Kubernetes Secret for Pusher. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are 2 options: + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic pusher \ +--from-literal=secret=<pusher-secret> + +kubectl -n <namespace> annotate secret/pusher \ + meta.helm.sh/release-name=<helm-release-name> \ + meta.helm.sh/release-namespace=<namespace> \ + helm.sh/resource-policy=keep --overwrite + +kubectl -n <namespace> label secret/pusher \ + app.kubernetes.io/managed-by=Helm --overwrite +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +CircleCI will create the Kubernetes Secret `pusher` automatically. + +-- +==== + +[#global] +=== h. Global +All values in this section are children of `global` in your `values.yaml`. 
+ +[#circleci-domain-name] +==== CircleCI domain name (required) +Enter the domain name you specified when creating your Frontend TLS key and certificate (xref:server-admin:installation:phase-1-aws-prerequisites.adoc#frontend-tls-certificates[AWS], xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#frontend-tls-certificates[GCP]). + +[source,yaml] +---- +global: + ... + domainName: "<full-domain-name-of-your-install>" +---- + +[#license] +==== License +A license will be provided by CircleCI, add it to `values.yaml`: + +[source,yaml] +---- +global: + ... + license: '<license>' +---- + +[#Registry] +==== Registry +The registry to pull images from will have been provided to you, or you may have added the images to your own hosted registry. Add the registry to `values.yaml`: + +[source,yaml] +---- +global: + ... + container: + registry: <registry-domain eg: cciserver.azurecr.io > + org: <your-org-if-applicable> +---- + + +// Don't show in AWS page +ifndef::env-aws[] + +[#gcp-add-static-ip] +=== i. Add static IP address + +If you provisioned a Static IP (GCP) in the prerequisites, you can now add the value under the nginx block. + +For GCP, add the provisioned IPv4 address under the `loadBalancerIp` field in the nginx block. + +[source,yaml] +---- +nginx: + ... + loadBalancerIp: "<gcp-provisioned-ipv4-address>" + +---- + +endif::env-aws[] + +// Don't show in GCP page +ifndef::env-gcp[] + +[#aws-add-elastic-ip] +=== i. Add Elastic IP addresses + +If you provisioned an Elastic IP addresses (AWS) in the prerequisites, you can now add the values under the nginx block. + +For AWS, under nginx annotations, add the `service.beta.kubernetes.io/aws-load-balancer-eip-allocations` annotation with each of the `AllocationId` values generated as a comma separated list. The number of `AllocationId`s must match the number of subnets the load balancer is deployed into (default 3). + +[source,yaml] +---- +nginx: + ... + annotations: + ... 
+ service.beta.kubernetes.io/aws-load-balancer-eip-allocations: <eip-id-1>,<eip-id-2>,<eip-id-3> +---- + +endif::env-gcp[] + +[#tls] +=== j. TLS +For TLS, you have a few options: + +[tabs] +==== +Lets Encrypt:: ++ +-- +*Let's Encrypt* + +https://letsencrypt.org/[Let's Encrypt] will request and manage certificates for you. This is a good option when the load balancer is publicly accessible. The following snippet (using your own email) can be added to `values.yaml`: + +[source,yaml] +---- +kong: + acme: + enabled: true + email: contact@example.com +---- + +NOTE: Let's Encrypt may take up to 30 minutes to be reflected in your browser. +-- ++ +Key & certificate:: ++ +-- +*Supply a private key and certificate* + +You can supply a private key and certificate, which you may have created during the prerequisites steps. The key and certificates will need to be base64 encoded. You can retrieve and encode the values with the following commands: + +[source,bash] +---- +cat /etc/letsencrypt/live/<CIRCLECI_SERVER_DOMAIN>/privkey.pem | base64 +cat /etc/letsencrypt/live/<CIRCLECI_SERVER_DOMAIN>/fullchain.pem | base64 +---- + +And add them to `values.yaml`: + +[source,yaml] +---- +tls: + certificate: '<full-chain>' + privateKey: '<private-key>' +---- +-- ++ +// Don't show in GCP page +ifndef::env-gcp[] +AWS Certificate Manager:: ++ +-- +*Use ACM* + +Have link:https://docs.aws.amazon.com/acm/latest/userguide/acm-overview.html[AWS Certificate Manager (ACM)] automatically request and manage certificates for you. Follow the link:https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html[ACM documentation] for instructions on how to generate ACM certificates. 
+ +Enable `aws_acm` and add the `service.beta.kubernetes.io/aws-load-balancer-ssl-cert` annotation to point at the ACM ARN: + +[source,yaml] +---- +nginx: + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: <acm-arn> + aws_acm: + enabled: true +---- + +**** +If you have already deployed CircleCI server, enabling ACM is a destructive change to the load balancer. The service will have to be regenerated to allow the use of your ACM certificates and so the associated load balancer will also be regenerated. +You will need to update your DNS records to the new load balancer once you have redeployed CircleCI server. +**** +-- +endif::env-gcp[] +Terminate TLS:: ++ +-- +*Disable TLS within CircleCI* + +You can choose to disable TLS termination within CircleCI. The system will still need to be accessed over HTTPS, so TLS termination will be required somewhere upstream of CircleCI. Implement this by following the first option (do nothing) and forward the following ports to your CircleCI load balancer: + +* Frontend / API Gateway [TCP 80, 443] +* Nomad server [TCP 4647] + +-- +==== + +[#github-integration] +=== k. GitHub integration +To configure GitHub with CircleCI, there are two options for providing credentials to the deployment. Steps for both GitHub and GitHub Enterprise (GHE) are given in the next two sections. + +[#github] +==== i. GitHub +These instructions are for the GitHub.com, **not** GitHub Enterprise. Use the client ID and secret you created with your GitHub OAuth application in the prerequisites phase (xref:server-admin:installation:phase-1-aws-prerequisites.adoc#create-a-new-github-oauth-app[AWS], xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#create-a-new-github-oauth-app[GCP]). + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. 
+ +[source,shell] +---- +kubectl -n <namespace> create secret generic github-secret \ + --from-literal=clientId=<client-id> \ + --from-literal=clientSecret=<client-secret> +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +Add the client ID and Secret to the `values.yaml` file. CircleCI will create the Kubernetes Secret automatically. + +[source,yaml] +---- +github: + clientId: "<client-id>" + clientSecret: "<client-secret>" +---- +-- +==== + +[#github-enterprise-integration] +==== ii. GitHub Enterprise + +The instructions for GitHub Enterprise are similar, with a few extra steps to enable Enterprise and create the required default token. + +In the case of GitHub Enterprise add the `defaultToken` created in the prerequisites phase (xref:server-admin:installation:phase-1-aws-prerequisites.adoc#create-a-new-github-oauth-app[AWS], xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#create-a-new-github-oauth-app[GCP]) to the `GitHub` section. The hostname should not include the protocol, ex: `github.exampleorg.com`. + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic github-secret \ + --from-literal=clientId=<client-id> \ + --from-literal=clientSecret=<client-secret> \ + --from-literal=defaultToken=<default-token> +---- + +You must then provide the following to the `values.yaml` file: + +[source,yaml] +---- +github: + enterprise: true + hostname: "<github-enterprise-hostname>" +---- +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +Add `clientID`, `clientSecret` and `defaultToken` to +the `values.yaml` file. You must also set `enterprise` to `true`, and provide the `hostname` for your enterprise GitHub. CircleCI will create the Kubernetes Secret automatically. + +[source,yaml] +---- +github: + ... 
+ clientId: "<client-id>" + clientSecret: "<client-secret>" + enterprise: true + hostname: "<github-enterprise-hostname>" + defaultToken: "<token>" +---- +-- +==== + + +[#object-storage] +=== l. Object storage + +ifndef::env-gcp[] +Regardless of your storage provider, the bucket name you xref:server-admin:installation:phase-1-aws-prerequisites.adoc#object-storage-and-permissions[created during the prerequisites phase] will need to be included. +endif::env-gcp[] + +ifndef::env-aws[] +Regardless of your storage provider, the bucket name you xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#object-storage-and-permissions[created during the prerequisites phase] will need to be included. +endif::env-aws[] + +[source,yaml] +---- +object_storage: + bucketName: "<bucket-name>" +---- + +// Don't include this section in the GCP PDF. +ifndef::env-gcp[] + +[#s3-compatible] +==== S3 compatible +Add an `s3` section as a child of `object_storage`. The `endpoint` in the case of AWS S3 is the link:https://docs.aws.amazon.com/general/latest/gr/rande.html[regional endpoint], it is of the form `https://s3.<region>.amazonaws.com`. Otherwise it is the API endpoint fo your object storage server. + +[source,yaml] +---- +object_storage: + ... + s3: + enabled: true + endpoint: "<storage-server-or-s3-endpoint>" +---- + +Under `object_storage.s3`, you may provide the `accessKey` and `secretKey`, the `irsaRole`, or nothing. They were created during the prerequisites steps. + +[tabs] +==== +Use IAM keys:: ++ +-- +**Option 1:** Use IAM keys. + +Add the following to the `object_storage.s3` section: + +[source,yaml] +---- +object_storage: + ... + s3: + ... + accessKey: "<access-key>" + secretKey: "<secret-key>" +---- +-- +Use IRSA:: ++ +-- +**Option 2:** Use IRSA. + +A Kubernetes Secret will automatically be generated for you using your credentials. + +Add the following to the `object_storage.s3` section: + +[source,yaml] +---- +object_storage: + ... + s3: + ... 
+ region: "<role-region>" + irsaRole: "<irsa-arn>" +---- + +**Disable Presigned Mode (Optional)** +If you wish to store artifacts larger than 5GB, you will need to xref:installation:phase-1-aws-prerequisites.adoc#s3-storage[update your trust policy for your IRSA role]. Then disable presigned mode by adding the following to the `object_storage.s3` section: + +[source,yaml] +---- +object_storage: + ... + s3: + ... + presigned: false + storageRole: "<irsa-arn>" +---- +-- +You create Secret:: ++ +-- +**Option 3:** Create the Kubernetes Secret yourself + +Instead of providing AWS `accessKey` and `secretKey` credentials in your `values.yaml` file, you may choose to create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic object-storage-secret \ + --from-literal=s3AccessKey=<access-key> \ + --from-literal=s3SecretKey=<secret-key> +---- +-- +==== + +CircleCI server will use the credentials provided to authenticate to S3. + + +// Stop hiding from GCP PDF: +endif::env-gcp[] + +// Don't include this section in the AWS PDF: +ifndef::env-aws[] + +[#google-cloud-storage-object-storage] +==== Google Cloud Storage + +Under `object_storage` add the following. + +[source,yaml] +---- +gcs: + enabled: true +---- + +Under `object_storage.gcs` you may add `service_account`, `workloadIdentity`, or neither. The keys/role were created during the prerequisites steps. + +[tabs] +==== +Use service account:: ++ +-- +**Option 1:** Use a service account. + +Add a JSON format key of the Service Account to use for bucket access. Add the following to the `object_storage.gcs` section: + +[source,yaml] +---- +service_account: "<service-account>" +---- +-- +Use Workload Identity:: ++ +-- +**Option 2:** Use Workload Identity. + +Add the Service Account Email of the Workload Identity. 
Add the following to the `object_storage.gcs` section: + +[source,yaml] +---- +workloadIdentity: "<workload-identity-service-account-email>" +---- +-- +You create Secret:: ++ +-- +**Option 3:** Create the Kubernetes Secret yourself + +Instead of storing the service account in your `values.yaml` file, you may create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic object-storage-secret \ + --from-literal=gcs_sa.json=<service-account> +---- +-- +==== + +// Stop hiding from AWS PDF +endif::env-aws[] + +=== m. Installing behind a proxy +Depending on your security requirements, you might want to install CircleCI server behind a proxy. Installing behind a proxy gives you the power to monitor and control access between your installation and the broader Internet. For further information including limitations of installation behind a proxy, see the xref:server-admin:installation:installing-server-behind-a-proxy.adoc#[Installing server behind a proxy] guide. + +The following fields need to be configured in your `values.yaml`: + +* Toggle `proxy.enabled` to `"1"` +* Enter details for `proxy.http.host` and `proxy.https.host`, along with their associated ports. These values can be the same but they both need to be configured. +* For authentication you will need to configure `proxy.http.auth.enabled` and `proxy.https.auth.enabled` as `"1"`. You will also need to configure the respective username and password for both HTTP and HTTPS. +* configure the `no_proxy` hosts and subnets. This should include localhost, your GitHub Enterprise host (optional), the hostname of your CircleCI installation (see xref:server-admin:installation:installing-server-behind-a-proxy.adoc#known-limitations[Known Limitations] for an explanation), and the CIDR of Nomad. 
+ +[source,yaml] +---- +proxy: + enabled: "1" + http: + host: "<proxy.example.internal>" + port: "3128" + auth: + enabled: "1" + username: "<proxy-user>" + password: "<proxy-password>" + https: + host: "<proxy.example.internal>" + port: "3128" + auth: + enabled: "1" + username: "<proxy-user>" + password: "<proxy-password>" + no_proxy: + - localhost + - 127.0.0.1 + - "<github.example.internal>" + - "<circleci.example.internal>" + - "<nomad-subnet-cidr>" + - "<vpc-or-subnet-cidr>" # VPC or subnets to exclude from the proxy (optional) +---- + +=== n. Encrypting environment variables + +All environment variables stored in contexts are encrypted using either https://developers.google.com/tink[Google Tink] or https://www.vaultproject.io/[HashiCorp Vault]. We recommend the use of Tink as Vault has been deprecated. + +==== Use Tink + +The following steps cover using Tink as an alternative to Vault: + +. Enable Tink in your `values.yaml`: ++ +[source,yaml] +---- +tink: + enabled: true + keyset: "" +---- ++ +When `tink.enabled` is true, Vault will not be deployed. ++ +WARNING: Tink or Vault must be set once during installation, and cannot be changed after deployment. + +. Generate a link:https://developers.google.com/tink/design/keysets[keyset], which Tink uses to manage key rotation. The easiest way to do this is to use Google's link:https://developers.google.com/tink/tinkey-overview[Tinkey] CLI utility. Once https://developers.google.com/tink/install-tinkey[installed], use the following command: ++ +[source,shell] +---- +tinkey create-keyset --key-template XCHACHA20_POLY1305 +---- + +. CircleCI server will store your generated keyset in a Kubernetes Secret. You may generate this Secret in either of the following ways: ++ +[tabs] +==== +You create secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself + +Following the example below, create a Kubernetes Secret with the name `tink` and a key `keyset`. 
Apply this Secret to the namespace of your CircleCI server installation. + +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: tink +data: + keyset: <your-keyset> +---- +-- ++ +CircleCI creates secret:: ++ +-- +**Option 2:** CircleCI server will create the Kubernetes Secret + +You may add the keyset to your `values.yaml` under `tink` as in the example below. CircleCI will generate the required Secret to store your keyset. + +[source,yaml] +---- +tink: + enabled: true + keyset: "<your-keyset>" +---- +-- +==== + +WARNING: If your Tink keyset is somehow lost, you will need to generate a new keyset and then recreate your contexts and their associated Secrets. + +=== o. Setting up OIDC (optional) + +CircleCI server optionally supports xref:guides:permissions-authentication:openid-connect-tokens.adoc#[OpenID Connect (OIDC) tokens] in jobs. + +This feature is not enabled by default and requires a few additional steps to set up, as follows: + +. Generate a https://mkjwk.org/[JSON Web Key (JWK)] using the default parameters and copy the `Public and Private Keypair Set` to a secure and convenient location (for example, `~/oidc-service-jwk.json`). This key pair will be used by `oidc-service` to sign the OIDC tokens used in jobs. + +. The JWK will need to be base64 encoded without line wrapping before using it on server: +** On Linux: ++ +[source,shell] +---- +$ base64 -w0 ~/oidc-service-jwk.json +---- +** On macOS: ++ +[source,shell] +---- +$ base64 -b0 ~/oidc-service-jwk.json +---- ++ +TIP: Copy the output to a secure location, so it can be referenced in the next step. + +. Finally, enable `oidc-service` and supply it the base64 encoded JWK from your server Helm values file: ++ +[source,yaml] +---- +oidc_service: + isEnabled: true + json_web_keys: << set your base64 encoded JWK from step 2 here >> +---- + +[#deploy] +== 4. 
Deploy + +Once you have completed the fields detailed above, you can deploy CircleCI's core services: + +[source,shell,subs=attributes+] +---- +USERNAME=<provided-username> +PASSWORD=<token> +namespace=<your-namespace> +helm registry login cciserver.azurecr.io/circleci-server -u $USERNAME -p $PASSWORD +helm install circleci-server oci://cciserver.azurecr.io/circleci-server -n $namespace --version {serverversion47} -f <path-to-values.yaml> +---- + +[#create-dns-entry] +== 5. Create DNS entry +Create a DNS entry for your nginx load balancer, for example, `circleci.your.domain.com` and `app.circleci.your.domain.com`. The DNS entry should align with the DNS names used when creating your TLS certificate and GitHub OAuth app during the prerequisites steps. All traffic will be routed through this DNS record. + +You need the IP address, or, if using AWS, the DNS name of the nginx load balancer. You can find this information with the following command: + +[source,shell] +---- +kubectl get service -l app=circleci-proxy +---- + +[#validation] +== 6. Validation + +You should now be able to navigate to your CircleCI server installation and log in to the application successfully. + +Now we will move on to build services. It may take a while for all your services to be up. You can periodically check by running the following command (you are looking for the `frontend` pod to show a status of `running` and **ready** should show `1/1`): + +[source,shell] +---- +kubectl get pods -n <YOUR_CIRCLECI_NAMESPACE> +---- + +NOTE: Machine provisioner and Nomad server pods are expected to fail at this stage. You will set up your execution environments in the next phase of the installation. 
+ +ifndef::pdf[] + +[#next-steps] +== Next steps +ifndef::env-aws[] +* xref:server-admin:installation:phase-3-gcp-execution-environments.adoc#[Phase 3 GCP: Execution Environments Installation] +endif::env-aws[] + +ifndef::env-gcp[] +* xref:server-admin:installation:phase-3-aws-execution-environments.adoc#[Phase 3 AWS: Execution Environments Installation] +endif::env-gcp[] + +endif::[] diff --git a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-3.adoc b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-3.adoc new file mode 100644 index 0000000000..48ce4b1b6f --- /dev/null +++ b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-3.adoc @@ -0,0 +1,1176 @@ +Before you begin with the CircleCI server 4.9 execution environment installation phase, ensure you have run through the following: + +ifndef::env-aws[] +* xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#[Phase 1 GCP – Prerequisites] +* xref:server-admin:installation:phase-2-gcp-core-services.adoc#[Phase 2 GCP - Core Services Installation] +endif::env-aws[] + +ifndef::env-gcp[] +* xref:server-admin:installation:phase-1-aws-prerequisites.adoc#[Phase 1 AWS – Prerequisites] +* xref:server-admin:installation:phase-2-aws-core-services.adoc#[Phase 2 AWS - Core Services Installation] +endif::env-gcp[] + +NOTE: In the following sections, replace any sections indicated by `< >` with your details. + +== Introduction + +Execution environments in CircleCI server are resourced in the following ways: + +* HashiCorp Nomad is used for scheduling jobs and running Docker jobs. +** Nomad Servers can either be installed and configured within your Kubernetes cluster, or outside of your cluster within their own VMs. Nomad Servers are used for scheduling jobs. +** Nomad Clients are installed outside your Kubernetes cluster and are used for running jobs that are configured to use the Docker execution environment. 
+** CircleCI provides sample Terraform modules for you to reference and use to streamline the installation process if you are installing within AWS or GCP. Installing in this way also enables the use of the Nomad Autoscaler. + +* <<machine-provisioner,Machine provisioner>> is used in AWS/GCP installations to provision resources and run jobs that are configured to use the `machine` execution environment, including Linux and Windows VMs, including Arm resources, if your server installation is within AWS. If you install outside AWS or GCP (that is, a local installation) you will use self-hosted runners to run VM-configured jobs. + +* xref:guides:execution-runner:runner-overview.adoc#[Runner] can be used for all execution environments, including macOS. + +[#nomad-clients] +== 1. Nomad clients +Nomad is a workload orchestration tool that CircleCI uses to schedule (through Nomad server) and run (through Nomad clients) CircleCI jobs. + +Nomad clients are installed outside of the Kubernetes cluster, while their control plane (Nomad Server) can be installed within the cluster or externally. Communication between your Nomad Clients and the Nomad control plane is secured with mTLS. The mTLS certificate, private key, and certificate authority will be output after you complete installation of the Nomad Clients. + +NOTE: If you are installing CircleCI server locally, rather than in AWS or GCP, see the xref:server-admin:air-gapped-installation:phase-4-configure-nomad-clients.adoc#[Air-gapped] docs on setting up Nomad clients. + +[#create-your-cluster-with-terraform] +=== a. Create your cluster with Terraform + +CircleCI curates Terraform modules to help install Nomad clients in your chosen cloud provider. You can browse the modules in our link:https://github.com/CircleCI-Public/server-terraform[public repository], including example Terraform config files for both AWS and GCP. 
+
+// Don't include this section in the GCP page:
+ifndef::env-gcp[]
+
+[#aws-cluster]
+==== AWS cluster
+You need some information about your cluster and server installation to populate the required variables for the Terraform module. A full example, as well as a full list of variables, can be found in the link:https://github.com/CircleCI-Public/server-terraform/tree/main/nomad-aws[example AWS Terraform configuration].
+
+* `server_endpoint` - This is the domain name of the CircleCI application.
+
+* *Subnet ID (`subnet`)*, *VPC ID (`vpc_id`)*, and *DNS server (`dns_server`)* of your cluster. Run the following commands to get the cluster VPC ID (`vpcId`) and subnets (`subnetIds`):
++
+[source,shell]
+----
+# Fetch VPC ID
+aws eks describe-cluster --name=<cluster-name> --query "cluster.resourcesVpcConfig.vpcId" --region=<region> --output text | xargs
+
+# Fetch Subnet IDs
+aws eks describe-cluster --name=<cluster-name> --query "cluster.resourcesVpcConfig.subnetIds" --region=<region> --output text | xargs
+----
++
+This returns something similar to the following:
++
+[source,text]
+----
+# VPC Id
+vpc-02fdfff4ca
+
+# Subnet Ids
+subnet-08922063f12541f93 subnet-03b94b6fb1e5c2a1d subnet-0540dd7b2b2ddb57e subnet-01833e1fa70aa4488
+----
++
+Then, using the VPC ID you just found, run the following command to get the CIDR Block for your cluster. For AWS, the DNS server is the third IP in your CIDR block (`CidrBlock`), for example your CIDR block might be `10.100.0.0/16`, so the third IP would be `10.100.0.2`.
++ +[source,shell] +---- +aws ec2 describe-vpcs --filters Name=vpc-id,Values=<vpc-id> --query "Vpcs[].CidrBlock" --region=<region> --output text | xargs +---- ++ +This returns something like the following: ++ +[source,text] +---- +192.168.0.0/16 +---- + +Once you have filled in the appropriate information, you can deploy your Nomad clients by running the following commands: + +[source,shell] +---- +terraform init +---- + +[source,shell] +---- +terraform plan +---- + +[source,shell] +---- +terraform apply +---- + +NOTE: The AMIs used in our Terraform are now based on Ubuntu 22.04 with CgroupsV1 enabled. CgroupsV2 is currently unsupported. These Nomad AMIs are built with automatic security updates enabled. You may use our AMIs as a base to build images with your own customizations or use the AWS image builder to create your own Nomad AMIs. Currently the only requirement is Ubuntu 22.04 with CgroupsV1 enabled. + +After Terraform is done spinning up the Nomad client(s), it outputs the certificates and keys needed for configuring the Nomad control plane in CircleCI server. Copy them somewhere safe. The apply process usually only takes a minute. + +// Stop hiding from GCP PDF: +endif::env-gcp[] + +// Don't include this section in the AWS page: +ifndef::env-aws[] + +[#gcp-cluster] +==== GCP cluster + +You need the following information: + +* The Domain name of the CircleCI application +* The GCP Project you want to run Nomad clients in +* The GCP Zone you want to run Nomad clients in +* The GCP Region you want to run Nomad clients in +* The GCP Network you want to run Nomad clients in +* The GCP Subnetwork you want to run Nomad clients in + +A full example, as well as a full list of variables, can be found in the link:https://github.com/CircleCI-Public/server-terraform/tree/main/nomad-gcp[example GCP Terraform configuration]. 
+
+Once you have filled in the appropriate information, you can deploy your Nomad clients by running the following commands:
+
+[source,shell]
+----
+terraform init
+----
+
+[source,shell]
+----
+terraform plan
+----
+
+[source,shell]
+----
+terraform apply
+----
+
+After Terraform is done spinning up the Nomad client(s), it outputs the certificates and keys needed for configuring the Nomad control plane in CircleCI server. Copy them somewhere safe.
+
+// Stop hiding from AWS page
+endif::env-aws[]
+
+[#nomad-autoscaler-configuration]
+=== b. Nomad Autoscaler configuration
+Nomad can automatically scale up or down your Nomad clients, provided your clients are managed by a cloud provider's auto scaling resource. With Nomad Autoscaler, you need to provide permission for the utility to manage your auto scaling resource and specify where it is located. CircleCI's Nomad Terraform module can provision the permissions resources, or it can be done manually.
+
+// Don't include this section in the GCP page:
+ifndef::env-gcp[]
+
+[#aws-iam-role]
+==== AWS autoscaler IAM/role
+Create an IAM user or role and policy for Nomad Autoscaler. You may take **one** of the following approaches:
+
+* The CircleCI link:https://github.com/CircleCI-Public/server-terraform/tree/main/nomad-aws[Nomad module] creates an IAM user and outputs the keys if you set variable `nomad_auto_scaler = true`. You may reference the example in the link for more details. If you have already created the clients, you can update the variable and run `terraform apply`. The created user's access and secret key will be available in Terraform's output.
+* Create a Nomad Autoscaler IAM user manually with the <<iam-policy-for-nomad-autoscaler,IAM policy below>>. Then, generate an access and secret key for this user.
+* You may create a link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[Role for Service Accounts] for Nomad Autoscaler and attach the <<iam-policy-for-nomad-autoscaler,IAM policy below>>: + +When using access and secret keys, you have two options for configuration: + +[tabs] +==== +CircleCI creates Secret:: ++ +-- +**Option 1:** CircleCI creates the Kubernetes Secret for you. + +Add the access key and secret to your `values.yaml` file with any additional required configuration: + +[source, yaml] +---- +nomad: +... + auto_scaler: + aws: + accessKey: "<access-key>" + secretKey: "<secret-key>" +---- +-- +You create Secret:: ++ +-- +**Option 2:** Create the Kubernetes Secret yourself + +Instead of storing the access key and secret in your `values.yaml` file, you may create the Kubernetes Secret yourself. + +NOTE: When using this method, an additional field is required for this secret, as outlined below. + +First, add your access key, secret, and region to the following text, and encode it all with base64. + +[source] +---- +ADDITIONAL_CONFIG=`cat << EOF | base64 +target "aws-asg" { + driver = "aws-asg" + config = { + aws_region = "<aws-region>" + aws_access_key_id = "<access-key>" + aws_secret_access_key = "<secret-key>" + } +} +EOF` +---- + +Then, using that additional base64 encoded config, create the Kubernetes Secret. 
+ +[source, shell] +---- +# With the base64-encoded additional config from above +kubectl create secret generic nomad-autoscaler-secret \ + --from-literal=secret.hcl=$ADDITIONAL_CONFIG +---- +-- +==== + + +[#iam-policy-for-nomad-autoscaler] +==== IAM policy for Nomad Autoscaler + +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "autoscaling:CreateOrUpdateTags", + "autoscaling:UpdateAutoScalingGroup", + "autoscaling:TerminateInstanceInAutoScalingGroup" + ], + "Resource": "<<Your Autoscaling Group ARN>>" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeAutoScalingGroups" + ], + "Resource": "*" + } + ] +} +---- + +endif::env-gcp[] + +ifndef::env-aws[] + +[#gcp-service-account] +==== GCP autoscaler service account +Create a service account for Nomad Autoscaler. You may take **one** of the following approaches: + +[tabs] +==== +CircleCI creates Secret:: ++ +-- +**Option 1:** CircleCI creates the Kubernetes Secret. + +The CircleCI link:https://github.com/CircleCI-Public/server-terraform/tree/main/nomad-gcp[Nomad module] can create a service account and output a file with the JSON key. For this option, set the variable `nomad_auto_scaler = true`. You may reference the examples in the link for more details. The created service account key will be available in a file named `nomad-as-key.json`. +-- +Use Workload Identity:: ++ +-- +**Option 2:** Use Workload Identity. + +The CircleCI link:https://github.com/CircleCI-Public/server-terraform/tree/main/nomad-gcp[Nomad module] can create a service account using xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#enable-workload-identity-in-gke[Workload Identity] and send out the email. Set the variables `nomad_auto_scaler = true` and `enable_workload_identity = true`. 
+-- +You create Secret:: ++ +-- +**Option 3:** Create the Kubernetes Secret yourself. + +NOTE: When creating the Kubernetes Secret manually, an additional field is required, as outlined below. + +[source, shell] +---- +# Base64 encoded additional configuration field +ADDITIONAL_CONFIG=dGFyZ2V0ICJnY2UtbWlnIiB7CiAgZHJpdmVyID0gImdjZS1taWciCiAgY29uZmlnID0gewogICAgY3JlZGVudGlhbHMgPSAiL2V0Yy9ub21hZC1hdXRvc2NhbGVyL2NyZWRzL2djcF9zYS5qc29uIgogIH0KfQo= +kubectl create secret generic nomad-autoscaler-secret \ + --from-literal=gcp_sa.json=<service-account> \ + --from-literal=secret.hcl=$ADDITIONAL_CONFIG +---- + +When creating a Nomad GCP service account manually, the service account will need the role `compute.admin`. It will also need the role `iam.workloadIdentityUser` if using Workload Identity. This step is only required if you choose not to create the service account using Terraform. +-- +==== + +endif::env-aws[] + +[#nomad-servers] +== 2. Nomad servers + +In the previous section you deployed your Nomad clients and have the IAM resources provisioned. Now you can deploy and configure the Nomad Servers. + +[#where-to-deploy-nomad-servers] +=== a. Where to deploy Nomad servers + +Nomad Servers are by default deployed within your CircleCI server cluster. However, Nomad Servers may be deployed externally. As with the Nomad clients, you can use the Terraform module CircleCI provides to deploy your Nomad Servers or as a guide for how such a deployment might look. + +* If you wish to deploy your Nomad servers inside your CircleCI server cluster, continue to <<nomad-gossip-encryption-key>> below. +* If you wish to deploy your Nomad servers externally, follow these steps: + +.. Update your Terraform by setting the variable `nomad_server_enabled` to `true`. +.. Run `terraform apply` to apply your changes. This will deploy Nomad Servers with the default configurations to the existing infrastructure you created when setting up the Nomad clients earlier. 
For a full list of the Nomad Server options, refer to the `variables.tf` in our +ifndef::env-aws[] +link:https://github.com/CircleCI-Public/server-terraform/blob/main/nomad-gcp/variables.tf[public repository]. +endif::env-aws[] +ifndef::env-gcp[] +link:https://github.com/CircleCI-Public/server-terraform/blob/main/nomad-aws/variables.tf[public repository]. +endif::env-gcp[] +.. Update the `nomad` block in your Helm `values.yaml` with the following: ++ +[source,yaml] +---- +nomad: + server: + internal: false + serverHostname: <your nomad server domain or load-balancer> +---- +.. Once the above is complete, you can skip ahead to <<nomad-autoscaler>>. + + +[#nomad-gossip-encryption-key] +=== b. Nomad gossip encryption key + +Nomad requires a key to encrypt communications. This key must be exactly 32 bytes long. CircleCI will not be able to recover the values if lost. Depending on how you prefer to manage Kubernetes Secrets, there are two options: + +[tabs] +==== +You create Secret:: ++ +-- +**Option 1:** Create the Kubernetes Secret yourself. + +[source,shell] +---- +kubectl -n <namespace> create secret generic nomad-gossip-encryption-key \ +--from-literal=gossip-key=<secret-key-32-chars> + +kubectl -n <namespace> annotate secret/nomad-gossip-encryption-key \ + meta.helm.sh/release-name=<helm-release-name> \ + meta.helm.sh/release-namespace=<namespace> \ + helm.sh/resource-policy=keep --overwrite + +kubectl -n <namespace> label secret/nomad-gossip-encryption-key \ + app.kubernetes.io/managed-by=Helm --overwrite +---- + +Once the Kubernetes Secret exists, no change to `values.yaml` is required. The Kubernetes Secret will be referenced by default. +-- +CircleCI creates Secret:: ++ +-- +**Option 2:** CircleCI creates the Kubernetes Secret. + +CircleCI will create the Kubernetes Secret `nomad-gossip-encryption-key` automatically. + +-- +==== + +[#nomad-mtls] +=== c. 
Nomad mTLS
+
+The `CACertificate`, `certificate` and `privateKey` can be found in the output of the Terraform module. You may either add the mTLS cert values to your `values.yaml` or you may create a secret named `nomad-mtls` in the same namespace as your CircleCI server installation. The values must be base64 encoded.
+
+[tabs]
+====
+You create Secret::
++
+--
+**Option 1:** Generate a secret with the mTLS cert values
+
+[source, shell]
+----
+kubectl -n <namespace> create secret generic nomad-mtls \
+  --from-literal=ca.pem="<base64-encoded-ca-certificate>" \
+  --from-literal=key.pem="<base64-encoded-private-key>" \
+  --from-literal=cert.pem="<base64-encoded-certificate>"
+
+kubectl -n <namespace> annotate secret/nomad-mtls \
+  meta.helm.sh/release-name=<helm-release-name> \
+  meta.helm.sh/release-namespace=<namespace> \
+  helm.sh/resource-policy=keep --overwrite
+
+kubectl -n <namespace> label secret/nomad-mtls \
+  app.kubernetes.io/managed-by=Helm --overwrite
+----
+--
+CircleCI creates Secret::
++
+--
+**Option 2:** Add the base64 encoded values to your `values.yaml`
+
+[source,yaml]
+----
+nomad:
+  server:
+    ...
+    rpc:
+      mTLS:
+        enabled: true
+        certificate: "<base64-encoded-certificate>"
+        privateKey: "<base64-encoded-private-key>"
+        CACertificate: "<base64-encoded-ca-certificate>"
+----
+--
+====
+
+[#nomad-autoscaler]
+=== d. Nomad Autoscaler
+
+If you have enabled Nomad Autoscaler, also include the following section under `nomad`:
+
+// Don't include this section in the GCP page.
+ifndef::env-gcp[]
+
+[#aws]
+==== AWS
+You created these values in the <<aws-iam-role,Nomad Autoscaler Configuration section>>.
+
+[source,yaml]
+----
+nomad:
+  ...
+ auto_scaler: + enabled: true + scaling: + max: <max-node-limit> + min: <min-node-limit> + + aws: + enabled: true + region: "<region>" + autoScalingGroup: "<asg-name>" + + accessKey: "<access-key>" + secretKey: "<secret-key>" + # or + irsaRole: "<role-arn>" +---- +// Stop hiding from GCP PDF: +endif::env-gcp[] + +// Don't include this section in the AWS page: +ifndef::env-aws[] + +[#gcp] +==== GCP +You created these values in the <<gcp-service-account,Nomad Autoscaler Configuration section>>. + +[source,yaml] +---- +nomad: + ... + auto_scaler: + enabled: true + scaling: + max: <max-node-limit> + min: <min-node-limit> + + gcp: + enabled: true + project_id: "<project-id>" + mig_name: "<instance-group-name>" + + region: "<region>" + # or + zone: "<zone>" + + workloadIdentity: "<service-account-email>" + # or + service_account: "<service-account-json>" +---- + +// Stop hiding from AWS page +endif::env-aws[] + +=== e. Helm upgrade + +Apply the changes made to your `values.yaml` file: + +[source,shell,subs=attributes+] +---- +namespace=<your-namespace> +helm upgrade circleci-server oci://cciserver.azurecr.io/circleci-server -n $namespace --version {serverversion47} -f <path-to-values.yaml> +---- + +[#nomad-clients-validation] +=== f. Nomad clients validation + +CircleCI has created a project called link:https://github.com/circleci/realitycheck[Reality Check] which allows you to test your server installation. We are going to follow the project so we can verify that the system is working as expected. As you continue through the next phase, sections of Reality Check will move from red (fail) to green (pass). + +Before running Reality Check, check if the Nomad servers can communicate with the Nomad clients by executing the below command. 
+ +[source,shell] +---- +kubectl -n <namespace> exec -it $(kubectl -n <namespace> get pods -l app=nomad-server -o name | tail -1) -- nomad node status +---- + +You should be able to see output like this: + +[source,shell] +---- +ID DC Name Class Drain Eligibility Status +132ed55b default ip-192-168-44-29 linux-64bit false eligible ready +---- + +To run Reality Check, you need to clone the repository. Depending on your GitHub setup, you can use one of the following commands: + +[#github-cloud] +==== GitHub cloud + +[source,shell] +---- +git clone https://github.com/circleci/realitycheck.git +---- + +[#github-enterprise-nomad] +==== GitHub Enterprise + +[source,shell] +---- +git clone https://github.com/circleci/realitycheck.git +git remote set-url origin <YOUR_GH_REPO_URL> +git push +---- + +Once you have successfully cloned the repository, you can follow it from within your CircleCI server installation. You need to set the following variables. For full instructions refer to the link:https://github.com/circleci/realitycheck#prerequisites-1[repository README]. + +.Environmental Variables +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +|Name +|Value + +|CIRCLE_HOSTNAME +|<YOUR_CIRCLECI_INSTALLATION_URL> + +|CIRCLE_TOKEN +|<YOUR_CIRCLECI_API_TOKEN> + +|CIRCLE_CLOUD_PROVIDER +|< `aws`, `gcp`, or `other` > +|=== + +.Contexts +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +|Name +|Environmental Variable Key +|Environmental Variable Value + +|org-global +|CONTEXT_END_TO_END_TEST_VAR +|Leave blank + +|individual-local +|MULTI_CONTEXT_END_TO_END_VAR +|Leave blank +|=== + +Once you have configured the environmental variables and contexts, rerun the Reality Check tests. You should see the features and resource jobs complete successfully. 
Your test results should look something like the following: + +image::guides:ROOT:realitycheck-pipeline.png[Screenshot showing the Reality Check project building in the CircleCI app] + +[#machine-provisioner] +== 3. Machine provisioner + +NOTE: Overriding scaling options is currently not supported, but will be supported in the future. + +Machine provisioner is used to configure virtual machines for jobs that run in Linux VM, Windows and Arm VM execution environments, and those that are configured to use xref:reference:ROOT:configuration-reference.adoc#setupremotedocker[remote Docker]. Machine provisioner is unique to AWS and GCP installations because it relies on specific features of these cloud providers. + +Once you have completed the server installation process you can further configure machine provisioner, including the following: + +* Building and specifying a Windows image to give developers access to the Windows execution environment. +* Specifying an alternative Linux machine image, and specifying a number of preallocated instances to remain spun up at all times. + +For more information, see the xref:server-admin:operator:manage-virtual-machines-with-machine-provisioner.adoc#[Manage Virtual Machines with machine provisioner] page. + +Before moving on to platform specific steps, create your firewall rules. External VMs need the networking rules described in xref:installation:hardening-your-cluster.adoc#external-vms[Hardening your Cluster] + +ifndef::env-gcp[] + +[#aws-machine-provisioner] +=== AWS + +[#set-up-security-group] +==== Set up security group + +. 
*Get the information needed to create security groups* ++ +The following command returns your VPC ID (`vpcId`) and CIDR Block (`serviceIpv4Cidr`) which you need throughout this section: ++ +[source,shell] +---- +# Fetch VPC Id +aws eks describe-cluster --name=<cluster-name> --query "cluster.resourcesVpcConfig.vpcId" --region=<region> --output text | xargs + +# Fetch CIDR Block +aws eks describe-cluster --name=<cluster-name> --query "cluster.kubernetesNetworkConfig.serviceIpv4Cidr" --region=<region> --output text | xargs +---- + +. *Create a security group* ++ +Run the following commands to create a security group for machine provisioner: ++ +[source,shell] +---- +aws ec2 create-security-group --vpc-id "<VPC_ID>" --description "CircleCI machine provisioner security group" --group-name "circleci-machine-provisioner-sg" +---- ++ +This outputs a GroupID to be used in the next steps: ++ +[source, json] +{ + "GroupId": "<VM_SECURITY_GROUP_ID>" +} + +. *Apply the security group for SSH (If using public IP addresses for machines)* ++ +If using public IP addresses for your machine provisioner instances, run the following command to apply the security group rules so users can SSH into their jobs: ++ +[source,shell] +---- +aws ec2 authorize-security-group-ingress --group-id "<VM_SECURITY_GROUP_ID>" --protocol tcp --port 54782 --cidr "0.0.0.0/0" +---- + +[#set-up-authentication] +==== Set up authentication + +Authenticate CircleCI with your cloud provider in one of two ways: + +* IAM Roles for Service Accounts (IRSA) - **recommended** +* IAM access keys + +[tabs] +==== +IRSA:: ++ +-- +The following is a summary of link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[AWS's Documentation on IRSA] that is sufficient for configuring machine provisioner in CircleCI. + +. 
*Create Identity Provider* ++ +Create an IAM OIDC Identity Provider for your EKS Cluster: ++ +[source,shell] +---- +eksctl utils associate-iam-oidc-provider --cluster <CLUSTER_NAME> --approve +---- + +. *Get ARN* ++ +Get the OIDC provider ARN with the following command, you will need it in later steps: ++ +[source,shell] +---- +aws iam list-open-id-connect-providers | grep $(aws eks describe-cluster --name <CLUSTER_NAME> --query "cluster.identity.oidc.issuer" --output text | awk -F'/' '{print $NF}') +---- + +. *Get URL* ++ +Get your OIDC provider URL, you will need it in later steps ++ +[source,shell] +---- +aws eks describe-cluster --name <CLUSTER_NAME> --query "cluster.identity.oidc.issuer" --output text | sed -e "s/^https:\/\///" +---- + +. *Create role* ++ +Create the role using the command and trust policy template below, you will need the Role ARN and name in later steps: ++ +[source,shell] +---- +aws iam create-role --role-name circleci-vm --assume-role-policy-document file://<TRUST_POLICY_FILE> +---- ++ +[source, json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "<OIDC_PROVIDER_ARN>" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "<OIDC_PROVIDER_URL>:sub": "system:serviceaccount:<K8S_NAMESPACE>:machine-provisioner" + } + } + } + + ] +} +---- + +. *Create policy* ++ +Create the policy using the command and template below. 
Fill in the security group ID and the VPC ID: ++ +[source,shell] +---- +aws iam create-policy --policy-name circleci-vm --policy-document file://<POLICY_FILE> +---- ++ +[source, json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "ec2:RunInstances", + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*::image/*", + "arn:aws:ec2:*::snapshot/*", + "arn:aws:ec2:*:*:key-pair/*", + "arn:aws:ec2:*:*:launch-template/*", + "arn:aws:ec2:*:*:network-interface/*", + "arn:aws:ec2:*:*:placement-group/*", + "arn:aws:ec2:*:*:security-group/<SECURITY_GROUP_ID>", + "arn:aws:ec2:*:*:volume/*" + ] + }, + { + "Action": "ec2:RunInstances", + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:subnet/<SUBNET_ID_1>", + "arn:aws:ec2:*:*:subnet/<SUBNET_ID_2>" + ] + }, + { + "Action": "ec2:RunInstances", + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:instance/*", + "Condition": { + "StringEquals": { + "aws:RequestTag/ManagedBy": "circleci-machine-provisioner" + } + } + }, + { + "Action": [ + "ec2:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags" + ], + "Resource": "arn:aws:ec2:*:*:*/*", + "Condition": { + "StringEquals": { + "ec2:CreateAction": "RunInstances" + } + } + }, + { + "Action": [ + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances" + ], + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:instance/*", + "Condition": { + "StringLike": { + "ec2:Subnet": [ + "arn:aws:ec2:*:*:subnet/<SUBNET_ID_1>", + "arn:aws:ec2:*:*:subnet/<SUBNET_ID_2>" + ] + }, + "StringEquals": { + "ec2:ResourceTag/ManagedBy": "circleci-machine-provisioner" + } + } + } + ] +} + +---- + +. *Attach policy* ++ +Attach the policy to the role: ++ +[source,shell] +---- +aws iam attach-role-policy --role-name <VM_ROLE_NAME> --policy-arn=<VM_POLICY_ARN> +---- + +. 
*Configure machine provisioner* ++ +Configure machine provisioner by adding the following to `values.yaml`: ++ +NOTE: If deploying in specific zone need to specify subnetwork for GCP ++ +[source,yaml] +---- +machine_provisioner: + providers: + ec2: + enabled: true + region: "<REGION>" + assignPublicIP: true + irsaRole: "<IRSA_ROLE_ARN>" + subnets: + - "<SUBNET_ID>" + securityGroupId: "<SECURITY_GROUP_ID>" +---- +-- +IAM Access Keys:: ++ +-- +. *Create user* ++ +Create a new user with programmatic access: ++ +[source,shell] +---- +aws iam create-user --user-name circleci-machine-provisioner +---- ++ +Optionally, machine provisioner does support the use of a link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[service account role] in place of AWS keys. If you would prefer to use a role, follow these link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[instructions] using the policy in step 6 below. +Once done, you may skip to step 9, enabling machine provisioner. ++ +. *Create policy* ++ +Create a `policy.json` file with the following content. You should fill in the ID of the machine provisioner security group created in step 2 (`MachineProvisionerSecurityGroupID`) and VPC ID (`vpcID`) below. 
++
+[source,json]
+----
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Action": "ec2:RunInstances",
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws:ec2:*::image/*",
+        "arn:aws:ec2:*::snapshot/*",
+        "arn:aws:ec2:*:*:key-pair/*",
+        "arn:aws:ec2:*:*:launch-template/*",
+        "arn:aws:ec2:*:*:network-interface/*",
+        "arn:aws:ec2:*:*:placement-group/*",
+        "arn:aws:ec2:*:*:security-group/<SECURITY_GROUP_ID>",
+        "arn:aws:ec2:*:*:volume/*"
+      ]
+    },
+    {
+      "Action": "ec2:RunInstances",
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws:ec2:*:*:subnet/<SUBNET_ID_1>",
+        "arn:aws:ec2:*:*:subnet/<SUBNET_ID_2>"
+      ]
+    },
+    {
+      "Action": "ec2:RunInstances",
+      "Effect": "Allow",
+      "Resource": "arn:aws:ec2:*:*:instance/*",
+      "Condition": {
+        "StringEquals": {
+          "aws:RequestTag/ManagedBy": "circleci-machine-provisioner"
+        }
+      }
+    },
+    {
+      "Action": [
+        "ec2:Describe*"
+      ],
+      "Effect": "Allow",
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ec2:CreateTags"
+      ],
+      "Resource": "arn:aws:ec2:*:*:*/*",
+      "Condition": {
+        "StringEquals": {
+          "ec2:CreateAction" : "RunInstances"
+        }
+      }
+    },
+    {
+      "Action": [
+        "ec2:RunInstances",
+        "ec2:StartInstances",
+        "ec2:StopInstances",
+        "ec2:TerminateInstances"
+      ],
+      "Effect": "Allow",
+      "Resource": "arn:aws:ec2:*:*:instance/*",
+      "Condition": {
+        "StringLike": {
+          "ec2:Subnet": [
+            "arn:aws:ec2:*:*:subnet/<SUBNET_ID_1>",
+            "arn:aws:ec2:*:*:subnet/<SUBNET_ID_2>"
+          ]
+        },
+        "StringEquals": {
+          "ec2:ResourceTag/ManagedBy": "circleci-machine-provisioner"
+        }
+      }
+    }
+  ]
+}
+----
+
+. *Attach policy to user*
++
+Once you have created the policy.json file, create an IAM policy from it and attach it to the user you created:
++
+[source,shell]
+----
+aws iam put-user-policy --user-name circleci-machine-provisioner --policy-name circleci-machine-provisioner --policy-document file://policy.json
+----
+
+. *Create an access key and secret for the user*
++
+If you have not already created them, you will need an access key and secret for the `circleci-machine-provisioner` user. You can create those by running the following command:
++
+[source,shell]
+----
+aws iam create-access-key --user-name circleci-machine-provisioner
+----
+
+. *Configure server (there are two options)*
++
+* *Option 1 - Add the keys to `values.yaml`*
+Add the machine provisioner configuration to `values.yaml`.
++
+[source,shell]
+----
+machine_provisioner:
+  providers:
+    ec2:
+      enabled: true
+      region: "<REGION>"
+      assignPublicIP: true
+      accessKey: "<ACCESS-KEY>"
+      secretKey: "<SECRET-KEY>"
+      subnets:
+        - "<SUBNET_ID>"
+      securityGroupId: "<SECURITY_GROUP_ID>"
+----
++
+* *Option 2 - Create the Kubernetes Secret yourself*
+Instead of providing the access key and secret in your `values.yaml` file, you may create the Kubernetes Secret yourself.
++
+[source,shell]
+----
+kubectl create secret generic machine-provisioner-secret \
+  --from-literal=accessKey=<access-key> \
+  --from-literal=secretKey=<secret-key>
+----
+--
+====
+
+endif::env-gcp[]
+
+ifndef::env-aws[]
+
+[#gcp-authentication]
+=== GCP
+
+You need additional information about your cluster to complete the next section. Run the following command:
+
+[source,shell]
+----
+gcloud container clusters describe <CLUSTER_NAME>
+----
+
+This command returns something like the following, which includes network, region, and other details that you need to complete the next section:
+
+[source, json]
+----
+addonsConfig:
+  gcePersistentDiskCsiDriverConfig:
+    enabled: true
+  kubernetesDashboard:
+    disabled: true
+  networkPolicyConfig:
+    disabled: true
+clusterIpv4Cidr: 10.100.0.0/14
+createTime: '2021-08-20T21:46:18+00:00'
+currentMasterVersion: 1.20.8-gke.900
+currentNodeCount: 3
+currentNodeVersion: 1.20.8-gke.900
+databaseEncryption:
+…
+----
+
+. *Create user*
++
+We recommend you create a unique service account to be used exclusively by machine provisioner. 
The Compute Instance Admin (Beta) role is broad enough to allow machine provisioner to operate. If you wish to make permissions more granular, you can use the Compute Instance Admin (beta) role link:https://cloud.google.com/compute/docs/access/iam#compute.instanceAdmin[documentation] as reference. ++ +[source,shell] +---- +gcloud iam service-accounts create circleci-server-vm --display-name "circleci-server-vm service account" +---- ++ +NOTE: If you are deploying CircleCI server in a shared VCP, you should create this user in the project in which you intend to run your VM jobs. + +. *Get the service account email address* ++ +[source,shell] +---- +gcloud iam service-accounts list --filter="displayName:circleci-server-vm service account" --format 'value(email)' +---- + +. *Apply role to service account* ++ +Apply the Compute Instance Admin (beta) role to the service account: ++ +[source,shell] +---- +gcloud projects add-iam-policy-binding <YOUR_PROJECT_ID> --member serviceAccount:circleci-server-vm@<PROJECT_ID>.iam.gserviceaccount.com --role roles/compute.instanceAdmin --condition=None +---- ++ +And: ++ +[source,shell] +---- +gcloud projects add-iam-policy-binding <YOUR_PROJECT_ID> --member serviceAccount:circleci-server-vm@<PROJECT_ID>.iam.gserviceaccount.com --role roles/iam.serviceAccountUser --condition=None +---- + +. *Enable Workload Identity for Service Account or get JSON key file* ++ +Choose one of the following options, depending on whether you are using Workload Identity. ++ +[tabs] +==== +Enable Workload Identity for Service Account:: ++ +-- +This step is required only if you are using link:https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity[Workload Identities] for GKE. Steps to enable Workload Identities are provided in xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#enable-workload-identity-in-gke[Phase 1 - Prerequisites]. 
+ +[source,shell] +---- +gcloud projects add-iam-policy-binding <PROJECT_ID> \ + --member serviceAccount:circleci-server-vm@<PROJECT_ID>.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --condition=None + +gcloud iam service-accounts add-iam-policy-binding circleci-server-vm@<PROJECT_ID>.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:<GCP_PROJECT_ID>.svc.id.goog[circleci-server/machine-provisioner]" +---- +-- +Get Service Account JSON key file:: ++ +-- +If you are using link:https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity[Workload Identities] for GKE, this step is not required. + +After running the following command, you should have a file named `circleci-server-vm-keyfile` in your local working directory. You will need this when you configure your server installation. + +[source,shell] +---- +gcloud iam service-accounts keys create circleci-server-vm-keyfile --iam-account <YOUR_SERVICE_ACCOUNT_EMAIL> +---- +-- +==== + +. *Configure CircleCI server* ++ +When using service account keys for configuring access for the machine provisioner, there are two options. ++ +[tabs] +==== +CircleCI creates Secret:: ++ +-- +**Option 1:** CircleCI creates the Kubernetes Secret. + +Add the machine provisioner configuration to `values.yaml`. + +[source,yaml] +---- +machine_provisioner: + enabled: true + replicas: 1 + providers: + gcp: + enabled: true + project_id: <project-id> + network_tags: + - circleci-vm + - <your-network> + zones: + - <zone1> + - <zone2> + network: <network> + subnetwork: <subnetwork> + + service_account: <service-account-json> + # OR + workloadIdentity: "" # Leave blank if using JSON keys of service account else service account email address +---- + +-- +You create Secret:: ++ +-- +**Option 2:** Create the Kubernetes Secret yourself. + +Instead of providing the service account in your `values.yaml` file, you may create the Kubernetes Secret yourself. 
+
+[source,shell]
+----
+kubectl create secret generic machine-provisioner-secret \
+  --from-literal=gcp_sa.json=<access-key>
+----
+--
+====
+
+endif::env-aws[]
+
+[#machine-provisioner-validation]
+=== Machine provisioner validation
+
+Apply the changes made to your `values.yaml` file.
+
+[source,shell,subs=attributes+]
+----
+namespace=<your-namespace>
+helm upgrade circleci-server oci://cciserver.azurecr.io/circleci-server -n $namespace --version {serverversion47} -f <path-to-values.yaml>
+----
+
+Once you have configured and deployed CircleCI server, you should validate that machine provisioner is operational. You can rerun the Reality Check project within your CircleCI installation and you should see the machine provisioner jobs complete. At this point, all tests should pass.
+
+[#runner]
+== 4. Runner
+
+[#overview]
+=== Overview
+
+CircleCI runner does not require any additional server configuration. CircleCI server ships ready to work with runner. However, you need to create a runner and configure the runner agent to be aware of your server installation. For complete instructions for setting up runner, see the xref:guides:execution-runner:runner-overview.adoc#[runner documentation].
+
+NOTE: Runner requires a namespace per organization. CircleCI server can have many organizations. If your company has multiple organizations within your CircleCI installation, you need to set up a runner namespace for each organization within your server installation. 
+ +ifndef::pdf[] + +[#next-steps] +== Next steps +ifndef::env-gcp[] +* xref:server-admin:installation:phase-4-aws-post-installation.adoc#[Phase 4 AWS: Post Installation] +endif::env-gcp[] + +ifndef::env-aws[] +* xref:server-admin:installation:phase-4-gcp-post-installation.adoc#[Phase 4 GCP: Post Installation] +endif::env-aws[] + +endif::pdf[] diff --git a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-4.adoc b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-4.adoc new file mode 100644 index 0000000000..f59b327c00 --- /dev/null +++ b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-4.adoc @@ -0,0 +1,53 @@ +Before you begin with the CircleCI server 4.9 post installation phase, ensure you have run through the following: + +ifndef::env-aws[] +* xref:server-admin:installation:phase-1-gcp-prerequisites.adoc#[Phase 1 GCP – Prerequisites] +* xref:server-admin:installation:phase-2-gcp-core-services.adoc#[Phase 2 GCP - Core Services Installation] +* xref:server-admin:installation:phase-3-gcp-execution-environments.adoc#[Phase 3 GCP - Execution Environments Installation] +endif::env-aws[] + +ifndef::env-gcp[] +* xref:server-admin:installation:phase-1-aws-prerequisites.adoc#[Phase 1 AWS – Prerequisites] +* xref:server-admin:installation:phase-2-aws-core-services.adoc#[Phase 2 AWS - Core Services Installation] +* xref:server-admin:installation:phase-3-aws-execution-environments.adoc#[Phase 3 AWS - Execution Environments Installation] +endif::env-gcp[] + +NOTE: In the following sections, replace any sections indicated by `< >` with your details. + +[#backup-and-restore] +== Backup and restore + +Backups of CircleCI server can be created through link:https://velero.io/[Velero] which is detailed in our xref:server-admin:operator:backup-and-restore.adoc#[Backup and Restore] documentation. 
+
+[#email-notifications]
+== Email notifications
+
+Add email notification support by adding the following to `values.yaml`:
+
+[source,yaml]
+----
+smtp:
+  host: <hostname-of-submission-server>
+  user: <username-for-submission-server>
+  password: <password-for-submission-server>
+  port: <mail-port>
+----
+
+[#managing-orbs]
+== Managing orbs
+
+CircleCI server installations include their own local orb registry. This registry is private to the server installation. All orbs referenced in project configs reference the orbs in the _server_ orb registry. You are responsible for maintaining orbs. This includes:
+
+* Copying orbs from the public registry.
+* Updating orbs that may have been copied previously.
+* Registering your company's private orbs, if you have any.
+
+For more information and steps to complete these tasks, see the xref:server-admin:operator:managing-orbs.adoc#[Orbs on server guide].
+
+ifndef::pdf[]
+[#next-steps]
+== Next steps
+
+* xref:server-admin:installation:hardening-your-cluster.adoc#[Hardening Your Cluster]
+* xref:server-admin:operator:backup-and-restore.adoc#[Backup & Restore]
+endif::[]
diff --git a/docs/server-admin-4.9/modules/air-gapped-installation/pages/additional-considerations.adoc b/docs/server-admin-4.9/modules/air-gapped-installation/pages/additional-considerations.adoc
new file mode 100644
index 0000000000..229f9fee57
--- /dev/null
+++ b/docs/server-admin-4.9/modules/air-gapped-installation/pages/additional-considerations.adoc
@@ -0,0 +1,126 @@
+= Additional considerations
+:page-platform: Server 4.9, Server Admin
+:page-description: This page presents some items that should be considered when starting an air-gapped installation of CircleCI server 4.9.
+:experimental:
+
+[#non-tls-docker-registry-installations]
+== Non-TLS Docker registry installations
+
+When configuring your air-gapped Docker registry, it is recommended to use TLS certificates to encrypt traffic. 
If using a non-TLS, or self-signed installation, the following additional steps will need to be taken. + +On machines that access the Docker registry using Docker, the Docker daemon config must be updated (located on Linux at `/etc/docker/daemon.json`). + +The insecure-registries section must be added to the file (if it exists), or the file must be created with the following if it does not. Make sure to include the full hostname and port of your registry, but do not include the protocol (`http://` or `https://`). + +[source, json] +---- +{ + "insecure-registries":["docker.example.internal:5000"] +} +---- + +This file will need to be configured on the following machines: + +- All Nomad nodes in the air-gapped environment +- Potentially all K3s (lightweight Kubernetes) nodes in the air-gapped environment, if using Docker-backed K3s + +In addition, on each K3s node, the following file must be configured at `/etc/rancher/k3s/registries.yaml`. Take note to include the protocol where referenced. + +[source, yaml] +---- +mirrors: + "docker.example.internal:5000": + endpoint: + - "http://docker.example.internal:5000" +configs: + "docker.example.internal:5000": + tls: + insecure_skip_verify: true +---- + +--- + + + +[#service-type-load-balancers-k3s] +== Service type load balancers in K3s + +CircleCI server makes use of link:https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer[Service] `type: LoadBalancer` Kubernetes resources to listen to traffic on multiple ports. + +If using a K3s installation, link:https://metallb.universe.tf/installation/[MetalLB] can be used to create a virtual load balancer on the K3s node, to allow ingress traffic to CircleCI server. + +Once installed, the following steps need to be followed: + +. Create a ConfigMap resource as an address pool for MetalLB. 
++ +[source, yaml] +---- +apiVersion: "v1" +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - <<k3s_internal_ip_range_start>>-<<k3s_internal_ip_range_end>> +---- ++ +The address pool can be named something other than "default", but the annotations in `values.yaml` will need to be updated. If there is only one k3s node, the address range should have the same IP repeated (for example, `10.0.0.5-10.0.0.5`). + +. Once this ConfigMap resource is applied to the cluster (`kubectl apply -f metallb-configmap.yaml`), update the address pool name in `values.yaml` for the Helm installation. ++ +[source, yaml] +---- +# Additional nginx annotations +nginx: + annotations: + # This example uses MetalLB as a k3s load balancer + metallb.universe.tf/allow-shared-ip: default +---- + +. After installing the Helm chart, the `circleci-proxy` service must be patched to use the internal IP of the desired k3s node to act as the load balancer (this IP should be in the range entered in the ConfigMap above). The example below uses the IP address `10.0.0.5`. ++ +[source, bash] +---- +kubectl patch svc circleci-proxy -p '{"spec": {"type": "LoadBalancer", "externalIPs":["10.0.0.5"]}}' +---- + +. Once complete, DNS records can be created for your server installation (`server.internal.example.com`) and (`*.server.internal.example.com)` for `10.0.0.5`. + +[#tls-importing] +== Importing trusted TLS certificates + +When using a GitHub Enterprise instance with self-signed or custom certificate authority certificates, CircleCI server can be configured to trust these certificates using two methods, as described below. + +NOTE: Values provided for either of these methods are supported for GitHub Enterprise certificates only. No other VCS application is supported at this time. 
+ +[#configuring-the-tls-imports-value] +=== Configuring the TLS imports value +In the `values.yaml` file, the `tls.imports` value can be configured to include a list of `hostname:port` combinations from which to retrieve and trust TLS certificates. + +[source, yaml] +---- +tls: + ... + import: + - github.airgap.example.com:443 +---- + +For each `hostname:port` combination, CircleCI server will, during installation, retrieve the public certificate for the particular GitHub Enterprise instance and trust it to establish connections with that instance. + + +[#configuring-the-tls-certificates-array] +=== Configuring the TLS certificates array +Instead of providing a list of `hostname:port` combinations for certificates to import, the public certificate chains of the corresponding TLS certificates to trust can be provided in the `values.yaml` file. Provide the trusted certificate list in the `tls.certificates` value, as a list of base64 encoded certificates strings. + +[source, yaml] +---- +tls: + ... + certificates: + - <<base64-encoded-public-tls-certificate-chain>> +---- diff --git a/docs/server-admin-4.9/modules/air-gapped-installation/pages/example-values.adoc b/docs/server-admin-4.9/modules/air-gapped-installation/pages/example-values.adoc new file mode 100644 index 0000000000..ba21d6078d --- /dev/null +++ b/docs/server-admin-4.9/modules/air-gapped-installation/pages/example-values.adoc @@ -0,0 +1,120 @@ += Example `values.yaml` +:page-platform: Server 4.9, Server Admin +:page-description: This page presents an example values.yaml file to help with setting up an air-gapped installation of CircleCI server 4.9. +:experimental: + +The following snippet shows an example `values.yaml` file for a Helm installation of CircleCI server in an air-gapped environment. 
+ +[#resources] +== Resources +The chart assumes an environment with the following resources: + +- A K3s cluster for the installation of the Helm chart +- MetalLB pre-configued on the K3s cluster for ingress +- A private Docker registry at `docker.internal.example.com` running on port 5000 with no TLS encryption +- A Nomad instance with mTLS disabled +- A MinIO instance running at minio.internal.example.com, with its API listening on port 9000, and no TLS +- A TLS certificate issued for both domains `server.internal.example.com` and `*.server.internal.example.com``. + +For more information about specific values, see the standard installation documentation, starting with the following: + +* xref:installation:phase-2-aws-core-services.adoc[Phase 2 AWS - Core services] +* xref:installation:phase-2-gcp-core-services.adoc[Phase 2 GCP - Core services] + +[#values] +== `Values.yaml` + +[source, yaml] +---- + +# Private docker registry at docker.internal.example.com:5000 +global: + domainName: "server.internal.example.com" + license: '<<your-server-license-here>>' + container: + registry: "docker.internal.example.com:5000" + org: "<image-registry-org>" + +# GitHub Enterprise +github: + hostname: "github.internal.example.com" + unsafeDisableWebhookSSLVerification: true # If using self-signed certificates + enterprise: true + selfSignedCert: true # If using self-signed certificates + # These must be generated and added manually from GitHub Enterprise + clientId: "<<github-enterprise-oauth-app-client-id>>" + clientSecret: "<<github-enterprise-oauth-app-client-secret>>" + defaultToken: "<<github-enterprise-personal-application-token>>" + +# TLS with your provider +tls: + certificate: "<<your-generated-tls-certificate>>" + privateKey: "<<your-generated-tls-private-key>>" + +# Object storage with Minio +object_storage: + bucketName: "circleci-data" # Update to the name of the bucket created in MinIO + expireAfter: 0 + s3: + enabled: true + endpoint: 
"http://minio.internal.example.com:9000" + accessKey: "<<minio-username>>" + secretKey: "<<minio-password>>" + +# Distributor using CircleCI Agent in Minio +distributor: + agent_base_url: http://minio.internal.example.com:9000/circleci-data + +# Nomad +nomad: + buildAgentImage: "docker.internal.example.com:5000/circleci/picard" # Do not provide image version, only image name and registry + server: + gossip: + encryption: + key: "<<nomad-gossip-encryption-key>>" + rpc: + mTLS: + enabled: false # mTLS is disabled - it is recommended that this be enabled + + +# Machine provisioner disabled - Requires cloud connectivity +machine_provisioner: + enabled: false + +# Additional nginx annotations +nginx: + annotations: + # This example uses MetalLB as a k3s load balancer + metallb.universe.tf/allow-shared-ip: default + +# The below values require no special modifications for an air-gapped environment + +apiToken: "<<circleci-api-token>>" + +sessionCookieKey: "<<circleci-session-cookie-key>>" + +keyset: + signing: '<<circleci-signing-key>>' + encryption: '<<circleci-encryption-key>>' + +mongodb: + auth: + rootPassword: "<<mongodb-root-password>>" + password: "<<mongodb-password>>" + +pusher: + secret: "<<pusher-secret>>" + +postgresql: + auth: + postgresPassword: "<<postgres-password>>" + +rabbitmq: + auth: + password: "<<rabbitmq-password>>" + erlangCookie: "<<rabbitmq-erlang-cookie>>" + +docker_provisioner: + enabled: false + +---- diff --git a/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc new file mode 100644 index 0000000000..d7c1a66eda --- /dev/null +++ b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc @@ -0,0 +1,150 @@ += Phase 1 - Prerequisites +:page-platform: Server 4.9, Server Admin +:experimental: +:page-description: A guide to installing CircleCI server 4.9 in an air-gapped environment. 
Requirements, images and Helm charts. + +The guides in this section walk you through the steps required to install CircleCI server in an air-gapped environment. + +[#required-components] +== Required components for air-gapped installation +The following table shows an overview of the prerequisites required to run an air-gapped CircleCI server installation. These items must be present in your air-gapped environment in order to move forward with the installation. + +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +| Component +| Used for + +| Container registry +| Hosting CircleCI server images for Kubernetes + +| A Kubernetes cluster +| Running CircleCI server + +| An object storage system (MinIO) +| Object and artifact storage + +| GitHub Enterprise +| Source of code for CircleCI server to build + +| Nomad Virtual Machines +| Machines for running Nomad + +|=== + +[#copy-images] +== 1. Copy images + +CircleCI server is deployed into a Kubernetes cluster using a Helm chart. You will need to copy the latest images and charts from CircleCI into your virtual environment. All images referenced in the CircleCI Helm chart must be accessible within your air-gapped environment. + +[#login-to-acr] +=== a. Log in to CircleCI server container registry +Access to the images you need for your installation requires you to use a username and token, which will be provided by CircleCI. Access the link:https://support.circleci.com/[CircleCI support portal] for more information. + +[,bash] +---- +docker login cciserver.azurecr.io --username <your-circleci-provided-username> --password <your-circleci-provided-token> +---- + +=== b. Download all images required for this release +Download all images required for the release of CircleCI server to your local machine. This list contains all required images for the Helm installation, the CircleCI Agent, and the Reality Check testing tool. 
+ +[,bash] +---- +SERVER_4_8_IMAGE_LIST=`cat <<EOF +cciserver.azurecr.io/api-gateway:0.1.61921-e4a01bc +cciserver.azurecr.io/api-service:0.1.21904-86fef64a +cciserver.azurecr.io/approval-job-provider-migrator:1.0.23431-d070abe +cciserver.azurecr.io/audit-log-service:0.1.2153-c94eb0f +cciserver.azurecr.io/authentication-svc:0.1.45454-e22f36f +cciserver.azurecr.io/authentication-svc-migrator:0.1.17533-7681416 +cciserver.azurecr.io/branch-service:0.1.8001-c4fda8e +cciserver.azurecr.io/branch-service-migrator:0.1.8000-c4fda8e +cciserver.azurecr.io/builds-service:1.0.7795-fb357b9 +cciserver.azurecr.io/builds-service-migrator:1.0.7795-fb357b9 +cciserver.azurecr.io/ciam:0.1.42389-d9b9756 +cciserver.azurecr.io/ciam-gateway:0.1.11398-bfc865d +cciserver.azurecr.io/circle-www-api:0.1.711488-642e2d1832 +cciserver.azurecr.io/contexts-service:0.1.24798-a6b197a +cciserver.azurecr.io/contexts-service-migrator:0.1.24797-a6b197a +cciserver.azurecr.io/cron-service:0.1.5830-6ec2408 +cciserver.azurecr.io/cron-service-migrator:0.1.5831-6ec2408 +cciserver.azurecr.io/distributor:0.1.141342-97f6a50c +cciserver.azurecr.io/distributor-migrator:0.1.141343-97f6a50c +cciserver.azurecr.io/docker-provisioner:0.1.42238-2278cdc +cciserver.azurecr.io/domain-service:0.1.18952-cb0f939 +cciserver.azurecr.io/domain-service-migrator:0.1.18951-cb0f939 +cciserver.azurecr.io/execution-gateway:0.1.25576-5a1b55e +cciserver.azurecr.io/feature-flags:0.1.8767-2d3e16b +cciserver.azurecr.io/init-known-hosts:1.0.72-cca8263 +cciserver.azurecr.io/insights-service:0.1.92656-9aba6041 +cciserver.azurecr.io/insights-service-migrator:0.1.92655-9aba6041 +cciserver.azurecr.io/machine-provisioner:0.1.92089-38be0c2 +cciserver.azurecr.io/machine-provisioner-migrator:0.1.92084-38be0c2 +cciserver.azurecr.io/orb-service:0.1.102697-5efbd597 +cciserver.azurecr.io/orb-service-analytics-migrator:0.1.102699-5efbd597 +cciserver.azurecr.io/orb-service-migrator:0.1.102697-5efbd597 +cciserver.azurecr.io/output:0.1.30932-9b859a8 
+cciserver.azurecr.io/permissions-service:0.1.42391-d9b9756
+cciserver.azurecr.io/permissions-service-migrator:0.1.42390-d9b9756
+cciserver.azurecr.io/policy-service:0.1.9419-5d7da65
+cciserver.azurecr.io/public-api-service:0.1.38704-762b942
+cciserver.azurecr.io/runner-admin:0.1.27514-fb12e67
+cciserver.azurecr.io/runner-admin-migrator:0.1.27508-fb12e67
+cciserver.azurecr.io/server-license:1.0.90-16b88b3
+cciserver.azurecr.io/server-postgres:12.16.37-7629bfd
+cciserver.azurecr.io/server-postgres:12.22.445-4d84973
+cciserver.azurecr.io/server-rabbitmq:3.12.423-3363c50
+cciserver.azurecr.io/step:0.1.9950-a5e4a61
+cciserver.azurecr.io/vault-cci:0.4.196-1af3417
+cciserver.azurecr.io/webhook-service:0.1.12058-da092c4
+cciserver.azurecr.io/webhook-service-migrator:0.1.12059-da092c4
+cciserver.azurecr.io/web-ui:0.1.133999-ac8148f608
+cciserver.azurecr.io/web-ui-authentication:0.1.119922-bd2b764ed5
+cciserver.azurecr.io/web-ui-server-admin:0.1.127516-c8690d5814
+cciserver.azurecr.io/workflows-conductor:1.0.23431-d070abe
+cciserver.azurecr.io/workflows-conductor-migrator:1.0.23431-d070abe
+circleci/picard:1.0.275302-e49eabce
+docker.io/bitnami/mongodb:3.6.22-debian-9-r38
+docker.io/bitnami/redis:6.2.1-debian-10-r13
+docker.io/library/telegraf:1.34-alpine
+hashicorp/nomad:1.10.0
+hashicorp/nomad-autoscaler:0.4.6
+kong:3.4.2
+nginxinc/nginx-unprivileged:1.28.0
+quay.io/soketi/soketi:1.6-16-distroless
+EOF
+`
+----
+
+[source, bash]
+----
+echo "$SERVER_4_8_IMAGE_LIST" | while read -r image; do docker pull "$image"; done
+----
+
+[#copy-all-images]
+=== c. Copy all images to your air-gapped environment
+Copy all downloaded images to the container registry in your air-gapped environment.
+
+[#copy-charts]
+== 2. Copy charts
+Copy the CircleCI server Helm chart to your air-gapped environment.
+
+[#download-helm-chart]
+=== a. Download the latest CircleCI server Helm chart
+Download the latest CircleCI server Helm chart to the current directory.
+ +[,bash] +---- +helm registry login cciserver.azurecr.io +helm pull oci://cciserver.azurecr.io/circleci-server -d ./ +---- + +[#upload-helm-chart] +=== b. Copy the Helm chart to your air-gapped environment +Copy the downloaded `.tgz` Helm chart to your air-gapped environment. + +[#next-steps] +== Next steps + +Once the steps on this page are complete, go to the xref:phase-2-configure-object-storage.adoc[Phase 2 - Configure object storage] guide. diff --git a/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc new file mode 100644 index 0000000000..e2af824e21 --- /dev/null +++ b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc @@ -0,0 +1,197 @@ += Phase 2 - Configure object storage +:page-platform: Server 4.9, Server Admin +:page-description: How to configure object storage through MinIO to run CircleCI server 4.9 in an air-gapped environment. +:experimental: + +pass:[<!-- vale off -->] +[#create-buckets-in-minio] +== 1. Create buckets in MinIO +CircleCI server supports link:https://min.io/[MinIO] for air-gapped object storage. MinIO is used in the air-gapped environment as a replacement for cloud-based object storage, such as GCP GCS or AWS S3. With an air-gapped MinIO instance installed, complete the steps outlined in the next sections. + +image::guides:ROOT:./minio/minio_install_0.png[Create a bucket in MinIO] + +[#creates-data-bucket] +=== a. Create a `circleci-data` bucket. +Create a new bucket in MinIO named `circleci-data`. + +NOTE: This bucket can be given a different name, but references to it will need to be updated in the Helm `values.yaml` file. + +image::guides:ROOT:./minio/minio_created_buckets.png[Buckets created in MinIO] + +[#configure-data-bucket] +=== b. 
Configure the `circleci-data` bucket +In the settings for the `circleci-data` bucket, change the access policy to `public`. + +image::guides:ROOT:./minio/minio_modify_access_policy.png[Setting `circleci-data` bucket access policy to public] + +[#copy-build-agent] +== 2. Copy the CircleCI build agent +Follow the steps in this section to copy the pinned CircleCI build agent into MinIO within your air-gapped environment. + +[#retrieve-pinned-agent-version] +=== a. Extract the pinned agent version from the Helm chart +Fetch the `circleci-server` Helm chart and extract the pinned agent version from it. Set this to the environment variable `$CIRCLE_AGENT_VERSION` so that it can be referenced in later steps. + +[source,bash] +---- +# Fetch the Helm chart for inspection. Replace `<version>` with the full version of CircleCI server. +helm fetch oci://cciserver.azurecr.io/circleci-server --version <version> --untar + +# Set `$CIRCLE_AGENT_VERSION` to the value of `circleci/picard` in `images.yaml`. +export CIRCLE_AGENT_VERSION=$(grep 'circleci/picard:' ./circleci-server/images.yaml | cut -d' ' -f2) + +# Verify `$CIRCLE_AGENT_VERSION` is set. The value should be similar in form to `1.0.217358-0b5336a7`. +echo $CIRCLE_AGENT_VERSION +---- + +[#copy-release-txt] +=== b. Create a release.txt file and copy to MinIO +Create a `release.txt` file with the value of `$CIRCLE_AGENT_VERSION`. Copy this file to your air-gapped environment and place it in the root of the `circleci-data` bucket in MinIO. + +[#retrieve-pinned-agent-bin] +=== c. Retrieve the pinned agent binary +Retrieve and download the pinned `circleci-agent` release and checksums from the CircleCI binary releases public bucket. 
+ +[,bash] +---- +# Download circleci-agent +curl -O --compressed https://circleci-binary-releases.s3.amazonaws.com/circleci-agent/$CIRCLE_AGENT_VERSION/linux/amd64/circleci-agent + +# Download checksums +curl -O https://circleci-binary-releases.s3.amazonaws.com/circleci-agent/$CIRCLE_AGENT_VERSION/checksums.txt +---- + +[#create-release-dir] +=== d. Create a release directory in the `circleci-data` bucket +Using the version specified by `release.txt`, create a new directory in the root of the `circleci-data` bucket with the name of that release. In the following examples, a directory is created at the root of `circleci-data` in MinIO with the name `$CIRCLE_AGENT_VERSION`. Note `$CIRCLE_AGENT_VERSION` is not a literal string, but the value of the environment variable we set earlier. + +image::guides:ROOT:./minio/minio_create_release_dir.png[Creating a directory in the `circleci-data` bucket] + +[#upload-checksums-file] +=== e. Upload the checksums.txt file to the newly created directory. +Copy the downloaded `checksums.txt` file (step c) to your virtual environment, and place it in MinIO nested under the newly created release directory (step d). + +[,shell] +---- +# The structure of the directory should now look like this. +# Note `$CIRCLE_AGENT_VERSION` is not a literal string, but the value of the environment variable we set earlier. +$CIRCLE_AGENT_VERSION/ +└── checksums.txt +---- + +image::guides:ROOT:./minio/minio_upload_checksums.png[Uploading `checksums.txt` into the release directory] + +[#create-new-subdirs] +=== f. Create two new subdirectories in the release directory +Within the release directory (step d), create two new nested subdirectories, first `linux`, and then within it, `amd64`. 
+ +[,shell] +---- +# The structure of the directory of the bucket should look like this: +$CIRCLE_AGENT_VERSION/ +├── checksums.txt +└── linux/ + └── amd64/ +---- + +image::guides:ROOT:./minio/minio_create_linux_dir.png[Creating a Linux dir] + +image::guides:ROOT:./minio/minio_create_amd_dir.png[Creating an AMD dir] + +[#copy-build-agent-bin] +=== g. Copy the downloaded `circleci-agent` file +Copy the downloaded `circleci-agent` file (step c) to your virtual environment, and place it in the amd64 directory you just created. + +[,shell] +---- +# The final structure of the bucket should look similar to this: +CIRCLE_AGENT_VERSION/ +├── checksums.txt +└── linux/ + └── amd64/ + └── circleci-agent +---- + +image::guides:ROOT:./minio/minio_upload_cci_agent.png[Uploading the CircleCI agent] + +[#copy-other-agents] +== 3. Copy other agents + +In a similar manner to `circleci-agent` from step 2, we also need to copy `docker-agent` and `machine-agent` to the `circleci-data` bucket. + +[#download-docker-agent] +=== a. Download docker agent + +[source,bash] +---- +# Set `DOCKER_AGENT_VERSION` using the value of `circleci/docker-agent` from `images.yaml` +export DOCKER_AGENT_VERSION=$(grep 'circleci/docker-agent:' ./circleci-server/images.yaml | cut -d' ' -f2) + +# Download the Docker agent +curl -O --compressed https://circleci-binary-releases.s3.amazonaws.com/docker-provisioner/$DOCKER_AGENT_VERSION/linux/amd64/agent + +# Download the checksums +curl -O https://circleci-binary-releases.s3.amazonaws.com/docker-provisioner/$DOCKER_AGENT_VERSION/checksums.txt +---- + +[#download-machine-agent] +=== b. 
Download machine agent + +[source,bash] +---- +# Set `MACHINE_AGENT_VERSION` using the value of `circleci/machine-agent` from `images.yaml` +export MACHINE_AGENT_VERSION=$(grep 'circleci/machine-agent:' ./circleci-server/images.yaml | cut -d' ' -f2) + +# Download the Machine agent +curl -O --compressed https://circleci-binary-releases.s3.amazonaws.com/machine-provisioner/$MACHINE_AGENT_VERSION/linux/amd64/agent + +# Download the checksums +curl -O https://circleci-binary-releases.s3.amazonaws.com/machine-provisioner/$MACHINE_AGENT_VERSION/checksums.txt +---- + + +[#copy-agents] +=== c. Copy the downloaded agents + +Copy the downloaded `docker-agent` and `machine-agent` files (from steps a and b) to your virtual environment, and create a directory structure as follows: + +[,shell] +---- +# The final structure should look like this: +docker-provisioner/ +├── release.txt # contains the value of $DOCKER_AGENT_VERSION +└── DOCKER_AGENT_VERSION/ # not a literal string, but the value of release.txt + ├── checksums.txt + └── linux/ + └── amd64/ + └── agent +machine-provisioner/ +├── release.txt # contains the value of $MACHINE_AGENT_VERSION +└── MACHINE_AGENT_VERSION/ # not a literal string, but the value of release.txt + ├── checksums.txt + └── linux/ + └── amd64/ + └── agent +---- + + +[#copy-other-miscellaneous-files] +== 4. Copy other miscellaneous files +Follow the steps in this section to copy the required files into MinIO within your air-gapped environment. + +[#copy-canary-txt-file] +=== a. Copy canary.txt file +Download the `canary.txt` file required by distributor. + +[,bash] +---- +# Download canary.txt +curl -O https://circleci-binary-releases.s3.amazonaws.com/circleci-agent/canary.txt +---- + +Copy this `canary.txt` file to the root directory of the `circleci-data` bucket. + +[#next-steps] +== Next steps + +Once the steps on this page are complete, go to the xref:phase-3-install-circleci-server.adoc[Phase 3 - Install CircleCI server] guide. 
diff --git a/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc new file mode 100644 index 0000000000..acdd7cc5ec --- /dev/null +++ b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc @@ -0,0 +1,169 @@ += Phase 3 - install CircleCI server +:page-platform: Server 4.9, Server Admin +:page-description: How to install the CircleCI server 4.9 Helm deployment to an air-gapped environment. +:experimental: + +With prerequisites installed, and object storage configured, you can now copy over and install the CircleCI Helm deployment to the Kubernetes cluster in your air-gapped environment. + +[#prepare-values-yaml] +== 1. Prepare `values.yaml` +The `values.yaml` file for installing CircleCI should be prepared according to the Create Helm Values section of the regular installation guide (xref:installation:phase-2-aws-core-services.adoc#create-helm-values[AWS], xref:installation:phase-2-gcp-core-services.adoc#create-helm-values[GCP]). Once this is complete, you will modify the fields listed in the steps below for air-gapped installation compatibility. + +=== a. Configure global values + +NOTE: For a full example of a `values.yaml` file for an air-gapped environment, see the xref:example-values.adoc[Example `values.yaml`] page. + +In the `global.container` section of your `values.yaml` file: + +* Set the `domainName` value to the internal domain name of the installation +* Set `container.org` to `circleci` +* Set `container.registry` to the internal registry hostname (and port if using a non-standard port) of your instance + +[source, yaml] +---- +global: + ... 
+ domainName: "server.internal.example.com" + container: + registry: "docker.internal.example.com:5000" + org: "<image-registry-org>" +---- + +NOTE: If you are using a non-TLS installation for your Docker registry, visit the xref:additional-considerations.adoc#non-tls-docker-registry-installations[Non-TLS Docker Installation] section for consideration. + +=== b. Configure GitHub Enterprise +In the `github` section of your `values.yaml` file, configure settings for your GitHub Enterprise installation. + +The `hostname` value should be the internal hostname of your GitHub enterprise installation. The `enterprise` value should be set to true. + +If this instance is not using TLS or self-signed certificates, make sure to set the `unsafeDisableWebhookSSLVerification: true` and `selfSignedCert: true` values. + +Additionally, an OAuth application and a personal access token should be set in GitHub Enterprise and values provided as shown below. For more information about this process, see the documentation for creating a GitHub OAuth application. For details, see the xref:installation:phase-1-aws-prerequisites.adoc#create-a-new-github-oauth-app[AWS] or xref:installation:phase-1-gcp-prerequisites.adoc#create-a-new-github-oauth-app[GCP] installation guide. + +[source, yaml] +---- +# GitHub Enterprise +github: + hostname: "github.internal.example.com" + unsafeDisableWebhookSSLVerification: true # If using self-signed certificates + enterprise: true + selfSignedCert: true # If using self-signed certificates + # These must be generated and added manually from GitHub Enterprise + clientId: "<<github-enterprise-oauth-app-client-id>>" + clientSecret: "<<github-enterprise-oauth-app-client-secret>>" + defaultToken: "<<github-enterprise-personal-application-token>>" +---- + +[#configure-minio-storage] +=== c. Configure MinIO object storage +In the `object_storage` section of the `values.yaml` file, add the following configuration, modifying ports as necessary. 
+ +[source, yaml] +---- +object_storage: + bucketName: circleci-data + expireAfter: 0 + s3: + enabled: true + endpoint: http://<minio-internal-hostname>:9000 + accessKey: <minio-root-user> + secretKey: <minio-root-password> +---- + +NOTE: Update the endpoint protocol to `http` or `https` depending on your MinIO installation. + + +=== d. Configure MinIO build agent +In the distributor section of the `values.yaml` file, point `agent_base_url` to the `circleci-data` bucket you created in your MinIO installation. + +[source, yaml] +---- +distributor: + agent_base_url: http://<minio-internal-hostname>:9000/circleci-data/ +---- + +NOTE: Port 9000 is referenced here as that is a default for MinIO. If your MinIO instance is configured differently, this port will need to be updated. + +NOTE: Update the protocol to `http` or `https` depending on your MinIO installation. + + +[#configure-build-agent-image] +=== e. Configure the Nomad build agent image +Specify the location of the Nomad build agent image within your registry, copied during phase 1, modifying the port as necessary. + +[source, yaml] +---- +nomad: + ... + buildAgentImage: "<your-internal-registry-hostname>:5000/circleci/picard" +---- + +NOTE: Update the port on your Docker registry hostname as necessary + +NOTE: If using a non-TLS installation for your Docker registry, visit the xref:additional-considerations.adoc#non-tls-docker-registry-installations[Non-TLS Docker Installation] section for consideration. + + +[#configure-machine-provisioner] +=== f. Disable machine provisioner +In the `machine_provisioner` section of `values.yaml`, set `enabled` to `false` to disable it. This feature requires cloud connectivity. 
+ +[source, yaml] +---- +# Machine provisioner disabled - Requires cloud connectivity +machine_provisioner: + enabled: false +---- + +When following these instructions to simply re-host agents in a partially air-gapped environment, you can configure the machine provisioner for MinIO as follows: + +[source, yaml] +---- +machine_provisioner: + agent_base_url: http://<minio-internal-hostname>:9000/circleci-data/machine-provisioner +---- + +[#configure-docker-provisioner] +=== g. Configure Docker provisioner + +Update the `docker_provisioner` section of the `values.yaml` file to point `agent_base_url` to the MinIO bucket: + +[source, yaml] +---- +docker_provisioner: + agent_base_url: http://<minio-internal-hostname>:9000/circleci-data/docker-provisioner +---- + + +[#add-additional-nginx-annotations] +=== h. Add additional nginx annotations +Add any additional nginx annotations as necessary depending on your installation to provision a load balancer. In this example, MetalLB is used. For more information, see the xref:additional-considerations.adoc#service-type-load-balancers-k3s[Service Type Load Balancers in K3s] section on the Additional considerations page. + +[source, yaml] +---- +# Additional nginx annotations +nginx: + annotations: + # This example uses MetalLB as a k3s load balancer + metallb.universe.tf/allow-shared-ip: default +---- + +[#install-circleci-server-helm-airgap] +== 2. Install CircleCI server + +With your completed `values.yaml` file and the copied Helm chart, run the Helm install command in your air-gapped environment to install CircleCI server. + +We recommend first creating a namespace (`circleci-server`) and deploying the chart into that namespace. + +[source,bash,subs=attributes+] +---- +helm install circleci-server ./circleci-server/ -n <kubernetes-namespace> --version {serverversion49} -f <path-to-values.yaml> +---- + +[#post-install-circleci-server-helm-airgap] +== 3. 
Post installation steps +After the Helm deployment, depending on your installation, it may be necessary to manually patch the `circleci-proxy` Load Balancer service (such as when using MetalLB). For more information, see the xref:additional-considerations.adoc#service-type-load-balancers-k3s[Service Type Load Balancers in K3s] section on the Additional considerations page. + +[#next-steps] +== Next steps + +Once the steps on this page are complete, go to the xref:phase-4-configure-nomad-clients.adoc[Phase 4 - Configure Nomad clients] guide. diff --git a/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc new file mode 100644 index 0000000000..519a103173 --- /dev/null +++ b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc @@ -0,0 +1,134 @@ += Phase 4 - Configure Nomad clients +:page-platform: Server 4.9, Server Admin +:page-description: How to configure Nomad clients to run with CircleCI server 4.9 in an air-gapped environment. +:experimental: + +CircleCI server uses Nomad clients to perform container-based build actions. These machines will need to exist within the air-gapped environment to communicate with the CircleCI server Helm deployment. CircleCI server requires Nomad client images to use CGroupsV1 and is not compatible with CgroupsV2. + +NOTE: In the following sections, replace any sections indicated by `< >` with your details. + +[#install-docker-and-nomad] +== 1. Install Docker and Nomad +Install the following two prerequisites on each instance designated as a Nomad client: + +- link:https://docs.docker.com/get-docker/[An installation of Docker] +- link:https://developer.hashicorp.com/nomad/install[An installation of Nomad] + + +[#create-nomad-directories] +== 2. Create Nomad directories +On each machine, create a config directory. 
If your installation uses mTLS, also create a nested SSL directory:
+
+[source, bash]
+----
+sudo mkdir /etc/nomad/
+sudo mkdir /etc/nomad/ssl/
+----
+
+[#create-ci-docker-network]
+== 3. Create the Docker ci-privileged network
+Create a Docker network named `ci-privileged` with the following command. Once complete, restart the Nomad service.
+
+[source, bash]
+----
+sudo docker network create --label keep --driver=bridge --opt com.docker.network.bridge.name=ci-privileged ci-privileged
+
+sudo service nomad restart
+----
+
+[#retrieve-mtls-certificates]
+== 4. (Optional) Retrieve mTLS certificates
+
+If Nomad mTLS is configured on your installation, you will need to provide three files to each Nomad client. If mTLS is not configured, you can skip this step.
+
+- `ca.pem`
+- `key.pem`
+- `cert.pem`
+
+These files can be retrieved from a secret in the namespace of the CircleCI server Helm installation (`nomad-rpc-mtls`) using the following command:
+
+[source, bash]
+----
+kubectl get secret -n <namespace> nomad-rpc-mtls -o yaml > secret.yaml
+----
+
+This command writes the secret to a local file named `secret.yaml`. Each required file (`ca.pem`, `key.pem`, `cert.pem`) is stored as a base64 encoded string within the secret. Each string must be copied, decoded, and placed in a file in each of your Nomad clients.
+
+[source, bash]
+----
+# For each of ca.pem, key.pem, cert.pem in the secret output
+echo -n "ca.pem-base64-encoded-string" | base64 --decode > ca.pem
+echo -n "cert.pem-base64-encoded-string" | base64 --decode > cert.pem
+echo -n "key.pem-base64-encoded-string" | base64 --decode > key.pem
+----
+
+== 5. (Optional) Copy mTLS keys to each Nomad client
+If using mTLS, the `ca.pem`, `key.pem`, and `cert.pem` keys must be copied to each client and placed in the locations listed below. If mTLS is not configured, you can skip this step.
+
+[source, text]
+----
+/etc/nomad/ssl/ca.pem
+/etc/nomad/ssl/cert.pem
+/etc/nomad/ssl/key.pem
+----
+
+== 6.
Configure the Nomad conf.hcl file on each machine +For each Nomad client, configure the following `conf.hcl` file at `/etc/nomad/conf.hcl`. Remember to replace all items displayed between `< >`. + +[source, hcl] +---- +log_level = "DEBUG" +name = "<instance-hostname>" +data_dir = "/opt/nomad" +datacenter = "default" +advertise { + http = "<instance-internal-ip>" + rpc = "<instance-internal-ip>" + serf = "<instance-internal-ip>" +} + +client { + enabled = true + server_join = { + retry_join = ["<kubernetes-cluster-internal-ip>:4647"] + } + node_class = "linux-64bit" + options = {"driver.raw_exec.enable" = "1"} +} + +telemetry { + collection_interval = "1s" + disable_hostname = true + prometheus_metrics = true + publish_allocation_metrics = true + publish_node_metrics = true +} + +---- + +If mTLS is configured, the following block must also be added at the end of the `conf.hcl` file: + +[source, hcl] +---- +tls { + http = false + rpc = true + verify_server_hostname = true + ca_file = "/etc/nomad/ssl/ca.pem" + cert_file = "/etc/nomad/ssl/cert.pem" + key_file = "/etc/nomad/ssl/key.pem" +} +---- + +== 7. Test connectivity +Test connectivity between your clients and cluster by starting the Nomad agent and observing logs. + +[source, bash] +---- +sudo nomad agent -config /etc/nomad/conf.hcl +---- + +[#next-steps] +== Next steps + +Once the steps on this page are complete, go to the xref:phase-5-test-your-installation.adoc[Phase 5 - Test your installation] guide. 
diff --git a/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc new file mode 100644 index 0000000000..1b91aafc21 --- /dev/null +++ b/docs/server-admin-4.9/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc @@ -0,0 +1,48 @@ += Phase 5 - Test your installation +:page-platform: Server 4.9, Server Admin +:page-description: How to test your CircleCI server 4.9 installation in an air-gapped environment. +:experimental: + +Test your air-gapped installation using CircleCI's Reality Check project. + +[#copy-reality-check] +== 1. Copy CircleCI server Reality Check +Copy the link:https://github.com/circleci/realitycheck[CircleCI server Reality Check repository] to your GitHub Enterprise installation in your air-gapped environment. + +[#setup-reality-check] +== 2. Set up CircleCI server Reality Check +Set up CircleCI server Reality Check using the link:https://github.com/circleci/realitycheck#installation[instructions in the README]. + +CAUTION: Make sure to set the `CIRCLE_CLOUD_PROVIDER` environment variable to `other`. + +[#modify-reality-check] +== 3. Modify Reality Check source code +Replace any references to Docker images in `.circleci/config.yml` with your internal Docker registry. + +For example, in your GitHub Enterprise instance, change the following: + +[source, yaml] +---- + ... + artifacts_test_results: + docker: + - image: python:3.6.0 +---- + + +[source, yaml] +---- + ... + artifacts_test_results: + docker: + - image: <your-internal-registry-hostname-and-port>/python:3.6.0 +---- + +NOTE: Leave image values nested under "machine" as is. Only modify Docker image values. + + +[#run-reality-check] +== 4. Run Reality Check +Run Reality Check by pushing a commit to it in your GitHub Enterprise installation. 
+
+NOTE: the `aws_jobs` and `gcp_jobs` workflows will be cancelled automatically due to the `CIRCLE_CLOUD_PROVIDER` environment variable being set to `other`.
diff --git a/docs/server-admin-4.9/modules/installation/pages/hardening-your-cluster.adoc b/docs/server-admin-4.9/modules/installation/pages/hardening-your-cluster.adoc
new file mode 100644
index 0000000000..c37f9cb76c
--- /dev/null
+++ b/docs/server-admin-4.9/modules/installation/pages/hardening-your-cluster.adoc
@@ -0,0 +1,279 @@
+= Hardening your cluster
+:page-platform: Server 4.9, Server Admin
+:page-description: This section provides supplemental information on hardening your Kubernetes cluster for CircleCI server 4.9.
+:experimental:
+
+This section provides supplemental information on hardening your Kubernetes cluster.
+
+[#network-topology]
+== Network topology
+A server installation basically runs three different types of compute instances: the Kubernetes nodes, Nomad clients, and external VMs.
+
+Best practice is to make as many of the resources as private as possible. If users will access your CircleCI server installation via VPN, there is no need to assign any public IP addresses, as long as you have a NAT gateway setup. Otherwise, you will need at least one public subnet for the `circleci-proxy` load balancer.
+
+It is also recommended to place Nomad clients and VMs in a public subnet to enable users to SSH into jobs and scope access via networking rules.
+
+NOTE: An nginx reverse proxy is placed in front of link:https://github.com/Kong/charts[Kong] and exposed as a Kubernetes service named `circleci-proxy`. nginx is responsible for routing the traffic to the following services: `kong` and `nomad`.
+
+CAUTION: When using Amazon Certificate Manager (ACM), the name of the nginx service will be `circleci-proxy-acm` instead of `circleci-proxy`.
If you have switched from some other method of handling your TLS certificates to using ACM, this change will recreate the load balancer and you will have to reroute your associated DNS records for your `<domain>` and `app.<domain>`.
+
+CAUTION: When using Nomad, clients and servers should be configured to use mTLS for secure communication.
+
+[#network-traffic]
+== Network traffic
+This section explains the minimum requirements for a server installation to work. Depending on your workloads, you might need to add additional rules to egress for Nomad clients and VMs. As nomenclature between cloud providers differs, you will probably need to implement these rules using firewall rules and/or security groups.
+
+Where you see "external," this usually means all external IPv4 addresses. Depending on your particular setup, you might be able to be more specific (for example, if you are using a proxy for all external traffic).
+
+The rules explained here are assumed to be stateful and for TCP connections only, unless stated otherwise. If you are working with stateless rules, you need to create matching ingress or egress rules for the ones listed here.
+
+[#reverse-proxy-status]
+=== Reverse proxy status
+You may wish to check the status of the services routing traffic in your CircleCI server installation and alert if there are any issues. Since we use both nginx and Kong in CircleCI server, we expose the status pages of both via port 80.
+
+[.table.table-striped]
+[cols=2*, options="header", stripes=even]
+|===
+| Service
+| Endpoint
+
+| nginx
+| `/nginx_status`
+
+| Kong
+| `/kong_status`
+|===
+
+[#kubernetes-load-balancers]
+== Kubernetes load balancers
+Depending on your setup, your load balancers might be transparent (that is, they are not treated as a distinct layer in your networking topology). In this case, you can apply the rules from this section directly to the underlying destination or source of the network traffic.
Refer to the documentation of your cloud provider to make sure you understand how to correctly apply networking security rules, given the type of load balancing used by your installation. + +[#ingress-load-balancers] +=== Ingress +If the traffic rules for your load balancers have not been created automatically, here are their respective ports: + +[.table.table-striped] +[cols=4*, options="header", stripes=even] +|=== +| Name +| Port +| Source +| Purpose + +| `circleci-proxy/-acm` +| 80 +| External +| User interface and frontend API + +| `circleci-proxy/-acm` +| 443 +| External +| User interface and frontend API + +| `circleci-proxy/-acm` +| 3000 +| Nomad clients +| Communication with Nomad clients + +| `circleci-proxy/-acm` +| 4647 +| Nomad clients +| Communication with Nomad clients + +| `circleci-proxy/-acm` +| 8585 +| Nomad clients +| Communication with Nomad clients +|=== + +[#egress-load-balancers] +=== Egress +The only egress needed is for TCP traffic to the Kubernetes nodes on the Kubernetes load balancer ports (30000-32767). This egress is not needed if your load balancers are transparent. + +[#common-rules-for-compute-instances] +== Common rules for compute instances +These rules apply to all compute instances, but not to the load balancers. + +[#ingress-common] +=== Ingress +If you want to access your instances using SSH, you will need to open port 22 for TCP connections for the instances in question. +It is recommended to scope the rule as closely as possible to allowed source IP addresses and/or only add such a rule when needed. + +[#egress-common] +=== Egress +You most likely want all of your instances to access internet resources. This requires allowing egress for UDP and TCP on port 53 to the DNS server within your VPC, and TCP ports 80 and 443 for HTTP and HTTPS traffic. +Instances building jobs (that is, the Nomad clients and external VMs) also will likely need to pull code from your VCS using SSH (TCP port 22). 
SSH is also used to communicate with external VMs, and should be allowed for all instances with the destination of the VM subnet and your VCS. + +[#kubernetes-nodes] +== Kubernetes nodes + +[#intra-node-traffic] +=== Intra-node traffic +By default, the traffic within your Kubernetes cluster is regulated by networking policies. This should be sufficient to regulate the traffic between pods. No additional requirements are needed to reduce traffic between Kubernetes nodes any further (it is fine to allow all traffic between Kubernetes nodes). + +To make use of networking policies within your cluster, you may need to take additional steps, depending on your cloud provider and setup. Here are some resources to get you started: + +* link:https://kubernetes.io/docs/concepts/services-networking/network-policies/[Kubernetes Network Policy Overview] +* link:https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy[Creating a Cluster Network Policy on Google Cloud] +* link:https://docs.aws.amazon.com/eks/latest/userguide/calico.html[Installing Calico on Amazon EKS] + +[#ingress-kubernetes] +=== Ingress +If you are using a managed service, you can check the rules created for the traffic coming from the load balancers and the allowed port range. The standard port range for Kubernetes load balancers (30000-32767) should be all that is needed here for ingress. If you are using transparent load balancers, you need to apply the ingress rules listed for load balancers above. + +[#egress-kubernetes] +=== Egress + +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Port +| Destination +| Purpose + +| 4647 +| Nomad clients +| Communication with the Nomad clients + +| all traffic +| other nodes +| Allow intra-cluster traffic +|=== + +[#nomad-clients-ingress-egress] +== Nomad clients +Nomad clients do not need to communicate with each other. You can block traffic between Nomad client instances completely. 
+ +[#ingress-nomad] +=== Ingress +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Port +| Source +| Purpose + +| 4647 +| K8s nodes +| Communication with Nomad server + +| 64535-65535 +| External +| Rerun jobs with SSH functionality +|=== + +[#egress-nomad] +=== Egress +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Port +| Destination +| Purpose + +| 22 +| VMs +| SSH communication with VMs + +| 4647 +| Nomad Load Balancer +| Internal communication +|=== + +[#external-vms] +== External VMs +Similar to Nomad clients, there is no need for external VMs to communicate with each other. + +[#ingress-external] +=== Ingress +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Port +| Source +| Purpose + +| 22 +| Kubernetes nodes +| Internal communication + +| 22 +| Nomad clients +| Internal communication + +| 2376 +| Kubernetes nodes +| Internal communication + +| 2376 +| Nomad clients +| Internal communication + +| 54782 +| External +| Rerun jobs with SSH functionality +|=== + +[#egress-external] +=== Egress +You will only need the egress rules for internet access and SSH for your VCS. + +[#notes-on-aws-networkingl] +== Notes on AWS networking with machine provisioner +When using the EC2 provider for machine provisioner, there is an `assignPublicIP` option available in the `values.yaml` file. + +[source,yaml] +---- +machine_provisioner: + ... + providers: + ec2: + ... + assignPublicIP: false +---- + +By default, this option is set to false, meaning any instance created by machine provisioner will only be assigned a private IP address. + +[#private-ips-only] +=== Private IP addresses only +When the `assignPublicIP` option is set to false, restricting traffic with security group rules between services can be done using the link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html[Source Security Group ID parameter]. 
+ +Within the ingress rules of the VM security group, the following rules can be created to harden your installation: + +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Port +| Origin +| Purpose + +| 54782 +| CIDR range of your choice +| Allows users to SSH into failed virtual machine based jobs and to retry and debug + +|=== + +[#using-public-ips] +=== Using public IP addresses + +When the `assignPublicIP` option is set to true, all EC2 instances created by machine provisioner are assigned **public** IPv4 addresses. Also, all services communicating with them do so via their public addresses. + +When hardening an installation where the machine provisioner uses public IP addresses, the following rules can be created: + +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Port +| Origin +| Purpose + +| 54782 +| CIDR range of your choice +| Allows users to SSH into failed virtual machine based jobs to retry and debug. + +|=== + +ifndef::pdf[] +## Next steps +* xref:operator:operator-overview.adoc[server 4.9 Operator Overview] +endif::[] diff --git a/docs/server-admin-4.9/modules/installation/pages/installation-reference.adoc b/docs/server-admin-4.9/modules/installation/pages/installation-reference.adoc new file mode 100644 index 0000000000..d3c99c3de6 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/installation-reference.adoc @@ -0,0 +1,1520 @@ += Installation reference +:page-platform: Server 4.9, Server Admin +:page-description: Reference documentation for installing CircleCI server 4.9. +:experimental: + +[#example-manifests] +== Example manifests +The following are example manifests that contain the basic required parameters necessary to spin up the `circleci-server` Helm installation. + +[#aws] +=== AWS +The snippet below is an example manifest of the necessary parameters for an installation of CircleCI server in an AWS environment. 
Note that this installation uses IAM roles for service accounts (IRSA), which is recommended. Fields with base64 encoding are marked as such. + +[source,yaml] +---- +global: + domainName: "<full-domain-name-of-your-install>" + license: '<license>' + container: + registry: cciserver.azurecr.io + org: + +apiToken: "<circleci-api-token>" +sessionCookieKey: "<session-cookie-key>" + +keyset: + signing: '<generated-signing-key>' + encryption: '<generated-encryption-key>' + +nomad: + server: + gossip: + encryption: + key: "<nomad-gossip-encryption-key>" + rpc: + mTLS: + enabled: true + CACertificate: "<nomad-mtls-base64-ca>" + certificate: "<nomad-mtls-base64-cert>" + privateKey: "<nomad-mtls-base64-key>" + +object_storage: + bucketName: '<s3-bucket-name>' + s3: + enabled: true + endpoint: "<aws-region-url>" # ex: https://s3.us-east-1.amazonaws.com + region: "<aws-region>" + irsaRole: "<arn-of-irsa-role>" + +github: + clientId: "<generated-github-client-id>" + clientSecret: "<generated-github-client-secret>" + +machine_provisioner: + providers: + ec2: + enabled: true + region: "<aws-region>" + subnets: + - "<subnet-id>" + securityGroupId: "<security-group-id>" + irsaRole: "<arn-of-irsa-role>" + tags: + name1: "value1" + name2: "value2" + +mongodb: + auth: + rootPassword: "<mongodb-root-password>" + password: "<mongodb-password>" + +postgresql: + auth: + postgresPassword: "<postgres-password>" + +pusher: + secret: "<pusher-secret>" + +rabbitmq: + auth: + password: "<rabbitmq-password>" + erlangCookie: "<rabbitmq-erlang-cookie>" + +---- + +[#gcp] +=== GCP +The below is an example manifest of the necessary parameters for an installation of CircleCI server in a GCP environment. Note that this installation uses Workload Identity, which is recommended. Fields with base64 encoding are marked as such. 
+ +[source,yaml] +---- +global: + domainName: "<full-domain-name-of-your-install>" + license: '<license-for-circleci-server>' + container: + registry: cciserver.azurecr.io + org: + +apiToken: "<circleci-api-token>" +sessionCookieKey: "<session-cookie-key>" + +keyset: + signing: '<generated-signing-key>' + encryption: '<generated-encryption-key>' + +github: + clientId: "<generated-github-client-id>" + clientSecret: "<generated-github-client-secret>" + +object_storage: + bucketName: "<gcs-bucket-name>" + gcs: + enabled: true + workloadIdentity: "<service-account-email-with-gcs-access>" + +mongodb: + auth: + rootPassword: "<mongodb-root-password>" + password: "<mongodb-password>" + +machine_provisioner: + providers: + gcp: + enabled: true + project_id: <gcp-project-id> + network_tags: + - <network-tag> + zones: + - <gcp-zone1> + - <gcp-zone2> + network: "<gcp-network>" + subnetwork: "" # leave blank for auto-subnetting + workloadIdentity: "<service-account-email-with-compute-access>" + +pusher: + secret: "<pusher-secret>" + +postgresql: + auth: + postgresPassword: "<postgres-password>" + +rabbitmq: + auth: + password: "<rabbitmq-password>" + erlangCookie: "<rabbitmq-erlang-cookie>" + +nomad: + server: + gossip: + encryption: + key: "<nomad-gossip-encryption-key>" + rpc: + mTLS: + enabled: true + CACertificate: "<nomad-mtls-base64-ca>" + certificate: "<nomad-mtls-base64-cert>" + privateKey: "<nomad-mtls-base64-key>" +---- + +[#all-values-yaml-options] +== All Helm `values.yaml` options + +pass:[<!-- vale off -->] +[.table-scroll] +-- +[.datatable.cols=4*] +|=== +|Key |Type |Default |Description + +|api_service.replicas |int |`+1+` |Number of replicas to deploy for the +api-service deployment. + +|api_service.resources.limits.cpu |string |`+"2000m"+` |CPU limit for +the api-service deployment. + +|api_service.resources.limits.memory |string |`+"8Gi"+` |Memory limit +for the api-service deployment. 
+ +|audit_log_service.replicas |int |`+1+` |Number of replicas to deploy +for the audit-log-service deployment. + +|audit_log_service.resources.limits.cpu |string |`+"2000m"+` |CPU limit +for the audit-log-service deployment. + +|audit_log_service.resources.limits.memory |string |`+"6Gi"+` |Memory +limit for the audit-log-service deployment. + +|authentication_service.auth_api.replicas |int |`+1+` |Number of +replicas to deploy for the authentication-service auth api deployment. + +|authentication_service.auth_api.resources.limits.cpu |int |`+2+` |CPU +limit for the authentication-service auth api deployment. + +|authentication_service.auth_api.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the authentication-service auth api +deployment. + +|authentication_service.internal_api.replicas |int |`+1+` |Number of +replicas to deploy for the authentication-service internal api +deployment. + +|authentication_service.internal_api.resources.limits.cpu |int |`+2+` +|CPU limit for the authentication-service internal api deployment. + +|authentication_service.internal_api.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the authentication-service internal api +deployment. + +|authentication_service.login_api.replicas |int |`+1+` |Number of +replicas to deploy for the authentication-service login api deployment. + +|authentication_service.login_api.resources.limits.cpu |int |`+2+` |CPU +limit for the authentication-service login api deployment. + +|authentication_service.login_api.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the authentication-service login api +deployment. + +|authentication_service.worker.replicas |int |`+1+` |Number of replicas +to deploy for the authentication-service worker deployment. + +|authentication_service.worker.resources.limits.cpu |int |`+2+` |CPU +limit for the authentication-service worker deployment. 
+ +|authentication_service.worker.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the authentication-service worker +deployment. + +|branch_service.replicas |int |`+1+` |Number of replicas to deploy for +the branch-service deployment. + +|branch_service.resources.limits.cpu |string |`+"1000m"+` |CPU limit for +the branch-service deployment. + +|branch_service.resources.limits.memory |string |`+"5Gi"+` |Memory limit +for the branch-service deployment. + +|builds_service.replicas |int |`+1+` |Number of replicas to deploy for +the builds-service deployment. + +|builds_service.resources.limits.cpu |string |`+"1500m"+` |CPU limit for +the builds-service deployment. + +|builds_service.resources.limits.memory |string |`+"6Gi"+` |Memory limit +for the builds-service deployment. + +|ciam_gateway.internal_api.replicas |int |`+1+` |Number of replicas to +deploy for the ciam-gateway-service internal deployment. + +|ciam_gateway.internal_api.resources.limits.cpu |int |`+2+` |CPU limit +for the ciam-gateway-service internal api deployment. + +|ciam_gateway.internal_api.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the ciam-gateway-service internal api deployment. + +|ciam_gateway.outgoing_api.replicas |int |`+1+` |Number of replicas to +deploy for the ciam-gateway-service outgoing deployment. + +|ciam_gateway.outgoing_api.resources.limits.cpu |int |`+2+` |CPU limit +for the ciam-gateway-service outgoing api deployment. + +|ciam_gateway.outgoing_api.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the ciam-gateway-service outgoing api deployment. + +|ciam_gateway.public_api.replicas |int |`+1+` |Number of replicas to +deploy for the ciam-gateway-service public deployment. + +|ciam_gateway.public_api.resources.limits.cpu |int |`+2+` |CPU limit for +the ciam-gateway-service public api deployment. + +|ciam_gateway.public_api.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the ciam-gateway-service public api deployment. 
+ +|ciam_service.internal_admin_api.replicas |int |`+1+` |Number of +replicas to deploy for the ciam-service internal admin deployment. + +|ciam_service.internal_admin_api.resources.limits.cpu |int |`+2+` |CPU +limit for the ciam-service internal admin api deployment. + +|ciam_service.internal_admin_api.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the ciam-service internal admin api +deployment. + +|contexts_service.replicas |int |`+1+` |Number of replicas to deploy for +the contexts-service deployment. + +|contexts_service.resources.limits.cpu |string |`+"500m"+` |CPU limit +for the contexts-service deployment. + +|contexts_service.resources.limits.memory |string |`+"5Gi"+` |Memory +limit for the contexts-service deployment. + +|cron_service.replicas |int |`+1+` |Number of replicas to deploy for the +cron-service deployment. + +|cron_service.resources.limits.cpu |string |`+"2000m"+` |CPU limit for +the cron-service deployment. + +|cron_service.resources.limits.memory |string |`+"6Gi"+` |Memory limit +for the cron-service deployment. + +|distributor.agent_base_url |string +|`+"https://circleci-binary-releases.s3.amazonaws.com/circleci-agent"+` +|location of the task-agent. When airgapped, the task-agent will need to +be hosted within the airgap and this value updated + +|distributor_cleaner.replicas |int |`+1+` |Number of replicas to deploy +for the distributor-cleaner deployment. + +|distributor_cleaner.resources.limits.cpu |string |`+"500m"+` |CPU limit +for the distributor-cleaner deployment. + +|distributor_cleaner.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the distributor-cleaner deployment. + +|distributor_dispatcher.replicas |int |`+1+` |Number of replicas to +deploy for the distributor-dispatcher deployment. + +|distributor_dispatcher.resources.limits.cpu |string |`+"500m"+` |CPU +limit for the distributor-dispatcher deployment. 
+ +|distributor_dispatcher.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the distributor-dispatcher deployment. + +|distributor_external.replicas |int |`+1+` |Number of replicas to deploy +for the distributor-external deployment. + +|distributor_external.resources.limits.cpu |string |`+"500m"+` |CPU +limit for the distributor-external deployment. + +|distributor_external.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the distributor-external deployment. + +|distributor_internal.replicas |int |`+1+` |Number of replicas to deploy +for the distributor-internal deployment. + +|distributor_internal.resources.limits.cpu |string |`+"500m"+` |CPU +limit for the distributor-internal deployment. + +|distributor_internal.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the distributor-internal deployment. + +|docker_provisioner.agent_base_url |string +|`+"https://circleci-binary-releases.s3.amazonaws.com/docker-provisioner"+` +|Location of the docker-provisioner agent. When air-gapped, the +docker-provisioner agent will need to be hosted within the air-gap and +this value updated + +|docker_provisioner.custom_config |string |`+""+` |Path to config with +information about docker resource-classes + +|docker_provisioner.enabled |bool |`+true+` | + +|docker_provisioner.external.replicas |int |`+1+` |Number of replicas to +deploy for the docker-provisioner-externalapi deployment. + +|docker_provisioner.external.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the docker-provisioner-internalapi +deployment + +|docker_provisioner.internal.replicas |int |`+1+` |Number of replicas to +deploy for the docker-provisioner-internalapi deployment. 
+ +|docker_provisioner.internal.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the docker-provisioner-internalapi +deployment + +|docker_provisioner.plugin_repository_url |string +|`+"https://circleci-binary-releases.s3.amazonaws.com"+` |Location of +the agent plugin binaries. When air-gapped, the plugin binaries will +need to be hosted within the air-gap and this value updated + +|docker_provisioner.provisioner.replicas |int |`+1+` |Number of replicas +to deploy for the docker-provisioner-provisioner deployment. + +|docker_provisioner.provisioner.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the docker-provisioner-provisioner +deployment + +|docker_provisioner.reaperContainerRepository |string |`+""+` +|Repository to use to download the reaper container. Must contain the +`+pause:3.6+` image + +|domain_service.providersMangerMaxPoolSize |int |`+10+` |Max pool size +for the providers manager + +|domain_service.replicas |int |`+1+` |Number of replicas to deploy for +the domain-service deployment. + +|domain_service.resources.limits.cpu |string |`+"2000m"+` |CPU limit for +the domain-service deployment. + +|domain_service.resources.limits.memory |string |`+"5Gi"+` |Memory limit +for the domain-service deployment. + +|execution_gateway.api.replicas |int |`+1+` |Number of replicas to +deploy for the execution-gateway-api deployment. + +|execution_gateway.api.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the execution-gateway-api deployment + +|execution_gateway.force_legacy_ui |string |`+"false"+` | + +|execution_gateway.plan_concurrency |int |`+2500+` |Maximum concurrency +you wish to permit per org in your environment + +|execution_gateway.public_api.replicas |int |`+1+` |Number of replicas +to deploy for the execution-gateway-public-api deployment. 
 + +|execution_gateway.public_api.resources.limits.memory |string +|`+"1536Mi"+` |Memory limit for the execution-gateway-publicapi +deployment + +|feature_flags_api.replicas |int |`+1+` |Number of replicas to deploy +for the feature-flags-api deployment. + +|feature_flags_api.resources.limits.memory |string |`+"512Mi"+` |Memory +limit for the feature-flags-api deployment. + +|frontend.jvmHeapSize |string |`+"3g"+` | + +|frontend.replicas |int |`+1+` |Number of replicas to deploy for the +frontend deployment. + +|frontend.resources.limits.cpu |string |`+"2000m"+` |CPU limit for the +frontend deployment. + +|frontend.resources.limits.memory |string |`+"5Gi"+` |Memory limit for +the frontend deployment. + +|github |object +|`+{"clientId":"","clientSecret":"","enterprise":false,"fingerprint":null,"hostname":"ghe.example.com","scheme":"https","unsafeDisableWebhookSSLVerification":false}+` +|VCS Configuration details (currently limited to Github Enterprise and +Github.com) + +|github.clientId |string |`+""+` |Client ID for OAuth Login via Github +(2 Options). + +*Option 1:* Set the value here and CircleCI will +create the secret automatically. + +*Option 2:* Leave this blank, +and create the secret yourself. CircleCI will assume it exists. + + +Create one by navigating to Settings > Developer Settings > OAuth Apps. +Your homepage should be set to +`+{{ .Values.global.scheme }}://{{ .Values.global.domainName }}+` and +callback should be +`+{{ .Values.global.scheme }}://{{ .Values.global.domainName }}/auth/github+`. + +|github.clientSecret |string |`+""+` |Client Secret for OAuth Login via +Github (2 Options). + +*Option 1:* Set the value here and CircleCI +will create the secret automatically. + +*Option 2:* Leave this +blank, and create the secret yourself. CircleCI will assume it exists. ++ + Retrieved from the same location as specified in github.clientId. 
+ +|github.enterprise |bool |`+false+` |Set to `+true+` for Github +Enterprise and `+false+` for Github.com + +|github.fingerprint |string |`+nil+` |Required when it is not possible +to directly `+ssh-keyscan+` a GitHub Enterprise instance. It is not +possible to proxy `+ssh-keyscan+`. + +|github.hostname |string |`+"ghe.example.com"+` |Github hostname. +Ignored on Github.com. This is the hostname of your Github Enterprise +installation. + +|github.scheme |string |`+"https"+` |One of '`http`' or '`https`'. +Ignored on Github.com. Set to '`http`' if your Github Enterprise +installation is not using TLS. + +|github.unsafeDisableWebhookSSLVerification |bool |`+false+` |Disable +SSL Verification in webhooks. This is not safe and shouldn’t be done in +a production scenario. This is required if your Github installation does +not trust the certificate authority that signed your Circle server +certificates (e.g they were self signed). + +|global.container.org |string |`+""+` |The registry organization to pull +all images from (if in use), defaults to none. + +|global.container.registry |string |`+"cciserver.azurecr.io"+` |The +registry to pull all images from, defaults to "`cciserver.azurecr.io`". + +|global.domainName |string |`+""+` |Domain name of your CircleCI install + +|global.imagePullSecrets[0] |string |`+"regcred"+` | + +|global.license |string |`+""+` |License (2 Options) For your CircleCI +Installation + +*Option 1:* Set the value global.license and +CircleCI will create the secret automatically. + +*Option 2:* Leave +this blank, and create the secret yourself. CircleCI will assume it +exists. + +The secret must be named '`license`' and have the key +'`license`' (where the value equals the base64 encoded string of your +license). It must be in the same namespace as your installation. 
+ +|global.nodeAffinity |object |`+{}+` |NodeAffinity template to apply to +all CircleCI pods + +|global.nodeSelector |object |`+{}+` |NodeSelector template to apply to +all CircleCI pods + +|global.scheme |string |`+"https"+` |Scheme for your CircleCI install + +|global.tolerations |object |`+{}+` |Tolerations to apply to all +CircleCI pods + +|global.tracing.collector_host |string |`+""+` | + +|global.tracing.enabled |bool |`+false+` | + +|global.tracing.sample_rate |float |`+1+` | + +|insights_service.dailyCronHour |int |`+3+` |Defaults to 3AM local +server time. + +|insights_service.hourlyCronMinute |int |`+35+` |Defaults to 35 minutes +past the hour. + +|insights_service.isEnabled |bool |`+true+` |Whether or not to enable +the insights-service deployment. + +|insights_service.replicas |int |`+1+` |Number of replicas to deploy for +the insights-service deployment. + +|insights_service.skipPermissionsCheck |bool |`+false+` |Enable to skip +the permissions check on the org page and show all projects + +|keyset |object |`+{"encryption":"","signing":""}+` |Keysets (2 Options) +used to encrypt and sign artifacts generated by CircleCI. You need these +values to configure server. + +*Option 1:* Set the values +keyset.signing and keyset.encryption here and CircleCI will create the +secret automatically. + +*Option 2:* Leave this blank, and create +the secret yourself. CircleCI will assume it exists. + +The secret +must be named '`signing-keys`' and have the keys; signing-key, +encryption-key. 
+ +|keyset.encryption |string |`+""+` |Encryption Key To generate an +artifact ENCRYPTION key run: +`+docker run circleci/server-keysets:latest generate encryption -a stdout+` + +|keyset.signing |string |`+""+` |Signing Key To generate an artifact +SIGNING key run: +`+docker run circleci/server-keysets:latest generate signing -a stdout+` + +|kong.acme.email |string |`+"your-email@example.com"+` | + +|kong.acme.enabled |bool |`+false+` |This setting controls the automatic +fetching and renewal of Let’s Encrypt certificates. It defaults to +false. If set to true, you must ensure that the appropriate DNS entries +are in place after the Helm install/upgrade. The ACME certificate will +only be generated when a valid DNS entry is configured for your domain +(and the app subdomain). + +|kong.debug_level |string |`+"notice"+` |Debug level for Kong. Available +levels: `+debug+`, `+info+`, `+warn+`, `+error+`, `+crit+`, `+notice+`. + +|kong.image.repository |string |`+"kong"+` |The Docker image repository +for Kong. Note this repository is not managed by CircleCI. + +|kong.image.tag |string |`+"3.4.2"+` |The Kong image tag. Kong has been +tested against this specific version tag; edit this value at your own +risk. + +|kong.nginx_worker_processes |int |`+10+` |Determines the number of +worker processes spawned by Nginx. + +|kong.replicas |int |`+1+` | + +|kong.resources.limits.cpu |string |`+"3072m"+` |CPU limit for the kong +deployment. + +|kong.resources.limits.memory |string |`+"3072Mi"+` |Memory limit for +the kong deployment. + +|kong.resources.requests.cpu |string |`+"512m"+` |CPU request for the +kong deployment. + +|kong.resources.requests.memory |string |`+"512Mi"+` |Memory request for +the kong deployment. + +|kong.status_page |bool |`+false+` |Set to true for public health check +page (kong) for load balancers to hit + +|legacy_notifier.replicas |int |`+1+` |Number of replicas to deploy for +the legacy-notifier deployment. 
+ +|legacy_notifier.resources.limits.cpu |string |`+"2000m"+` |CPU limit +for the legacy-notifier deployment. + +|legacy_notifier.resources.limits.memory |string |`+"5Gi"+` |Memory +limit for the legacy-notifier deployment. + +|machine_provisioner.agent_base_url |string +|`+"https://circleci-binary-releases.s3.amazonaws.com/machine-provisioner"+` +|Location of the machine-provisioner agent. When air-gapped, the +machine-provisioner agent will need to be hosted within the air-gap and +this value updated + +|machine_provisioner.agent_download_timeout_seconds |int |`+10+` +|Timeout when attempting to download task-agent or docker-agent (remote +docker) in machine-agent + +|machine_provisioner.custom_config |string |`+""+` |Path to config with +information about images/providers/resource-classes + +|machine_provisioner.demandFudgeFactor |int |`+2+` |demandFudgeFactor +multiplies the demand from distributor with an additional factor + +|machine_provisioner.dlcDockerDiskSizeGB |int |`+100+` +|dlcDockerDiskSizeGB Configure size of docker disk size. Used for ratio +to prune on + +|machine_provisioner.dlcMaxDiskThresholdGB |int |`+15+` +|dlcMaxDiskThresholdGB configure dlc max disk threshold + +|machine_provisioner.dlcUnusedLifespanDays |int |`+3+` +|dlcUnusedLifespanDays Configure how long to keep dlc images and build +cache for + +|machine_provisioner.enabled |bool |`+true+` | + +|machine_provisioner.external.replicas |int |`+1+` |Number of replicas +to deploy for the machine-provisioner-externalapi deployment. + +|machine_provisioner.external.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the machine-provisioner-externalapi +deployment + +|machine_provisioner.fudgeConstantTerm |int |`+0+` |fudgeConstantTerm +adds to the results for the forecast rules. + +|machine_provisioner.fudgeScaleFactor |float |`+1.4+` |fudgeScaleFactor +multiplies the results for the forecast rules. 
+ +|machine_provisioner.installID |string |`+"production"+` |Unique tag +machine provisioner applies to machines it manages. + +|machine_provisioner.internal.replicas |int |`+1+` |Number of replicas +to deploy for the machine-provisioner-internalapi deployment. + +|machine_provisioner.internal.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the machine-provisioner-internalapi +deployment + +|machine_provisioner.leader.replicas |int |`+1+` |Number of replicas to +deploy for the machine-provisioner-leader deployment. + +|machine_provisioner.leader.resources.limits.memory |string |`+"512Mi"+` +|Memory limit for the machine-provisioner-leader deployment + +|machine_provisioner.machine_agent_download_timeout_seconds |int |`+10+` +|Timeout when attempting to download machine-agent onto a VM + +|machine_provisioner.plugin_repository_url |string +|`+"https://circleci-binary-releases.s3.amazonaws.com"+` |Location of +the agent plugin binaries. When air-gapped, the plugin binaries will +need to be hosted within the air-gap and this value updated + +|machine_provisioner.providers |object +|`+{"ec2":{"accessKey":"","assignPublicIP":false,"assumedRoleArn":"","enabled":false,"iops":0,"irsaRole":"","linuxAMI":"","region":"us-west-1","secretKey":"","securityGroupId":"sg-123","subnets":["subnet-abc","subnet-def"],"tags":{"key1":"value1","key2":"value2"},"throughput":0,"windowsAMI":""},"gcp":{"assignPublicIP":true,"enabled":false,"linuxImage":"","network":"default","network_tags":["circleci-vm"],"project_id":"my-server-project","region":"us-central1","service_account":{"project_id":"... ...","type":"service_account"},"subnetwork":"my-server-vm-subnet","windowsImage":"","workloadIdentity":"","zones":["us-central1-a","us-central1-b","us-central1-c","us-central1-f"]}}+` +|Provider configuration for Machine Provisioner. + +|machine_provisioner.providers.ec2.accessKey |string |`+""+` |EC2 +Authentication Config (3 Options). 
+ +*Option 1:* Set accessKey and +secretKey here, and CircleCI will create the secret for you. + + +*Option 2:* Leave accessKey and secretKey blank, and create the secret +yourself. CircleCI will assume it exists. + +*Option 3:* Leave +accessKey and secretKey blank, and set the irsaRole field (IAM roles for +service accounts). + +|machine_provisioner.providers.ec2.assumedRoleArn |string |`+""+` +|Configure a role for Remote Docker/Machine jobs to assume + +|machine_provisioner.providers.ec2.enabled |bool |`+false+` |Set to +enable EC2 as a virtual machine provider + +|machine_provisioner.providers.ec2.iops |int |`+0+` |EBS volume IOPS +value. Backend default is 5000 if not specified. + +IOPS can range +from 3,000 to 16,000 for gp3 volumes. + +Maximum of 500 IOPS per GiB +of volume size. + +|machine_provisioner.providers.ec2.throughput |int |`+0+` |EBS volume +throughput in MB/s. Backend default is 500 if not specified. + + +Throughput can range from 125 to 1,000 MiB/s for gp3 volumes. + +|machine_provisioner.providers.gcp.enabled |bool |`+false+` |Set to +enable GCP Compute as a VM provider + +|machine_provisioner.providers.gcp.service_account |object +|`+{"project_id":"... ...","type":"service_account"}+` |GCP Compute +Authentication Config (3 Options). + +*Option 1:* Set +service_account with the service account JSON (raw JSON, not a string), +and CircleCI will create the secret for you. + +*Option 2:* Leave +the service_account field as its default, and create the secret +yourself. CircleCI will assume it exists. + +*Option 3:* Leave the +service_account field as its default, and set the workloadIdentityField +with a service account email to use workload identities. + +|machine_provisioner.provisioner.replicas |int |`+1+` |Number of +replicas to deploy for the machine-provisioner-provisioner deployment. 
 + +|machine_provisioner.provisioner.resources.limits.memory |string +|`+"512Mi"+` |Memory limit for the machine-provisioner-provisioner deployment + +|machine_provisioner.terminatePendingLinuxAfter |string |`+"6m"+` |Linux +pending machine timeout. Machine instances will be terminated if they +take longer than this to start + +|machine_provisioner.terminatePendingWindowsAfter |string |`+"6m"+` +|Windows pending machine timeout. Machine instances will be terminated if +they take longer than this to start + +|mongodb.architecture |string |`+"standalone"+` | + +|mongodb.auth.database |string |`+"admin"+` | + +|mongodb.auth.existingSecret |string |`+""+` | + +|mongodb.auth.mechanism |string |`+"SCRAM-SHA-1"+` | + +|mongodb.auth.password |string |`+""+` | + +|mongodb.auth.rootPassword |string |`+""+` | + +|mongodb.auth.username |string |`+"root"+` | + +|mongodb.fullnameOverride |string |`+"mongodb"+` | + +|mongodb.hosts |string |`+"mongodb:27017"+` |MongoDB host. This can be a +comma-separated list of multiple hosts for sharded instances. + +|mongodb.image.tag |string |`+"3.6.22-debian-9-r38"+` | + +|mongodb.internal |bool |`+true+` |Set to false if you want to use an +externalized MongoDB instance. 
+ +|mongodb.labels.app |string |`+"mongodb"+` | + +|mongodb.labels.layer |string |`+"data"+` | + +|mongodb.options |string |`+""+` | + +|mongodb.persistence.size |string |`+"8Gi"+` |To increase PVC size, +follow this guide: +https://circleci.com/docs/server/operator/expanding-internal-database-volumes + +|mongodb.podAnnotations.”backup.velero.io/backup-volumes” |string +|`+"datadir"+` | + +|mongodb.podLabels.app |string |`+"mongodb"+` | + +|mongodb.podLabels.layer |string |`+"data"+` | + +|mongodb.ssl |bool |`+false+` | + +|mongodb.tlsInsecure |bool |`+false+` |If using an SSL connection with +custom CA or self-signed certs, set this to true + +|mongodb.useStatefulSet |bool |`+true+` | + +|nginx.annotations.”service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled” +|string |`+"true"+` | + +|nginx.annotations.”service.beta.kubernetes.io/aws-load-balancer-type” +|string |`+"nlb"+` |Use `+nlb+` for Network Load Balancer and `+clb+` +for Classic Load Balancer see +https://aws.amazon.com/elasticloadbalancing/features/ for feature +comparison. + +|nginx.aws_acm.enabled |bool |`+false+` |⚠️ WARNING: Enabling this will +recreate frontend’s service which will recreate the load balancer. If +you are updating your deployed settings, then you will need to route +your frontend domain to the new loadbalancer. You will also need to add +`+service.beta.kubernetes.io/aws-load-balancer-ssl-cert: <acm-arn>+` to +the `+nginx.annotations+` block. + +|nginx.image.repository |string |`+"nginxinc/nginx-unprivileged"+` |The +Docker image repository for NGINX. Note this repository is not managed +by CircleCI. + +|nginx.image.tag |string |`+"1.28.0"+` |Nginx has been tested against +this specific version tag; edit this value at your own risk. + +|nginx.loadBalancerIp |string |`+""+` |Load Balancer IP. 
To use a static +IP for the provisioned load balancer with GCP, set to a reserved static +ipv4 address + +|nginx.loadBalancerSourceRanges |list |`+[]+` |Load Balancer Source IP +CIDRs List of IP CIDRs allowed access to load balancer + +|nginx.private_load_balancers |bool |`+false+` | + +|nginx.replicas |int |`+1+` | + +|nginx.resources.limits.cpu |string |`+"3000m"+` |CPU limit for the +nginx deployment. + +|nginx.resources.limits.memory |string |`+"3072Mi"+` |Memory limit for +the nginx deployment. + +|nginx.resources.requests.cpu |string |`+"500m"+` |CPU request for the +nginx deployment. + +|nginx.resources.requests.memory |string |`+"512Mi"+` |Memory request +for the nginx deployment. + +|nomad.auto_scaler.aws.accessKey |string |`+""+` |AWS Authentication +Config (3 Options). + +*Option 1:* Set accessKey and secretKey here, +and CircleCI will create the secret for you. + +*Option 2:* Leave +accessKey and secretKey blank, and create the secret yourself. CircleCI +will assume it exists. + +*Option 3:* Leave accessKey and secretKey +blank, and set the irsaRole field (IAM roles for service accounts). + +|nomad.auto_scaler.aws.autoScalingGroup |string |`+"asg-name"+` | + +|nomad.auto_scaler.aws.enabled |bool |`+false+` | + +|nomad.auto_scaler.aws.irsaRole |string |`+""+` | + +|nomad.auto_scaler.aws.region |string |`+"some-region"+` | + +|nomad.auto_scaler.aws.secretKey |string |`+""+` | + +|nomad.auto_scaler.enabled |bool |`+false+` | + +|nomad.auto_scaler.gcp.enabled |bool |`+false+` | + +|nomad.auto_scaler.gcp.mig_name |string +|`+"some-managed-instance-group-name"+` | + +|nomad.auto_scaler.gcp.project_id |string |`+"some-project"+` | + +|nomad.auto_scaler.gcp.region |string |`+""+` | + +|nomad.auto_scaler.gcp.service_account |object +|`+{"project_id":"... ...","type":"service_account"}+` |GCP +Authentication Config (3 Options). + +*Option 1:* Set +service_account with the service account JSON (raw JSON, not a string), +and CircleCI will create the secret for you. 
+ +*Option 2:* Leave +the service_account field as its default, and create the secret +yourself. CircleCI will assume it exists. + +*Option 3:* Leave the +service_account field as its default, and set the workloadIdentity field +with a service account email to use workload identities. + +|nomad.auto_scaler.gcp.workloadIdentity |string |`+""+` | + +|nomad.auto_scaler.gcp.zone |string |`+""+` | + +|nomad.auto_scaler.image.repository |string +|`+"hashicorp/nomad-autoscaler"+` |The Docker image repository for the +Nomad Autoscaler. Note this repository is not managed by CircleCI. + +|nomad.auto_scaler.image.tag |string |`+"0.4.6"+` |Nomad Autoscaler has +been tested against this specific version tag; edit this value at your +own risk. + +|nomad.auto_scaler.scaling.cooldown |string |`+"2m"+` |A time interval +after a scaling action during which no additional scaling will be +performed on the resource. + +|nomad.auto_scaler.scaling.evaluation_interval |string |`+"1m"+` +|Defines how often the policy is evaluated by the Autoscaler. + +|nomad.auto_scaler.scaling.max |int |`+5+` | + +|nomad.auto_scaler.scaling.min |int |`+1+` | + +|nomad.auto_scaler.scaling.node_drain_deadline |string |`+"5m"+` | + +|nomad.buildAgentImage |string |`+"circleci/picard"+` |By default, +Dockerhub is assumed to be the image registry unless otherwise specified +eg: registry.example.com/organization/repository + +|nomad.clients |object |`+{}+` | + +|nomad.server.gossip.encryption.enabled |bool |`+true+` | + +|nomad.server.image.repository |string |`+"hashicorp/nomad"+` |The +Docker image repository for the Nomad Server. Note this repository is +not managed by CircleCI. + +|nomad.server.image.tag |string |`+"1.10.0"+` |Nomad has been tested +against this specific version tag; edit this value at your own risk. 
+ +|nomad.server.internal |bool |`+true+` |Disables deploying this chart’s +Internal Nomad servers + +|nomad.server.pdb.enabled |bool |`+true+` | + +|nomad.server.pdb.minAvailable |int |`+2+` | + +|nomad.server.port |int |`+4646+` | + +|nomad.server.replicas |int |`+5+` | + +|nomad.server.rpc.mTLS |object +|`+{"CACertificate":"","certificate":"","privateKey":""}+` |Nomad mTLS +(3 Options), strongly suggested for RPC communication + +Encrypts +traffic and authenticates clients to ensure no unauthenticated clients +can join the cluster. + +*Option 1:* Leave the values blank, and +don’t use mTLS (not recommended). + +*Option 2:* Provide the +CACertificate, certificate, and privateKey values - CircleCI will create +the secret for you. + +*Option 3:* Leave the value blank (default) +and create the secret yourself. If the values are populated, CircleCI +will use them. + +The secret must be named '`nomad-mtls`', be in the +same namespace, and have the key:values of: ca.pem: +'`base64-encoded-certificate-authority`', key.pem: +'`base64-encoded-private-key`', cert.pem: +'`base64-encoded-certificate`'. + +|nomad.server.scheme |string |`+"http"+` | + +|nomad.server.serverHostname |string |`+"cluster.local"+` |- When +nomad.server.internal is false, use `+serverHostname+` to set the URL +for connecting to your external nomad servers + +|nomad.server.service.unsafe_expose_api |bool |`+false+` | + +|object_storage |object +|`+{"bucketName":"","expireAfter":0,"gcs":{"enabled":false,"service_account":{"project_id":"... ...","type":"service_account"},"workloadIdentity":""},"s3":{"accessKey":"","enabled":false,"endpoint":"https://s3.us-east-1.amazonaws.com","irsaRole":"","partition":"aws","presigned":true,"region":"us-east-1","secretKey":"","storageRole":""}}+` +|Object storage for build artifacts, audit logs, test results and more. +One of object_storage.s3.enabled or object_storage.gcs.enabled must be +true for the chart to function. 
+ +|object_storage.expireAfter |int |`+0+` |Number of days after which +artifacts will expire from the UI + +|object_storage.gcs.service_account |object +|`+{"project_id":"... ...","type":"service_account"}+` |GCP Storage +(GCS) Authentication Config (3 Options). + +*Option 1:* Set +`+service_account+` with the service account JSON (raw JSON, not a +string), and CircleCI will create the secret for you. + +*Option 2:* +Leave the `+service_account+` field as its default, and create the +secret yourself. CircleCI will assume it exists. + +*Option 3:* +Leave the `+service_account+` field as its default, and set the +`+workloadIdentity+` field with a service account email to use workload +identities. + +|object_storage.s3 |object +|`+{"accessKey":"","enabled":false,"endpoint":"https://s3.us-east-1.amazonaws.com","irsaRole":"","partition":"aws","presigned":true,"region":"us-east-1","secretKey":"","storageRole":""}+` +|S3 Configuration for Object Storage. Authentication methods: AWS +Access/Secret Key, and IRSA Role + +|object_storage.s3.accessKey |string |`+""+` |AWS Authentication Config +(3 Options). + +*Option 1:* Set accessKey and secretKey here, and +CircleCI will create the secret for you. + +*Option 2:* Leave +accessKey and secretKey blank, and create the secret yourself. CircleCI +will assume it exists. + +*Option 3:* Leave accessKey and secretKey +blank, and set the irsaRole field (IAM roles for service accounts), also +set region: "`your-aws-region`". + +|object_storage.s3.endpoint |string +|`+"https://s3.us-east-1.amazonaws.com"+` |API endpoint for S3. If in +AWS us-west-2, for example, this would be the regional endpoint +https://s3.us-west-2.amazonaws.com. If using S3 compatible storage, +specify the API endpoint of your object storage server + +|object_storage.s3.presigned |bool |`+true+` |When true object storage +will be handled with presigned URLs. When false direct bucket access +will be used instead. Direct access requires storageRole to be +non-empty. 
+ +|object_storage.s3.storageRole |string |`+""+` |A role that can be +assumed to provide direct bucket access credentials. Required if +presigned is false + +|oidc_service.isEnabled |bool |`+false+` |Whether or not to enable oidc +support. + +|oidc_service.json_web_keys |string |`+""+` |The json web key (JWK) or +key set (JWKS) used for signing ID tokens. Value should be base64 +encoded. + +|oidc_service.replicas |int |`+1+` |Number of replicas to deploy for the +oidc-service deployment. + +|oidc_service.resources |object +|`+{"limits":{"cpu":"200m","memory":"128Mi"},"requests":{"cpu":"100m","memory":"128Mi"}}+` +|Resource configuration for the oidc-service deployment. + +|oidc_service.token_max_ttl |string |`+"5h"+` |Maximum time-to-live for +newly minted ID tokens. + +|oidc_tasks_service.replicas |int |`+1+` |Number of replicas to deploy +for the oidc-tasks-service deployment. + +|oidc_tasks_service.resources |object +|`+{"limits":{"cpu":"200m","memory":"128Mi"},"requests":{"cpu":"100m","memory":"128Mi"}}+` +|Resource configuration for the oidc-tasks-service deployment. + +|orb_service.replicas |int |`+1+` |Number of replicas to deploy for the +orb-service deployment. + +|orb_service.resources.limits.cpu |string |`+"4000m"+` |CPU limit for +the orb-service deployment. + +|orb_service.resources.limits.memory |string |`+"8Gi"+` |Memory limit +for the orb-service deployment. + +|output.internal.replicas |string |`+nil+` |Number of replicas to deploy +for the output-internal deployment. + +|output.internal.resources.limits.memory |string |`+"1Gi"+` |Memory +limit for the output-internal deployment. + +|output.public |object +|`+{"replicas":1,"resources":{"limits":{"memory":"512Mi"}}}+` |Number of +replicas to deploy for the output-public deployment. + +|output.public.resources.limits.memory |string |`+"512Mi"+` |Memory +limit for the output-public deployment. 
+ +|output.receiver |object +|`+{"replicas":1,"resources":{"limits":{"memory":"1Gi"}}}+` |Number of +replicas to deploy for the output-receiver deployment. + +|output.receiver.resources.limits.memory |string |`+"1Gi"+` |Memory +limit for the output-receiver deployment. + +|permissions_service.replicas |int |`+1+` |Number of replicas to deploy +for the permissions-service deployment. + +|permissions_service.resources.limits.cpu |string |`+"1000m"+` |CPU +limit for the permissions-service deployment. + +|permissions_service.resources.limits.memory |string |`+"1Gi"+` |Memory +limit for the permissions-service deployment. + +|policy_service.replicas |int |`+1+` |Number of replicas to deploy for +the policy-service deployment. + +|policy_service.resources.limits.cpu |string |`+"200m"+` |CPU limit for +the policy-service deployment. + +|policy_service.resources.limits.memory |string |`+"128Mi"+` |Memory +limit for the policy-service deployment. + +|policy_service_internal.replicas |int |`+1+` |Number of replicas to +deploy for the policy-service-internal deployment. + +|policy_service_internal.resources.limits.cpu |string |`+"200m"+` |CPU +limit for the policy-service-internal deployment. + +|policy_service_internal.resources.limits.memory |string |`+"128Mi"+` +|Memory limit for the policy-service-internal deployment. + +|postgresql.auth.existingSecret |string |`+""+` |Name of existing secret +to use for PostgreSQL credentials. `+auth.postgresPassword+` and +`+auth.password+` will be ignored and picked up from this secret. 
The +existing secret must contain the key `+postgres-password+` when +postgresql.internal is true or `+password+` when postgresql.internal is +false + +|postgresql.auth.password |string |`+""+` |Use only when +postgresql.internal is false, this is the password of your externalized +postgres user. Ignored if `+auth.existingSecret+` with key `+password+` +is provided + +|postgresql.auth.postgresPassword |string |`+""+` |Password for the +"`postgres`" admin user on the internal postgres instance. Use only when +postgresql.internal is true. Ignored if `+auth.existingSecret+` with key +`+postgres-password+` is provided. + +|postgresql.auth.username |string |`+""+` |Use only when +postgresql.internal is false, then this is the username used to connect +with your externalized postgres instance + +|postgresql.fullnameOverride |string |`+"postgresql"+` | + +|postgresql.image.pullSecrets[0] |string |`+"regcred"+` | + +|postgresql.image.registry |string |`+"cciserver.azurecr.io"+` | + +|postgresql.image.repository |string |`+"server-postgres"+` | + +|postgresql.image.tag |string |`+"12.16.37-7629bfd"+` | + +|postgresql.internal |bool |`+true+` | + +|postgresql.postgresqlHost |string |`+"postgresql"+` |The host and port +below are used by the CircleCI application to locate PostgreSQL DB. These +values are not used by the PostgreSQL chart. 
+ +|postgresql.postgresqlPort |int |`+5432+` | + +|postgresql.primary.extendedConfiguration |string +|`+"max_connections = 500\nshared_buffers = 300MB\n"+` | + +|postgresql.primary.labels.app |string |`+"postgres"+` | + +|postgresql.primary.labels.layer |string |`+"data"+` | + +|postgresql.primary.persistence.existingClaim |string |`+""+` |To +increase PVC size, follow this guide: +https://circleci.com/docs/server/operator/expanding-internal-database-volumes + +|postgresql.primary.persistence.size |string |`+"8Gi"+` | + +|postgresql.primary.podAnnotations.”backup.velero.io/backup-volumes” +|string |`+"data"+` | + +|postgresql.primary.podLabels.app |string |`+"postgres"+` | + +|postgresql.primary.podLabels.layer |string |`+"data"+` | + +|postgresql.readReplicas.labels.app |string |`+"postgres"+` | + +|postgresql.readReplicas.labels.layer |string |`+"data"+` | + +|postgresql.readReplicas.podLabels.app |string |`+"postgres"+` | + +|postgresql.readReplicas.podLabels.layer |string |`+"data"+` | + +|proxy.enabled |bool |`+false+` |If false, all proxy settings are +ignored + +|proxy.http |object +|`+{"auth":{"enabled":false,"password":null,"username":null},"host":"proxy.example.com","port":3128}+` +|Proxy for HTTP requests + +|proxy.https |object +|`+{"auth":{"enabled":false,"password":null,"username":null},"host":"proxy.example.com","port":3128}+` +|Proxy for HTTPS requests + +|proxy.no_proxy |list |`+[]+` |List of hostnames, IP CIDR blocks exempt +from proxying. Loopback and intra-service traffic is never proxied. + +|public_api_service.replicas |int |`+1+` | + +|public_api_service.resources.limits.cpu |string |`+"0.2"+` | + +|public_api_service.resources.limits.memory |string |`+"1024Mi"+` | + +|pusher.key |string |`+"circle"+` | + +|rabbitmq.auth.erlangCookie |string |`+""+` |The erlang cookie for your +rabbitMQ instance. The string can be provided here in the values.yaml or +in in a secret with the key, `+rabbitmq-erlang-cookie+`. 
If you are +using a secret, leave this empty and provide the name of your secret +below in `+existingErlangSecret+`. + +|rabbitmq.auth.existingErlangSecret |string |`+""+` |Must contain the +key `+rabbitmq-erlang-cookie+` + +|rabbitmq.auth.existingPasswordSecret |string |`+""+` |Must contain the +key `+rabbitmq-password+` + +|rabbitmq.auth.password |string |`+""+` |The password of your rabbitMQ +admin user. The password can be provided here in the values.yaml or in +a secret with the key, `+rabbitmq-password+`. If you are using a +secret, leave this empty and provide the name of your secret below in +`+existingPasswordSecret+`. + +|rabbitmq.auth.username |string |`+"circle"+` | + +|rabbitmq.fullnameOverride |string |`+"rabbitmq"+` | + +|rabbitmq.host |string |`+"rabbitmq"+` |When `+internal: true+`, this +value is '`rabbitmq`' else host of external rabbitmq instance + +|rabbitmq.image.pullSecrets[0] |string |`+"regcred"+` | + +|rabbitmq.image.registry |string |`+"cciserver.azurecr.io"+` | + +|rabbitmq.image.repository |string |`+"server-rabbitmq"+` | + +|rabbitmq.image.tag |string |`+"3.12.423-3363c50"+` | + +|rabbitmq.internal |bool |`+true+` |Disables this chart’s Internal +RabbitMQ instance + +|rabbitmq.management_gui_port |int |`+15672+` |When `+internal: true+`, +this value is '`15672`' else port of external rabbitmq instance + +|rabbitmq.persistence.existingClaim |string |`+""+` |To increase PVC +size, follow this guide: +https://circleci.com/docs/server/operator/expanding-internal-database-volumes + +|rabbitmq.persistence.size |string |`+"8Gi"+` | + +|rabbitmq.podAnnotations.”backup.velero.io/backup-volumes” |string +|`+"data"+` | + +|rabbitmq.podLabels.app |string |`+"rabbitmq"+` | + +|rabbitmq.podLabels.layer |string |`+"data"+` | + +|rabbitmq.port |int |`+5672+` |When `+internal: true+`, this value is +'`5672`' else port of external rabbitmq instance + +|rabbitmq.replicaCount |int |`+1+` | + +|rabbitmq.secure_amqp |bool |`+false+` |When set to true, 
amqps is used. + +|rabbitmq.statefulsetLabels.app |string |`+"rabbitmq"+` | + +|rabbitmq.statefulsetLabels.layer |string |`+"data"+` | + +|redis.cluster.enabled |bool |`+true+` | + +|redis.cluster.slaveCount |int |`+1+` | + +|redis.fullnameOverride |string |`+"redis"+` | + +|redis.image.tag |string |`+"6.2.1-debian-10-r13"+` | + +|redis.master.extraEnvVars[0].name |string |`+"REDIS_EXTRA_FLAGS"+` | + +|redis.master.extraEnvVars[0].value |string |`+"--databases 30"+` | + +|redis.master.persistence.size |string |`+"8Gi"+` |To increase PVC size, +follow this guide: +https://circleci.com/docs/server/operator/expanding-internal-database-volumes + +|redis.master.podAnnotations.”backup.velero.io/backup-volumes” |string +|`+"redis-data"+` | + +|redis.podLabels.app |string |`+"redis"+` | + +|redis.podLabels.layer |string |`+"data"+` | + +|redis.slave.extraEnvVars[0].name |string |`+"REDIS_EXTRA_FLAGS"+` | + +|redis.slave.extraEnvVars[0].value |string |`+"--databases 30"+` | + +|redis.slave.persistence.size |string |`+"8Gi"+` |To increase PVC size, +follow this guide: +https://circleci.com/docs/server/operator/expanding-internal-database-volumes + +|redis.slave.podAnnotations.”backup.velero.io/backup-volumes” |string +|`+"redis-data"+` | + +|redis.statefulset.labels.app |string |`+"redis"+` | + +|redis.statefulset.labels.layer |string |`+"data"+` | + +|redis.usePassword |bool |`+false+` | + +|runner_admin.cleaner.replicas |int |`+1+` |Number of replicas to deploy +for the radm-cleaner deployment. 
+ +|runner_admin.cleaner.resources.limits.cpu |string |`+"1"+` |CPU limit +for the radm-cleaner deployment + +|runner_admin.cleaner.resources.limits.memory |string |`+"512M"+` +|Memory limit for the radm-cleaner deployment + +|runner_admin.cleaner.resources.requests.cpu |string |`+"1"+` |CPU +request for the radm-cleaner deployment + +|runner_admin.cleaner.resources.requests.memory |string |`+"512M"+` +|Memory request for the radm-cleaner deployment + +|runner_admin.external.replicas |int |`+1+` |Number of replicas to +deploy for the radm-external deployment. + +|runner_admin.external.resources.limits.cpu |string |`+"1"+` |CPU limit +for the radm-external deployment + +|runner_admin.external.resources.limits.memory |string |`+"512M"+` +|Memory limit for the radm-external deployment + +|runner_admin.external.resources.requests.cpu |string |`+"1"+` |CPU +request for the radm-external deployment + +|runner_admin.external.resources.requests.memory |string |`+"512M"+` +|Memory request for the radm-external deployment + +|runner_admin.internal.replicas |int |`+1+` |Number of replicas to +deploy for the radm-internal deployment. + +|runner_admin.internal.resources.limits.cpu |string |`+"1"+` |CPU limit +for the radm-internal deployment + +|runner_admin.internal.resources.limits.memory |string |`+"512M"+` +|Memory limit for the radm-internal deployment + +|runner_admin.internal.resources.requests.cpu |string |`+"1"+` |CPU +request for the radm-internal deployment + +|runner_admin.internal.resources.requests.memory |string |`+"512M"+` +|Memory request for the radm-internal deployment + +|serveUnsafeArtifacts |bool |`+false+` |⚠️ WARNING: Changing this to +true will serve HTML artifacts instead of downloading them. This can +allow specially-crafted artifacts to gain control of users’ CircleCI +accounts. 
+ +|smtp |object +|`+{"host":"smtp.example.com","notificationUser":"builds@circleci.com","password":"secret-smtp-passphrase","port":25,"tls":true,"user":"notification@example.com"}+` +|Email notification settings + +|smtp.port |int |`+25+` |Outbound connections on port 25 are blocked on +most cloud providers. Should you select this default port, be aware that +your notifications may fail to send. + +|smtp.tls |bool |`+true+` |StartTLS is used to encrypt mail by default. +Only disable this if you can otherwise guarantee the confidentiality of +traffic. + +|soketi.image.repository |string |`+"quay.io/soketi/soketi"+` |The +Docker image repository for Soketi. Note this repository is not managed +by CircleCI. + +|soketi.image.tag |string |`+"1.6-16-distroless"+` |Soketi has been +tested against this specific version tag; edit this value at your own +risk. + +|soketi.replicas |int |`+1+` |Number of replicas to deploy for the +soketi deployment. + +|step.internal.replicas |int |`+1+` |Number of replicas to deploy for +the step-internal deployment. + +|step.internal.resources.limits.cpu |int |`+2+` |CPU limit for the +step-internal deployment + +|step.internal.resources.limits.memory |string |`+"512Mi"+` |Memory +limit for the step-internal deployment + +|step.receiver.replicas |int |`+1+` |Number of replicas to deploy for +the step-receiver deployment. 
+ +|step.receiver.resources.limits.cpu |int |`+2+` |CPU limit for the +step-receiver deployment + +|step.receiver.resources.limits.memory |string |`+"512Mi"+` |Memory +limit for the step-receiver deployment + +|telegraf.args[0] |string |`+"--config"+` | + +|telegraf.args[1] |string |`+"/etc/telegraf/telegraf.conf"+` | + +|telegraf.args[2] |string |`+"--config-directory"+` | + +|telegraf.args[3] |string |`+"/etc/telegraf/telegraf.d"+` | + +|telegraf.args[4] |string |`+"--watch-config"+` | + +|telegraf.args[5] |string |`+"poll"+` | + +|telegraf.config.agent.omit_hostname |bool |`+true+` | + +|telegraf.config.inputs[0].statsd.datadog_extensions |bool |`+true+` | + +|telegraf.config.inputs[0].statsd.max_ttl |string |`+"12h"+` | + +|telegraf.config.inputs[0].statsd.metric_separator |string |`+"."+` | + +|telegraf.config.inputs[0].statsd.percentile_limit |int |`+1000+` | + +|telegraf.config.inputs[0].statsd.percentiles[0] |int |`+50+` | + +|telegraf.config.inputs[0].statsd.percentiles[1] |int |`+95+` | + +|telegraf.config.inputs[0].statsd.percentiles[2] |int |`+99+` | + +|telegraf.config.inputs[0].statsd.service_address |string |`+":8125"+` | + +|telegraf.config.outputs[0].file.files[0] |string |`+"stdout"+` | + +|telegraf.custom_config |string |`+""+` | + +|telegraf.fullnameOverride |string |`+"telegraf"+` | + +|telegraf.mountPoints[0].mountPath |string +|`+"/etc/telegraf/telegraf.d"+` | + +|telegraf.mountPoints[0].name |string |`+"telegraf-config"+` | + +|telegraf.resources.limits.memory |string |`+"512Mi"+` |Memory limit for +the telegraf deployment. + +|telegraf.resources.requests.cpu |string |`+"200m"+` |CPU request for +the telegraf deployment. + +|telegraf.resources.requests.memory |string |`+"256Mi"+` |Memory request +for the telegraf deployment. 
+ +|telegraf.volumes[0].configMap.name |string |`+"telegraf-config"+` | + +|telegraf.volumes[0].name |string |`+"telegraf-config"+` | + +|tink |object |`+{"enabled":false,"keyset":""}+` |Tink Configuration ++ + Tink is given precedence over vault. If tink.enabled is true, +vault will not be deployed. Tink or vault must be set once at install +and cannot be changed. + + *Option 1:* Leave this blank, and create +the secret yourself. CircleCI will assume it exists. + + The secret +must be named '`tink`' and have the key; keyset. *Option 2:* Set the +values tink.keyset here and CircleCI will create the secret +automatically. + + Generate a keyset via: +`+tinkey create-keyset --key-template XCHACHA20_POLY1305+` + +|tink.enabled |bool |`+false+` |When enabled, Tink will be used instead +of Vault for contexts encryption. + +|tink.keyset |string |`+""+` |The keyset generated by the Tink CLI to be +used for contexts encryption. + +|tls.certificate |string |`+""+` |Base64 encoded certificate must be +provided if kong.acme.enabled is false + +|tls.certificates |list |`+[]+` |List of base64’d certificates that will +be imported into the system + +|tls.import |list |`+[]+` |List of host:port from which to import +certificates + +|tls.privateKey |string |`+""+` |Base64 encoded private key must be +provided if kong.acme.enabled is false + +|vault |object +|`+{"internal":true,"podAnnotations":{"backup.velero.io/backup-volumes":"data"},"token":"","transitPath":"transit","url":"http://vault:8200"}+` +|External Services configuration + +|vault.internal |bool |`+true+` |Disables this chart’s Internal Vault +instance + +|vault.token |string |`+""+` |This token is required when +`+internal: false+`. + +*Option 1:* Leave this blank, and create the +secret yourself. CircleCI will assume it exists. + +The secret must +be named '`vault`' and have the key; token. + +*Option 2:* Set the +values vault.token here and CircleCI will create the secret +automatically. 
+ +|vault.transitPath |string |`+"transit"+` |When `+internal: true+`, this +value is used for the vault transit path. + +|web_ui.replicas |int |`+1+` |Number of replicas to deploy for the +web-ui deployment. + +|web_ui.resources.limits.memory |string |`+"512Mi"+` |Memory limit +configuration for the web-ui deployment + +|web_ui_authentication.replicas |int |`+1+` |Number of replicas to +deploy for the web-ui-authentication deployment. + +|web_ui_authentication.resources.limits.memory |string |`+"350Mi"+` +|Memory limit configuration for the web-ui-authentication deployment + +|web_ui_server_admin.replicas |int |`+1+` |Number of replicas to deploy +for the web-ui-server-admin deployment. + +|web_ui_server_admin.resources.limits.memory |string |`+"256Mi"+` +|Memory limit configuration for the web-ui-server-admin deployment. + +|webhook_service.isEnabled |bool |`+true+` | + +|webhook_service.replicas |int |`+1+` |Number of replicas to deploy for +the webhook-service deployment. + +|webhook_service.resources.limits.cpu |int |`+2+` |CPU limit +configuration for the webhook-service deployment. + +|webhook_service.resources.limits.memory |string |`+"5Gi"+` |Memory +limit configuration for the webhook-service deployment. + +|workflows_conductor_event_consumer.replicas |int |`+1+` |Number of +replicas to deploy for the workflows-conductor-event-consumer +deployment. + +|workflows_conductor_event_consumer.resources.limits.cpu |string +|`+"6000m"+` |CPU limit configuration for the +workflows-conductor-event-consumer deployment. + +|workflows_conductor_event_consumer.resources.limits.memory |string +|`+"8Gi"+` |Memory limit configuration for the +workflows-conductor-event-consumer deployment. + +|workflows_conductor_grpc.replicas |int |`+1+` |Number of replicas to +deploy for the workflows-conductor-grpc deployment. + +|workflows_conductor_grpc.resources.limits.cpu |string |`+"4000m"+` |CPU +limit configuration for the workflows-conductor-grpc deployment. 
+ +|workflows_conductor_grpc.resources.limits.memory |string |`+"8Gi"+` +|Memory limit configuration for the workflows-conductor-grpc deployment. +|=== +-- +pass:[<!-- vale on -->] diff --git a/docs/server-admin-4.9/modules/installation/pages/installing-server-behind-a-proxy.adoc b/docs/server-admin-4.9/modules/installation/pages/installing-server-behind-a-proxy.adoc new file mode 100644 index 0000000000..f640ccee9b --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/installing-server-behind-a-proxy.adoc @@ -0,0 +1,37 @@ += Installing server behind a proxy +:page-platform: Server 4.9, Server Admin +:page-description: Learn how to install CircleCI server 4.9 behind a proxy. +:experimental: + +Depending on your security requirements, you might want to install CircleCI server behind a proxy. Installing behind a proxy gives you the power to monitor and control access between your installation and the broader Internet. + +Configuring a proxy happens during phase 2, core services (xref:phase-2-aws-core-services.adoc#m-installing-behind-a-proxy[AWS], xref:phase-2-gcp-core-services.adoc#m-installing-behind-a-proxy[GCP]). + +[#known-limitations] +== Known limitations + +* Some additional configuration is required to import orbs when installed behind a proxy. See xref:operator:managing-orbs.adoc#using-orbs-behind-a-proxy[Orbs on server] docs for more information. +* The JVM only accepts proxies that run over HTTP, not HTTPS, and therefore proxy URIs must be of the form `\http://user:password@host:port` rather than `\https://user:password@host:port`. +* If your GitHub instance is running outside of the proxied environment (either GitHub.com or GitHub Enterprise), you must ensure that SSH traffic from CircleCI (inside the Kubernetes cluster) and from our Nomad node can reach your instance. The default `checkout` step in a CircleCI job will fail to clone code and our `ssh-keyscan` of GitHub Enterprise will not work. 
While you may configure an SSH proxy, `ssh-keyscan` can NOT be proxied and instead will require you provide `github.fingerprint` when using GHE. +* If you install server behind a proxy, you may need to provide a custom image for machine provisioner. Visit the link:https://github.com/CircleCI-Public/circleci-server-linux-image-builder[CircleCI Linux Image Builder repository] for further information. +* If object storage is outside the proxy, no job features that use object storage will work. This includes: +** Artifacts +** Test results +** Cache save and restore +** Workspaces ++ +Users can get around this restriction by setting environment variables on their jobs. For example: ++ +[source,yaml] +---- +jobs: + my-job: + docker: + - image: cimg/node:17.2.0 + environment: + HTTP_PROXY: http://proxy.example.com:3128 + HTTPS_PROXY: http://proxy.example.com:3128 + NO_PROXY: whatever.internal,10.0.1.2 +---- ++ +WARNING: It is crucial that these environment variables are set in this specific location because it is the only location that propagates them to the correct service. diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-1-aws-prerequisites.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-1-aws-prerequisites.adoc new file mode 100644 index 0000000000..0b2f54c6d8 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-1-aws-prerequisites.adoc @@ -0,0 +1,9 @@ += Phase 1 AWS - Prerequisites +:page-platform: Server 4.9, Server Admin +:page-description: Find the general and infrastructure-specific requirements that are needed in order to configure the CircleCI server 4.9 application. +:experimental: +:env-aws: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). The directives test for logical opposites. For example, if the attribute is NOT env-aws, display this content. This method was chosen to avoid unintentionally hiding content. 
For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-1.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-1-gcp-prerequisites.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-1-gcp-prerequisites.adoc new file mode 100644 index 0000000000..24064d6dda --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-1-gcp-prerequisites.adoc @@ -0,0 +1,9 @@ += Phase 1 GCP - Prerequisites +:page-platform: Server 4.9, Server Admin +:page-description: Find the general and infrastructure-specific requirements that are needed in order to configure the CircleCI server 4.9 application. +:env-gcp: +:experimental: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). The directives test for logical opposites. For example, if the attribute is NOT env-aws, display this content. This method was chosen to avoid unintentionally hiding content. For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-1.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-2-aws-core-services.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-2-aws-core-services.adoc new file mode 100644 index 0000000000..e087d2f044 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-2-aws-core-services.adoc @@ -0,0 +1,9 @@ += Phase 2 AWS - core services +:page-platform: Server 4.9, Server Admin +:page-description: Installation guide for CircleCI server 4.9 core services. +:env-aws: +:experimental: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). The directives test for logical opposites. For example, if the attribute is NOT env-aws, display this content. 
This method was chosen to avoid unintentionally hiding content. For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-2.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-2-gcp-core-services.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-2-gcp-core-services.adoc new file mode 100644 index 0000000000..b75322bb89 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-2-gcp-core-services.adoc @@ -0,0 +1,9 @@ += Phase 2 GCP - core services +:page-platform: Server 4.9, Server Admin +:page-description: Installation guide for CircleCI server 4.9 core services. +:env-gcp: +:experimental: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). The directives test for logical opposites. For example, if the attribute is NOT env-aws, display this content. This method was chosen to avoid unintentionally hiding content. For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-2.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-3-aws-execution-environments.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-3-aws-execution-environments.adoc new file mode 100644 index 0000000000..e04e1baac2 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-3-aws-execution-environments.adoc @@ -0,0 +1,9 @@ += Phase 3 AWS - execution environments +:page-platform: Server 4.9, Server Admin +:page-description: Installation guide for CircleCI server 4.9 execution environments. +:env-aws: +:experimental: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). The directives test for logical opposites. 
For example, if the attribute is NOT env-aws, display this content. This method was chosen to avoid unintentionally hiding content. For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-3.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-3-gcp-execution-environments.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-3-gcp-execution-environments.adoc new file mode 100644 index 0000000000..660c9e89b5 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-3-gcp-execution-environments.adoc @@ -0,0 +1,9 @@ += Phase 3 GCP - execution environments +:page-platform: Server 4.9, Server Admin +:page-description: Installation guide for CircleCI server 4.9 execution environments. +:experimental: +:env-gcp: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). The directives test for logical opposites. For example, if the attribute is NOT env-aws, display this content. This method was chosen to avoid unintentionally hiding content. For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-3.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-4-aws-post-installation.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-4-aws-post-installation.adoc new file mode 100644 index 0000000000..b82a811057 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-4-aws-post-installation.adoc @@ -0,0 +1,9 @@ += Phase 4 AWS - post installation +:page-platform: Server 4.9, Server Admin +:page-description: CircleCI server 4.9 post installation steps. +:env-aws: +:experimental: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). 
The directives test for logical opposites. For example, if the attribute is NOT env-aws, display this content. This method was chosen to avoid unintentionally hiding content. For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-4.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/phase-4-gcp-post-installation.adoc b/docs/server-admin-4.9/modules/installation/pages/phase-4-gcp-post-installation.adoc new file mode 100644 index 0000000000..3c9e8973db --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/phase-4-gcp-post-installation.adoc @@ -0,0 +1,9 @@ += Phase 4 GCP - post installation +:page-platform: Server 4.9, Server Admin +:page-description: CircleCI server 4.9 post installation steps. +:env-gcp: +:experimental: + +// This doc uses ifdef and ifndef directives to display or hide content specific to Google Cloud Storage (env-gcp) and AWS (env-aws). The directives test for logical opposites. For example, if the attribute is NOT env-aws, display this content. This method was chosen to avoid unintentionally hiding content. For more information, see https://docs.asciidoctor.org/asciidoc/latest/directives/ifdef-ifndef/. + +include::ROOT:partial$installation/phase-4.adoc[] diff --git a/docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc b/docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc new file mode 100644 index 0000000000..03a8e5dc40 --- /dev/null +++ b/docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc @@ -0,0 +1,50 @@ += Upgrade server +:page-platform: Server 4.9, Server Admin +:page-description: "This document lists the steps required to upgrade a CircleCI server 4.9 installation." +:experimental: + +This page describes the steps needed to upgrade your CircleCI server installation to 4.9. 
+ +[#path] +== Upgrade paths + +#TBC# + +[#recommendations] +== Recommendations + +We have moved away from Vault to Tink for encryption. The process for migration is link:https://github.com/CircleCI-Public/server-scripts/tree/main/vault-to-tink[documented here]. The migration includes a convenience script to move existing secrets. + +If you are using Vault, you should complete the migration to Tink on your current installation. Complete this migration before backing up your server in preparation for upgrading to 4.9. + +Customers that do not perform this step may have issues restoring Vault from backup in 4.9. + +[#prerequisites] +== Prerequisites + +* Ensure you have access to the Kubernetes cluster in which server is installed. +* Ensure you have set up xref:operator:backup-and-restore.adoc#[Backup and Restore]. +* Ensure there is a recent backup. For more information, see the xref:operator:backup-and-restore.adoc#creating-backups[Backup and Restore] guide. + +[#upgrade-steps] +== Upgrade steps + +. Ensure your cluster is running a compatible Kubernetes version for this release ({kubernetesversions}). + +. Check the link:https://circleci.com/server/changelog/[changelog] and make sure there are no actions you need to take before deploying a new version. + +. Optionally, confirm what the update is going to do using link:https://github.com/databus23/helm-diff[Helm Diff]: ++ +[source,shell,subs=attributes+] +helm diff upgrade circleci-server oci://cciserver.azurecr.io/circleci-server -n $namespace --version {serverversion49} -f <path-to-values.yaml> --username $USERNAME --password $PASSWORD + +. Upgrade your Nomad clients and servers (if externalized) Terraform modules to the link:https://github.com/CircleCI-Public/server-terraform/releases/tag/4.9.0[4.9 release]. 
Follow the documentation to plan and apply the Terraform changes for your xref:phase-3-aws-execution-environments.adoc#create-your-cluster-with-terraform[AWS] or xref:phase-3-gcp-execution-environments.adoc#create-your-cluster-with-terraform[GCP] environment. + +. Perform the upgrade: ++ +[source,shell,subs=attributes+] +helm upgrade circleci-server oci://cciserver.azurecr.io/circleci-server -n $namespace --version {serverversion49} -f <path-to-values.yaml> --username $USERNAME --password $PASSWORD + +. Deploy and run link:https://github.com/circleci/realitycheck[`reality check`] in your test environment to ensure your installation is fully operational. + +. Migrate any existing machine runner instances from the launch agent to the machine runner 3.0 agent, see the xref:guides:execution-runner:migrate-from-launch-agent-to-machine-runner-3-on-linux.adoc#[migration guide]. The launch agent is not supported. diff --git a/docs/server-admin-4.9/modules/operator/pages/application-lifecycle.adoc b/docs/server-admin-4.9/modules/operator/pages/application-lifecycle.adoc new file mode 100644 index 0000000000..d35bc23cf5 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/application-lifecycle.adoc @@ -0,0 +1,68 @@ += Application lifecycle +:page-platform: Server 4.9, Server Admin +:page-description: Learn about CircleCI server 4.9 semantic versioning and release schedules. +:experimental: + +CircleCI is committed to supporting two minor versions of the software. This means a minor version will be released twice yearly and receive patches for up to 12 months. We use semantic versioning to help identify releases and their impact on your installation. Patch releases will continue to be monthly as needed. + +[#semantic-versioning] +== Semantic versioning +Given a version number, MAJOR.MINOR.PATCH increment, use the: + +. MAJOR version when you make incompatible API changes, +. MINOR version when you add functionality in a backwards-compatible manner, and +. 
PATCH version when you make backwards compatible bug fixes. + +Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. + +[#release-schedule] +== Release schedule +We release monthly patch fixes for bugs and security concerns. We will have twice yearly new minor (feature) releases. All releases will be posted to the change log. To stay up to date with the most recent releases, subscribe to the link:https://circleci.com/server/changelog/[change log]. + +[#end-of-support] +== End of support +With each minor release, in accordance with our link:https://circleci.com/legal/terms-of-service/[terms of service], we end support for previous versions of server. The following table provides dates for previously released versions. + +CAUTION: Future dates listed here may change at any time. + +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Version | Released | End of Support + +|4.9.x +|07/24/2025 +|07/24/2026* + +|4.7.x +|11/07/2024 +|11/07/2025* + +|4.6.x +|8/12/2024 +|07/24/2025 + +|4.5.x +|4/30/2024 +|4/30/2025 + +|4.4.x +|2/05/2024 +|2/28/2025 + +|4.3.x +|11/07/2023 +|11/30/2024 + +|4.2.x +|7/18/2023 +|7/31/2024 + +|4.1.x +|3/21/2023 +|3/31/2024 + +|4.0.x +|7/28/2022 +|7/31/2023 +|=== diff --git a/docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc b/docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc new file mode 100644 index 0000000000..3101c05979 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc @@ -0,0 +1,102 @@ += Backup and restore +:page-platform: Server 4.9, Server Admin +:page-description: This document outlines recommendations for how to back up and restore your CircleCI server 4.9 instance data and state. 
+:experimental: + +[#overview-backup] +== Overview + +When operating and administering CircleCI server, you will need to consider how to maintain backups and recover your installation, should there be a need to migrate it to another cluster or recover from a critical event. + +CircleCI recommends link:https://velero.io/[Velero] for backup and restore. The benefit of this approach is that it not only restores your application's data, +but it also restores the state of the Kubernetes cluster and its resources at the time of the backup. CircleCI server supports backup and restore with Velero `1.12`. This document outlines recommendations for how to back up and restore your CircleCI server instance data and state using link:https://velero.io/[Velero]. + +NOTE: Backup and restore of the CircleCI services is dependent on Velero. If your cluster is lost, you will not be able to restore CircleCI until you have successfully started Velero in the cluster. From there you can recover the CircleCI services. + +[#installing-velero] +== Velero installation + +To use Velero, you must first install the link:https://velero.io/docs/v1.12/basic-install/[Velero CLI]. Then using Velero's CLI, you will need to install the Velero server components along with the appropriate plugin for your cloud storage solution. Follow the instructions linked below based on your chosen storage backend: + +* link:https://github.com/vmware-tanzu/velero-plugin-for-aws#overview[AWS S3] +* link:https://github.com/vmware-tanzu/velero-plugin-for-gcp#overview[GCP GCS] +* link:https://velero.io/docs/v1.12/contributions/minio/[S3 Compatible (MinIO)] + +NOTE: CircleCI server makes use of Persistent Volumes for data storage. To include these volumes in your backups you will need to add the node agent flag `--use-node-agent` to your Velero install command. link:https://velero.io/docs/v1.12/customize-installation/#enable-file-system-backup[Read here] for more information.
+ +[#creating-backups] +== Creating backups + +Now that Velero is installed on your cluster, you are ready to create your first backup. + +NOTE: For best results, we recommend that the backup be taken during a time when your CircleCI server instance is not in use. This is to ensure the data is not being altered while the backup is being generated, which limits the possibility of lost or corrupted data. You may use the following command to scale down the application's services so that no jobs are running during the backup. No data is lost in this process, but the application will be unreachable. Alternatively you may choose to schedule your backups to be done during a time when your CircleCI server instance is not in use. + +[source,bash] +---- +namespace=<your-server-namespace> +kubectl -n "$namespace" scale deploy -l "layer=application" --replicas="0" +---- + +Once you are ready, you may use the following command to create a backup of your CircleCI server instance. + +[source,bash] + +---- +K8S_NS=<your_server_namespace> +CHART=$(helm list -n "$K8S_NS" -o yaml | yq '.[].chart' ) +REV=$(helm list -n "$K8S_NS" -o yaml | yq '.[].revision') +RANDOM_STR=$(cat /dev/urandom | env LC_ALL=C tr -dc 'a-z0-9' | head -c 8) + +velero backup create "${K8S_NS}-${RANDOM_STR}" --include-namespaces "${K8S_NS}" --labels "chart--rev=${CHART}--${REV}" +---- + +NOTE: If you scaled down your application before initiating your backup, you can now restore your application state by running `helm upgrade`. Running this command restores your application to the state you have specified in your `values.yaml`. + +[#restoring-backups] +== Restoring backups + +Below are the instructions for how to restore your CircleCI server install using Velero. 
+ +[source,bash] +---- +# List all existing backups +velero backup get --show-labels + +# Restore the specific backup +velero restore create --include-namespaces <circleci-namespace> --from-backup <backup-name> +---- + +[#scheduling-backups] +== Scheduling backups + +See link:https://velero.io/docs/v1.12/disaster-case/[Velero's documentation] on creating scheduled backups. + +[#troubleshooting-backup] +== Troubleshooting + +[#errors-occur-during-backup-or-restore-process] +=== Errors occur during backup or restore process + +If you experience an error during backup or restore processes, the first place to look would be the Velero logs. +Using the command below, you may find 4XX errors, which would likely be caused by issues with your storage bucket access. + +* Confirm that your bucket exists and is in the region you expect. +* Confirm that the credentials provided to Velero can be used to access the bucket. +* You may need to run the command to install Velero again, this time with updated bucket information. + +You may also check the status of pods in the `velero` namespace: + +[source,shell] +---- +$ kubectl get pods --namespace velero +NAME READY STATUS RESTARTS AGE +node-agent-5vlww 1/1 Pending 0 10m +node-agent-94ptv 1/1 Running 0 10m +node-agent-ch6m9 1/1 Pending 0 10m +node-agent-mknws 1/1 Running 0 10m +velero-68788b675c-dm2s7 1/1 Running 0 10m +---- + +In the above example, some node agent pods are pending, which means they are waiting for a node to have available CPU or memory resources. In this case, you may need to scale your nodes to accommodate the node agent. + +For more details on troubleshooting Velero issues, refer to the link:https://velero.io/docs/v1.12/troubleshooting/[Velero documentation].
diff --git a/docs/server-admin-4.9/modules/operator/pages/circleci-server-security-features.adoc b/docs/server-admin-4.9/modules/operator/pages/circleci-server-security-features.adoc new file mode 100644 index 0000000000..bee7a92961 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/circleci-server-security-features.adoc @@ -0,0 +1,156 @@ += CircleCI server security features +:page-platform: Server 4.9, Server Admin +:page-description: This document outlines security features built into CircleCI server 4.9 and related integrations. +:experimental: + +This document outlines security features built into CircleCI and related integrations. + +[#security-overview] +== Overview +Security is our top priority at CircleCI. We are proactive and we act on security issues immediately. Report security issues to mailto:security@circleci.com[] with an encrypted message using our security team's GPG key (ID: `0x4013DDA7`, fingerprint: `3CD2 A48F 2071 61C0 B9B7 1AE2 6170 15B8 4013 DDA7`). + +[#encryption] +== Encryption +CircleCI uses HTTPS or SSH for all networking in and out of our service, including: + +* From the browser to our services application. +* From the services application to your builder fleet. +* From our builder fleet to your source control system, and all other points of communication. + +None of your code or data travels to or from CircleCI without being encrypted, unless you have code in your builds that does so at your discretion. Operators may also choose to bypass our SSL configuration or not use TLS for communicating with underlying systems. + +The nature of CircleCI is that our software has access to your code and whatever data that code interacts with. With the exception of self-hosted runner, all jobs on CircleCI run in a sandbox (specifically, a container or an ephemeral VM). The sandbox stands alone from all other builds and is not accessible from the Internet or from your own network. + +The build agent pulls code via git over SSH.
Your test suite or job configurations may call out to external services or integration points within your network. The response from such calls will be pulled into your jobs and used at your discretion. After a job is complete, the container that ran the job is destroyed and rebuilt. All environment variables are encrypted using link:https://www.vaultproject.io/[HashiCorp Vault]. Environment variables are encrypted using AES256-GCM96 and are unavailable to CircleCI employees. + +[#sandboxing] +== Sandboxing +With CircleCI, you control the resources allocated to run the builds of your code. With the exception of self-hosted runner, this will be done through instances of our builder boxes that set up the containers in which your builds will run. By their nature, build containers will pull down source code and run whatever test and deployment scripts are part of the codebase or your configuration. The containers are sandboxed, each created and destroyed for one build only (or one slice of a parallel build), and they are not available from outside themselves. The CircleCI service provides the ability to SSH directly to a particular build container. When accessing a container this way, a user will have complete access to any files or processes being run inside that build container. Only provide CircleCI access to those also trusted with your source code. + +[#integrations] +== Integrations +A few different external services and technology integration points touch CircleCI. The following list explains those integration points. + +[#web-sockets] +=== Web sockets + +CircleCI uses link:https://pusher.com/[Pusher] client libraries for WebSocket communication between the server and the browser for jobs such as: + +* Updating builds lists dynamically. +* Displaying the output of a build line-by-line as it occurs. 
+ +For CircleCI server installations, we use an internal server called Slanger, so Pusher servers have no access to your instance of CircleCI, nor your source control system. CircleCI sends build status and build output through the web socket server. Unless you have configured your installation to run without SSL, this is done using the same certs over SSL, so it is encrypted in transit. + +[#source-control-systems] +=== Source control systems + +To use CircleCI you will set up a direct connection with your instance of GitHub Enterprise or GitHub.com. When you set up CircleCI, you authorize the system to check out your private repositories. You may revoke this permission at any time through your GitHub application settings page and by removing CircleCI's Deploy Keys and Service Hooks from your repositories' Admin pages. CircleCI allows you to selectively build your projects but GitHub's permissions model is "all or nothing". That is, CircleCI gets permission to access all of a user's repositories or none of them. Your instance of CircleCI has access to anything hosted in your git repositories and will create webhooks for a variety of events. These webhooks call back to CircleCI, triggering one or more git commands that will pull down code to your build fleet. For example: + +* When code is pushed. +* When a user is added. + +[#dependency-and-source-caches] +=== Dependency and source caches + +Most CircleCI customers use S3 or equivalent cloud-based storage inside their private cloud infrastructure (Amazon VPC, etc) to store their dependency and source caches. These storage servers are subject to the normal security parameters of anything stored on such services, meaning in most cases our customers prevent any outside access. + +[#artifacts] +=== Artifacts + +It is common to use S3 or similar hosted storage for artifacts. Assuming these resources are secured per your normal policies, they are as safe from any outside intrusion as any other data you store there.
+ +[#audit-logs] +== Audit logs +The audit log feature is available for CircleCI installed on your servers or private cloud. + +CircleCI logs important events in the system for audit and forensic analysis purposes. Audit logs are separate from system logs that track performance and network metrics. + +Complete audit logs may be downloaded as a CSV file from the audit log page within the admin section of the application. Audit log fields with nested data contain JSON blobs. + +TIP: The audit log download can take a very long time to start. We recommend clicking the **Download** button once and leaving it to run. + +NOTE: In some situations, the internal machinery may generate duplicate events in the audit logs. The `id` field of the downloaded logs is unique per event and can be used to identify duplicate entries. + +[#audit-log-events] +=== Audit log events + +// TODO: automate this from event-cataloger +The following are the system events that are logged. See `action` in the <<audit-log-fields,Field section below>> for the definition and format. + +- context.create +- context.delete +- context.env_var.delete +- context.env_var.store +- context.secrets.accessed +- project.env_var.create +- project.env_var.delete +- project.settings.update +- user.create +- user.logged_in +- user.logged_out +- workflow.job.approve +- workflow.job.finish +- workflow.job.scheduled +- workflow.job.start + +[#audit-log-fields] +=== Audit log fields + +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +| Field | Description + +| `action` +| The action taken that created the event. The format is ASCII lowercase words separated by dots, with the entity acted upon first and the action taken last. In some cases entities are nested, for example, `workflow.job.start`. + +| `actor` +| The actor who performed this event. In most cases, this will be a CircleCI user. This data is a JSON blob that will always contain `id` and `type` and will likely contain `name`. 
+ +| `target` +| The entity instance acted upon for this event, for example, a project, an org, an account, or a build. This data is a JSON blob that will always contain `id` and `type` and will likely contain `name`. + +| `payload` +| A JSON blob of action-specific information. The schema of the payload is expected to be consistent for all events with the same `action` and `version`. + +| `occurred_at` +| When the event occurred in UTC expressed in ISO-8601 format with up to nine digits of fractional precision, for example '2017-12-21T13:50:54.474Z'. + +| `metadata` +| A set of key/value pairs that can be attached to any event. All keys and values are strings. This can be used to add additional information to certain types of events. + +| `id` +| A UUID that uniquely identifies this event. This is intended to allow consumers of events to identify duplicate deliveries. + +| `version` +| Version of the event schema. Currently the value will always be 1. Later versions may have different values to accommodate schema changes. + +| `scope` +| If the target is owned by an account in the CircleCI domain model, the account field should be filled in with the account name and ID. This data is a JSON blob that will always contain `id` and `type` and will likely contain `name`. + +| `success` +| A flag to indicate if the action was successful. + +| `request` +| If this event was triggered by an external request, this data will be populated and may be used to connect events that originate from the same external request. The format is a JSON blob containing `id` (the unique ID assigned to this request by CircleCI). 
+|=== + +[#checklist-to-using-securely-as-a-customer] +== Checklist to using CircleCI securely as a customer + +If you are getting started with CircleCI, there are some points you can ask your team to consider for security best practices as _users_ of CircleCI: + +* Minimize the number of secrets (private keys / environment variables) your + build needs and rotate secrets regularly. + ** It is important to rotate secrets regularly in your organization, especially as team members come and go. + ** Rotating secrets regularly means your secrets are only active for a certain amount of time, helping to reduce possible risks if keys are compromised. + ** Ensure the secrets you _do_ use are of limited scope, with only enough permissions for the purposes of your build. Consider carefully adjudicating the role and permission systems of other platforms you use outside of CircleCI; for example, when using something such as IAM permissions on AWS, or GitHub's link:https://developer.github.com/v3/guides/managing-deploy-keys/#machine-users[Machine User] feature. +* Sometimes user misuse of certain tools might accidentally print secrets to stdout which will appear in your logs. Be aware of the following: + ** Running `env` or `printenv` which will print all your environment variables to stdout. + ** Literally printing secrets in your codebase or in your shell with `echo`. + ** Programs or debugging tools that print secrets on error. +* Consult your VCS provider's permissions for your organization (if you are in an organization) and try to follow the link:https://en.wikipedia.org/wiki/Principle_of_least_privilege[Principle of Least Privilege]. +* Use Restricted Contexts with teams to share environment variables with a select security group. Read through the xref:guides:security:contexts.adoc#restrict-a-context[contexts] document to learn more. +* Ensure you regularly audit who has access to SSH keys in your organization. 
+* Ensure that your team is using Two-Factor Authentication (2FA) with your VCS (https://help.github.com/en/articles/securing-your-account-with-two-factor-authentication-2fa[GitHub 2FA], link:https://confluence.atlassian.com/bitbucket/two-step-verification-777023203.html[Bitbucket]). If a user's GitHub or Bitbucket account is compromised, a nefarious actor could push code or potentially steal secrets. +* If your project is open source and public, make note of whether you want to share your environment variables. On CircleCI, you can change a project's settings to control whether your environment variables can pass on to _forked versions of your repository_. This is **not enabled** by default. You can read more about these settings and open source security in our xref:guides:integration:oss.adoc#security[Open Source Projects Document]. diff --git a/docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc b/docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc new file mode 100644 index 0000000000..abf3297357 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc @@ -0,0 +1,362 @@ += Configuring external services +:page-platform: Server 4.9, Server Admin +:page-description: This document describes how to configure the following external services for use with a CircleCI server 4.9 installation +:experimental: + +This page describes how to configure external services for use with either a new CircleCI server 4.9 installation or migrating internal PostgreSQL and MongoDB data from existing CircleCI server 4.9 installation to your externalized datastores. + +[#postgresql] +== PostgreSQL + +[#best-practices-for-your-postgresql] +=== Best practices for PostgreSQL + +NOTE: Your externalized PostgreSQL instance needs to be version 12.1 or higher. + +Consider running at least two PostgreSQL replicas to allow recovery from primary failure and for backups. 
The table below shows the recommended specifications for PostgreSQL machines: + +[.table.table-striped] +[cols=6*, options="header", stripes=even] +|=== +|# of Daily Active Users +|# of PostgreSQL Replicas +|CPU +|RAM +|Disk +|NIC Speed + +|<50 +|2 +|8 Cores +|16 GB +|100 GB +| 1 Gbps + +|50 - 250 +|2 +|8 Cores +|16 GB +|200 GB +|1 Gbps + +|250 - 1000 +|3 +|8 Cores +|32 GB +|500 GB +|10 Gbps + +|1000 - 5000 +|3 +|8 Cores +|32 GB +|1 TB +|10 Gbps + +|5000+ +|3 +|8 Cores +|32 GB +|1 TB +|10 Gbps +|=== + +[#migrating-from-internal-postgres] +=== Migrating from an internal PostgreSQL to an externalized source + +NOTE: If you are doing a fresh install of CircleCI server, then you can skip this section and head to <<connecting-your-external-postgres>> + +When a CircleCI server instance is deployed, Postgres is deployed internally by default via its helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external Postgres, you may use the guide below to migrate your Postgres data to your external database. + +CAUTION: This process requires downtime. + +==== 1. Disable the application + +Disable the CircleCI server application by scaling down the application layer pods. No Data is lost in this process, but the application will be unreachable. + +Scale down your application layer pods: + +[source,shell] +---- +namespace=<your-server-namespace> +kubectl -n "$namespace" scale deploy -l "layer=application" --replicas="0" +---- + +Running `kubectl -n "$namespace" get pods` will show most of your pods scaling to down, leaving your database pods running including PostgreSQL. + +==== 2. Validate access to your external PostgreSQL from within the cluster (optional) + +. Confirm that pods within your CircleCI server cluster can access your external PostgreSQL. You can do this from within your internal PostgreSQL. 
++ +[source,shell] +---- +PG_POD=$(kubectl -n "$namespace" get pods | grep postgresql | tail -1 | awk '{print $1}') +kubectl exec -it -n "$namespace" "$PG_POD" -- bash +---- + +. While still connected to the pod run: ++ +[source,shell] +---- +psql -h <your-external-postgres-host> -U postgres -p <your-external-postgres-port> +---- + +You should be able to connect to your external Postgres at this point. If not, resolve any issues before proceeding. + +TIP: You may use `helm upgrade ...` to restore your CircleCI server instance to a running state. + +==== 3. Generate export of your internal PostgreSQL + +. Retrieve your internal Postgres credentials: ++ +[source,shell] +---- +PG_PASSWORD=$(kubectl -n "$namespace" get secrets postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +---- ++ +NOTE: The username for your internal Postgres is `postgres`. The password is randomly generated unless directly set at installation. + +. Connect to your Postgres pod and perform a Postgres dump: ++ +[source,shell] +---- +kubectl -n "$namespace" exec -it "$PG_POD" -- bash -c "export PGPASSWORD='$PG_PASSWORD' && pg_dumpall -U postgres -c" > circle.sql +---- ++ +NOTE: This backup is created in the filesystem used by the Postgres pod. If you wish to store it locally, you may use `kubectl cp -n "$namespace" "$PG_POD":circle.sql /local/dir/circle.sql` + +. Clean up the Postgres Dump. Your internally deployed Postgres uses the username `postgres`. However, during the restore, the Postgres dump will drop all resources before trying to create new ones, including the `postgres` user. Access the Postgres pod where the dump is stored and run the following commands on the Postgres dump file to remove the lines that would delete the Postgres user. 
++
+[source,shell]
+----
+PG_POD=$(kubectl -n "$namespace" get pods | grep postgresql | tail -1 | awk '{print $1}')
+kubectl exec -it -n "$namespace" "$PG_POD" -- bash
+
+sed -i".bak" '/DROP ROLE postgres/d' circle.sql
+sed -i".bak" '/CREATE ROLE postgres/d' circle.sql
+sed -i".bak" '/ALTER ROLE postgres WITH SUPERUSER INHERIT CREATEROLE CREATEDB LOGIN REPLICATION BYPASSRLS PASSWORD/d' circle.sql
+----
+
+==== 4. Restore your data in your external PostgreSQL
+
+While still connected to the internally deployed Postgres, restore the dumped data to your external Postgres:
+
+[source,shell]
+----
+psql -h <your-external-postgres-host> -U postgres -p <your-external-postgres-port> < circle.sql
+----
+
+Now your external Postgres will have your CircleCI server data. In the next section you will update CircleCI server to point to your external Postgres.
+
+[#connecting-your-external-postgres]
+=== Connecting your external PostgreSQL instance to CircleCI server
+
+Once you have set up your external PostgreSQL instance, add the following to your `values.yaml` file so that your CircleCI server instance can access it.
+
+[source,yaml]
+----
+postgresql:
+  internal: false
+  postgresqlHost: <domain> # The domain or IP address of your PostgreSQL instance
+  postgresqlPort: <port> # The port of your PostgreSQL instance
+----
+
+NOTE: `postgresql.internal: false` will remove any previously deployed PostgreSQL instance deployed internally
+
+[tab.postgres.Create_secret_yourself]
+--
+Create the secret and then add the following values to `values.yaml`:
+
+[source,shell]
+----
+kubectl create secret generic postgresql \
+  --from-literal=postgres-password=<postgres-password>
+----
+
+[source,yaml]
+----
+postgresql:
+  ...
+  auth:
+    username: <username>
+    existingSecret: postgresql
+----
+--
+
+[tab.postgres.CircleCI_creates_secret]
+--
+Add the following to
+the `values.yaml` file. CircleCI will create the secret automatically:
+
+[source,yaml]
+----
+postgresql:
+  ...
+  auth:
+    username: <username> # A user with the appropriate privileges to access your PostgreSQL instance.
+    password: <password> # The password of the user account used to access your PostgreSQL instance.
+----
+--
+
+The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized PostgreSQL instance then when you perform `helm upgrade`, the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`.
+
+
+[#backing-up-postgresql]
+=== Back up PostgreSQL
+PostgreSQL provides official documentation for backing up and restoring your PostgreSQL 12 install, which can be found link:https://www.postgresql.org/docs/12/backup.html[here].
+
+We strongly recommend the following:
+
+* Take daily backups
+* Keep at least 30 days of backups
+* Use encrypted storage for backups as databases might contain sensitive information
+* Perform a backup before each upgrade of CircleCI server
+
+[#mongodb]
+== MongoDB
+
+NOTE: If using your own MongoDB instance, it needs to be version 3.6 or higher.
+
+[#migrating-from-internal-mongodb]
+=== Migrating from an internal MongoDB to an externalized source
+
+NOTE: If you are doing a fresh install of CircleCI server, then you can skip this section and head to <<connecting-your-external-mongodb>>
+
+When a CircleCI server instance is deployed, MongoDB is deployed internally by default via its helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external MongoDB, you may use the guide below to migrate your Mongo data to your external database.
+
+CAUTION: This process requires downtime.
+
+==== 1. Disable the application
+
+Disable the CircleCI server application by scaling down the application layer pods. No data is lost in this process, but the application will be unreachable.
+
+Scale down your application layer pods:
+
+[source,shell]
+----
+namespace=<your-server-namespace>
+kubectl -n "$namespace" scale deploy -l "layer=application" --replicas="0"
+----
+
+Running `kubectl -n "$namespace" get pods` will show most of your pods scaling down, leaving your database pods running, including Mongo.
+
+==== 2. Validate access to your external MongoDB from within the cluster (optional)
+
+. Confirm that pods within your CircleCI server cluster can access your external MongoDB. You can do this from within your internal MongoDB pod:
++
+[source,shell]
+----
+MONGO_POD="mongodb-0"
+kubectl exec -it -n "$namespace" "$MONGO_POD" -- bash
+----
+
+. While still connected to the pod run the following:
++
+[source,shell]
+----
+mongo --username <username> --password --authenticationDatabase admin --host <external-mongodb-host> --port <external-mongodb-port>
+----
+
+You should be able to connect to your external MongoDB at this point. If not, resolve any issues before proceeding.
+
+TIP: You may use `helm upgrade ...` to restore your CircleCI server instance to a running state.
+
+==== 3. Generate export of your internal MongoDB
+
+. Retrieve your internal MongoDB credentials:
++
+[source,shell]
+----
+MONGO_POD="mongodb-0"
+MONGODB_USERNAME="root"
+MONGODB_PASSWORD=$(kubectl -n "$namespace" get secrets mongodb -o jsonpath="{.data.mongodb-root-password}" | base64 --decode)
+----
+
+. Create a backup directory in your MongoDB pod:
++
+[source,shell]
+----
+kubectl -n "$namespace" exec "$MONGO_POD" -- mkdir -p /tmp/backups/
+----
+
+. Generate a MongoDB database dump to the backup directory you just created:
++
+[source,shell]
+----
+kubectl -n "$namespace" exec -it "$MONGO_POD" -- bash -c "mongodump -u '$MONGODB_USERNAME' -p '$MONGODB_PASSWORD' --authenticationDatabase admin --db=circle_ghe --out=/tmp/backups/"
+----
+
+==== 4.
Restore your data in your external MongoDB
+
+Use the generated MongoDB backup to restore the data to your external MongoDB:
+
+[source,shell]
+----
+kubectl -n "$namespace" exec "$MONGO_POD" -- mongorestore --drop -u "$MONGODB_USERNAME" -p "$MONGODB_PASSWORD" --host <external-mongodb-host> --port <external-mongodb-port> --authenticationDatabase admin /tmp/backups/circle_ghe;
+----
+
+Now your external MongoDB will have your CircleCI server data. In the next section you will update CircleCI server to point to your external MongoDB.
+
+[#connecting-your-external-mongodb]
+=== Connecting your external MongoDB instance to CircleCI server
+
+Once you have configured your external MongoDB instance, add the following to your `values.yaml` file to connect your CircleCI server instance.
+
+[source,yaml]
+----
+mongodb:
+  internal: false
+  hosts: <hostname:port> # this can be a comma-separated list of multiple hosts for sharded instances
+  ssl: <ssl-enabled>
+  # If using an SSL connection with custom CA or self-signed certs, set this
+  # to true
+  tlsInsecure: false
+  # Any other options you'd like to append to the MongoDB connection string.
+  # Format as query string (key=value pairs, separated by &, special characters
+  # need to be URL encoded)
+  options: <additional-options>
+  auth:
+    database: <authentication-source-database>
+    mechanism: SCRAM-SHA-1
+----
+
+[tab.mongo.Create_secret_yourself]
+--
+Create the secret and then add the following values to `values.yaml`:
+
+[source,shell]
+----
+kubectl create secret generic mongodb \
+--from-literal=mongodb-root-password=<root-password> \
+--from-literal=mongodb-password=dontmatter
+----
+
+[source,yaml]
+----
+mongodb:
+  ...
+  auth:
+    ...
+    username: <username>
+    existingSecret: mongodb
+----
+--
+
+[tab.mongo.CircleCI_creates_secret]
+--
+Add the following to
+the `values.yaml` file. CircleCI will create the secret automatically:
+
+[source,yaml]
+----
+mongodb:
+  ...
+  auth:
+    ...
+ username: <username> + rootPassword: <root-password> + password: <password> +---- +-- + +The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized MongoDB instance then when you perform `helm upgrade`, the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. diff --git a/docs/server-admin-4.9/modules/operator/pages/data-retention.adoc b/docs/server-admin-4.9/modules/operator/pages/data-retention.adoc new file mode 100644 index 0000000000..6041f84613 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/data-retention.adoc @@ -0,0 +1,90 @@ += Data retention in server +:page-platform: Server 4.9, Server Admin +:page-description: Learn how to configure data retention policies for MongoDB, PostgreSQL, and object storage buckets in your CircleCI server installation. +:experimental: + +[#background] +== Background + +You can set up retention policies for both Mongo and PostgreSQL to clean up data older than *n* days. The following sections outline the step-by-step process for implementing these retention policies in your server environment. + +[#setting-postgres-retention] +== Step 1: Setting a retention period for PostgreSQL + +. Connect to a REPL session in one of the frontend pods by running the following commands: ++ +[source,bash] +---- +kubectl exec -it <frontend-xxx> -- /bin/bash +---- ++ +Then, connect to the REPL: ++ +[source,bash] +---- +lein repl :connect 6005 +---- + +. Once connected, the current setting can be verified using the following command: ++ +[source,clojure] +---- +(circle.http.api.admin-commands/get-setting :wfc-workflow-deletion-retention-period) +---- + +. The retention period can be set as needed (the example below sets it to 90 days): ++ +[source,clojure] +---- +(circle.http.api.admin-commands/set-setting :wfc-workflow-deletion-retention-period 90) +---- + +. 
The deletion interval can be verified by running: ++ +[source,clojure] +---- +(circle.http.api.admin-commands/get-setting :wfc-workflow-deletion-interval) +---- ++ +By default, the interval is set to `0`. This value must be updated to a number greater than `0` for WFC deletion to run every *n* seconds. For example, the following command sets it to 1000 seconds: ++ +[source,clojure] +---- +(circle.http.api.admin-commands/set-setting :wfc-workflow-deletion-interval 1000) +---- + +. In instances with significant data volumes, additional `workflows_conductor_event_consumer` replicas may be required to ensure deletion progresses smoothly until it aligns with the configured retention period. + +. The WFC event consumer pod logs can be checked to verify that deletion is progressing without errors. + +. The oldest `created_at` date for a job can be verified to ensure alignment with the retention period using the following command: ++ +[source,bash] +---- +kubectl exec postgresql-0 -- sh -c 'PGPASSWORD=$POSTGRES_PASSWORD psql -U "postgres" -d "conductor_production" -c "SELECT * FROM public.jobs ORDER BY created_at ASC LIMIT 2;"' +---- + +[#setting-mongodb-retention] +== Step 2: Setting a retention period for MongoDB (action logs) + +Retention limits for action logs can be configured in the same REPL session using the following commands: + +[source,clojure] +---- +(circle.http.api.admin-commands/set-setting :delete-old-builds.retention-limit-days 180) +(circle.http.api.admin-commands/set-setting :delete-old-action-logs.enabled true) +---- + +[#setting-s3-lifecycle-policies] +== Step 3: Set up lifecycle policies for the S3 Bucket + +WARNING: **Risk of Irreversible Data Loss** + +Incorrect lifecycle settings may result in data being removed earlier than expected and without recovery options. CircleCI bears no responsibility or liability for any data loss resulting from lifecycle configurations applied to your object storage buckets. 
+
+After configuring retention limits for your MongoDB and PostgreSQL objects, you can also apply object expiry policies to your S3 or GCS buckets. These policies typically expire objects at n+1 days, where n is the retention period set for your databases.
+
+NOTE: **Object Storage Paths**
+
+If a retention policy of `n` days is configured for both MongoDB and PostgreSQL data, you can set `n+1` for **all objects** in your S3/GCS buckets to expire at `n+1` days. This ensures alignment between database retention and object storage retention.
+
+CAUTION: **Audit Logs**
+
+Be sure to configure exceptions for critical paths, such as `audit-logs/*`, in accordance with your organization's compliance or audit requirements. Objects under these paths should not be expired by default.
diff --git a/docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc b/docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc
new file mode 100644
index 0000000000..8940d58a92
--- /dev/null
+++ b/docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc
@@ -0,0 +1,325 @@
+= Expanding internal database volumes
+:page-platform: Server 4.9, Server Admin
+:page-description: Expanding internal database volumes for CircleCI server 4.9.
+:experimental:
+
+[#overview]
+== Overview
+
+If you have chosen to deploy either of the CircleCI databases (MongoDB or PostgreSQL) within the cluster, rather than externally provisioning these databases, there may come a point at which the storage space initially made available to these databases is no longer sufficient. Internal databases in your Kubernetes cluster make use of link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/[persistent volumes] for persistent storage. The size of these volumes is determined by persistent volume claims (PVCs). These PVCs request storage space based on what has been made available to the nodes in your cluster.
+ +This document runs through the steps required to increase PVCs to expand the space available to your internally deployed databases. This operation should not require any downtime, unless you need to restart your database pods. + +NOTE: Expanding persistent volumes does not affect the size of the storage attached to your nodes. Expanding node storage remains within the limitations of your cloud provider. Refer to the docs for your chosen cloud provider for details on how to expand the storage attached to your cluster's nodes. + +[#resizing-persistent-volume-claims] +== Resizing persistent volume claims +Below are the steps detailing how to resize the persistent volume claims for PostgreSQL and MongoDB. You will confirm the size of the claims and the disk space made available to your databases before and after this operation. + +NOTE: As a precaution, it is always a good idea to xref:backup-and-restore.adoc#[create a backup of your cluster] first. + +[#confirm-current-volume-size] +=== 1. Confirm current volume size +By default, the persistent volume claims used by our internal databases have a capacity of 8Gi. However, this initial value can be set at the time of first deployment via the Helm values file. You can confirm the size of your persistent volume claim capacity using the command: `kubectl get pvc <pvc-name>`. 
+ +* **PostgreSQL** ++ +[source,bash] +---- +circleci-user ~ $ kubectl get pvc data-postgresql-0 + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +data-postgresql-0 Bound pvc-c2a2d97b-2b7d-47d3-ac77-d07c76c995a3 8Gi RWO gp2 1d +---- + +* **MongoDB** ++ +[source,bash] +---- +circleci-user ~ $ kubectl get pvc datadir-mongodb-0 + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +datadir-mongodb-0 Bound pvc-58a2274c-31c0-487a-b329-0062426b5535 8Gi RWO gp2 1d +---- + +* **Redis** ++ +[source,bash] +---- +circleci-user ~ $ kubectl get pvc redis-data-redis-master-0 + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +redis-data-redis-master-0 Bound pvc-522b3e1c-172d-482c-8648-c24896d18a72 8Gi RWO gp2 64m + +circleci-user ~ $ kubectl get pvc redis-data-redis-slave-0 + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +redis-data-redis-slave-0 Bound pvc-bfdf976d-6f8b-4136-aaa0-b5fc3fce826b 8Gi RWO gp2 64m +---- + +You can also confirm this capacity is made available to a database by checking the size of its data directory. + +* For **PostgreSQL**, the directory is `/bitnami/postgresql`. You can confirm its size using the command below. ++ +[source,bash] +---- +circleci-user ~ $ kubectl exec postgresql-0 -- df -h /bitnami/postgresql + +Filesystem Size Used Avail Use% Mounted on +/dev/nvme4n1 7.8G 404M 7.4G 3% /bitnami/postgresql +---- + +* For **MongoDB**, the directory is `/bitnami/mongodb`. ++ +[source,bash] +---- +circleci-user ~ $ kubectl exec mongodb-0 -- df -h /bitnami/mongodb + +Filesystem Size Used Avail Use% Mounted on +/dev/nvme1n1 7.8G 441M 7.4G 3% /bitnami/mongodb +---- + +* For **Redis**, the directory is `/data`. 
++ +[source,bash] +---- +circleci-user ~ $ kubectl exec redis-master-0 -- df -h /data + +Filesystem Size Used Avail Use% Mounted on +/dev/nvme2n1 8G 156K 8G 1% /data + +circleci-user ~ $ kubectl exec redis-slave-0 -- df -h /data + +Filesystem Size Used Avail Use% Mounted on +/dev/nvme2n1 8G 156K 8G 1% /data +---- + +From the examples above, the capacities are still 8Gi. The following steps show how to increase this to 10Gi. + +[#confirm-volume-expansion-is-allowed] +=== 2. Confirm volume expansion is allowed +Confirm that volume expansion is allowed in your cluster: + +[source,bash] +---- +circleci-user ~ $ kubectl get sc + +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 (default) kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 1d +---- + +If your default storage class has "ALLOWVOLUMEEXPANSION" set to false, like in the above example, you can change this with the following `kubectl patch` command: + +[source,bash] +---- +circleci-user ~ $ kubectl patch sc gp2 -p '{"allowVolumeExpansion": true}' + +storageclass.storage.k8s.io/gp2 patched +circleci-user ~ $ kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 (default) kubernetes.io/aws-ebs Delete WaitForFirstConsumer true 1d +---- + +Now you may proceed to expanding your volumes. + +[#delete-the-databases-stateful-set] +=== 3. Delete the database's stateful set +In this step, you will delete the stateful set, which controls your database pod. The command below deletes the referenced database's stateful set without deleting the pod. You do not want to delete the pod itself, as this would cause downtime. In the following steps, you will redeploy your stateful set. You might chose to delete one or both stateful sets, depending on which database volumes you wish to expand. The `--cascade=orphan` flag is most important here. 
+ +* **PostgreSQL** ++ +[source,bash] +---- +kubectl delete sts postgresql --cascade=orphan +---- + +* **MongoDB** ++ +[source,bash] +---- +kubectl delete sts mongodb --cascade=orphan +---- + +* **Redis** ++ +[source,bash] +---- +kubectl delete sts redis-master redis-slave --cascade=orphan +---- + +[#update-the-size-of-the-databases-pvc] +=== 4. Update the size of the database's PVC +Now that the stateful set has been removed, you can increase the size of our persistent volume claim to 10Gi. + +* **PostgreSQL** ++ +[source,bash] +---- +kubectl patch pvc data-postgresql-0 -p '{"spec": {"resources": {"requests": {"storage": "10Gi"}}}}' +---- + +* **MongoDB** ++ +[source,bash] +---- +kubectl patch pvc datadir-mongodb-0 -p '{"spec": {"resources": {"requests": {"storage": "10Gi"}}}}' +---- + +* **Redis** ++ +[source,bash] +---- +kubectl patch pvc redis-data-redis-master-0 -p '{"spec": {"resources": {"requests": {"storage": "10Gi"}}}}' +kubectl patch pvc redis-data-redis-slave-0 -p '{"spec": {"resources": {"requests": {"storage": "10Gi"}}}}' +---- + +[#update-kots-admin-console-with-the-new-pvc-size] +=== 5. Update Helm values file with the new PVC size +Now you need to upgrade the server installation by modifying the PVC size in the Helm values file to persist your changes. In the Helm values file, you will update the values for your PVC size to 10Gi as shown below. + +* **PostgreSQL** ++ +[source,yaml] +---- +postgresql: + primary: + persistence: + size: 10Gi +---- + +* **MongoDB** ++ +[source,yaml] +---- +mongodb: + persistence: + size: 10Gi +---- + +* **Redis** ++ +[source,yaml] +---- +redis: + master: + persistence: + size: 10Gi + slave: + persistence: + size: 10Gi +---- + +Now save and deploy your changes. This recreates the stateful set(s) that you destroyed earlier, but with the new PVC sizes, which will persist through new releases. 
+
+[source,shell]
+----
+helm upgrade <release-name> -n <namespace> -f <helm-value-file> <chart-directory>
+----
+
+[#validate-new-volume-size]
+=== 6. Validate new volume size
+Once deployed, you can validate the size of the data directories assigned to your databases.
+
+* For **PostgreSQL** the directory is `/bitnami/postgresql`.
++
+[source,bash]
+----
+circleci-user ~ $ kubectl exec postgresql-0 -- df -h /bitnami/postgresql
+Filesystem Size Used Avail Use% Mounted on
+/dev/nvme4n1 9.8G 404M 9.4G 5% /bitnami/postgresql
+----
+
+* For **MongoDB** the directory is `/bitnami/mongodb`.
++
+[source,bash]
+----
+circleci-user ~ $ kubectl exec mongodb-0 -- df -h /bitnami/mongodb
+Filesystem Size Used Avail Use% Mounted on
+/dev/nvme1n1 9.8G 441M 9.3G 5% /bitnami/mongodb
+----
+
+* For **Redis** the directory is `/data`.
++
+[source,bash]
+----
+circleci-user ~ $ kubectl exec redis-master-0 -- df -h /data
+Filesystem Size Used Avail Use% Mounted on
+/dev/nvme2n1 10G 156K 10G 1% /data
+
+circleci-user ~ $ kubectl exec redis-slave-0 -- df -h /data
+Filesystem Size Used Avail Use% Mounted on
+/dev/nvme2n1 10G 156K 10G 1% /data
+----
+
+As you can see, the size of your directories has been increased.
+
+When completing these steps, if you find, as expected, that the new pods _do_ show the resized volumes, it is still worth checking with the `kubectl describe` commands shown below. In some instances the resize will fail, but the only way to know is by viewing an event in the output from `kubectl describe`.
+ +* **PostgreSQL** ++ +[source,bash] +---- +kubectl describe pvc data-postgresql-0 +---- + +* **MongoDB** ++ +[source,bash] +---- +kubectl describe pvc datadir-mongodb-0 +---- + +* **Redis** ++ +[source,bash] +---- +kubectl describe pvc redis-data-redis-master-0 +kubectl describe pvc redis-data-redis-slave-0 +---- + +A successful output looks like this: + +[source,shell] +---- +Events: +Type Reason Age From Message + +Normal FileSystemResizeSuccessful 19m kubelet MountVolume.NodeExpandVolume succeeded for volume "pvc-b3382dd7-3ecc-45b0-aeff-45edc31f48aa" +---- + +Failure might look like this: + +[source,shell] +---- +Warning VolumeResizeFailed 58m volume_expand error expanding volume "circleci-server/datadir-mongodb-0" of plugin "kubernetes.io/aws-ebs": AWS modifyVolume failed for vol-08d0861715c313887 with VolumeModificationRateExceeded: You've reached the maximum modification rate per volume limit. Wait at least 6 hours between modifications per EBS volume. +status code: 400, request id: 3bd43d1e-0420-4807-9c33-df26a4ca3f23 +Normal FileSystemResizeSuccessful 55m (x2 over 81m) kubelet MountVolume.NodeExpandVolume succeeded for volume "pvc-29456ce2-c7ff-492b-add4-fcf11872589f" +---- + +[#troubleshoot] +== Troubleshoot + +After following these steps, if you find that the disk size allocated to your data directories has not increased, then you may need to restart your database pods. This will cause downtime of 1-5 minutes while the databases restart. You can use the commands below to restart your databases. + +* **PostgreSQL** ++ +[source,bash] +---- +kubectl rollout restart sts postgresql +---- + +* **MongoDB** ++ +[source,bash] +---- +kubectl rollout restart sts mongodb +---- + +* **Redis** ++ +[source,bash] +---- +kubectl rollout restart sts redis-master redis-slave +---- + +NOTE: Running out of disk space for either MongoDB or PostgreSQL may result in failures in CircleCI server such as job failures. 
These jobs may become stuck as the disk space runs out and will need to be cancelled and rerun once the volumes have been expanded. diff --git a/docs/server-admin-4.9/modules/operator/pages/faq.adoc b/docs/server-admin-4.9/modules/operator/pages/faq.adoc new file mode 100644 index 0000000000..8ebf5fe979 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/faq.adoc @@ -0,0 +1,13 @@ += CircleCI server 4.9 FAQ +:page-platform: Server 4.9, Server Admin +:page-description: Find answers about the CircleCI server 4.9 data retention policy, what control is granted over Nomad certificates. +:experimental: + +## Does server 4.9 have a data retention policy? +Data retention for MongoDB and PostgreSQL can be configured by following our guide xref:data-retention.adoc[Data Retention in Server]. + +## What control is granted over Nomad certificates? +Full control of the certificates, all the way down to mTLS for Nomad. + +## Is it possible to change or disable the polling time which checks for health status? +No, this is not customizable. diff --git a/docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc b/docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc new file mode 100644 index 0000000000..2f838607b0 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc @@ -0,0 +1,159 @@ += Introduction to Nomad cluster operation +:page-platform: Server 4.9, Server Admin +:page-description: Learn how to operate the Nomad Cluster in your CircleCI server 4.9 installation. +:experimental: + +CircleCI uses link:https://www.nomadproject.io/[Nomad] as the primary job scheduler. This section provides a basic introduction to Nomad for understanding how to operate the Nomad Cluster in your CircleCI installation. 
+
+[#basic-terminology-and-architecture]
+== Basic terminology and architecture
+
+.Nomad Cluster Management
+image::guides:ROOT:nomad-diagram-v2.png[Diagram of the Nomad cluster]
+<<<
+- **Nomad server:** Nomad servers are the brains of the cluster. They receive and allocate jobs to Nomad clients. In CircleCI server, a Nomad server runs as a service in your Kubernetes cluster.
+
+- **Nomad client:** Nomad clients execute the jobs they are allocated by Nomad servers. Usually a Nomad client runs on a dedicated machine (often a VM) to take full advantage of machine power. You can have multiple Nomad clients to form a cluster and the Nomad server allocates jobs to the cluster with its scheduling algorithm.
+
+- **Nomad jobs:** A Nomad job is a specification, provided by a user, that declares a workload for Nomad. A Nomad job corresponds to an execution of a CircleCI job. If the job uses xref:guides:optimize:parallelism-faster-jobs.adoc#[parallelism], for example `parallelism: 10`, then Nomad runs 10 jobs.
+
+- **Build agent:** Build agent is a Go program written by CircleCI that executes steps in a job and reports the results. Build agent is executed as the main process inside a Nomad job.
+
+[#basic-operations]
+== Basic operations
+
+The following section is a basic guide to operating a Nomad cluster in your installation.
+
+The `nomad` CLI is installed in the Nomad pod. It is preconfigured to talk to the Nomad cluster, so it is possible to use `kubectl` along with the `nomad` command to run the commands in this section.
+
+[#checking-the-jobs-status]
+=== Checking the jobs status
+
+To get a list of statuses for all jobs in your cluster, run the following command:
+
+[source,shell]
+----
+kubectl exec -it <nomad-server-pod-ID> -- nomad status
+----
+
+The `Status` is the most important field in the output, with the following status type definitions:
+
+- `running`: Nomad has started executing the job. This typically means your job in CircleCI is started.
+ +- `pending`: There are not enough resources available to execute the job inside the cluster. + +- `dead`: Nomad has finished executing the job. The status becomes `dead` regardless of whether the corresponding CircleCI job/build succeeds or fails. + +[#checking-the-cluster-status] +=== Checking the cluster status + +To get a list of your Nomad clients, run the following command: + +[source,shell] +---- +kubectl exec -it <nomad-server-pod-ID> -- nomad node-status +---- + +NOTE: `nomad node-status` reports both Nomad clients that are currently serving (status `active`) and Nomad clients that were taken out of the cluster (status `down`). Therefore, you need to count the number of `active` Nomad clients to know the current capacity of your cluster. + +To get more information about a specific client, run the following command from that client: + +[source,shell] +---- +kubectl exec -it <nomad-server-pod-ID> -- nomad node-status -self +---- + +This gives information such as how many jobs are running on the client and the resource utilization of the client. + +[#checking-logs] +=== Checking logs + +A Nomad job corresponds to an execution of a CircleCI job. Therefore, Nomad job logs can sometimes help to understand the status of a CircleCI job if there is a problem. To get logs for a specific job, run the following command: + +[source,shell] +---- +kubectl exec -it <nomad-server-pod-ID> -- nomad logs -job -stderr <nomad-job-id> +---- + +NOTE: Be sure to specify the `-stderr` flag, as this is where most Build Agent logs appear. + +While the `nomad logs -job` command is useful, it is not always accurate because the `-job` flag uses a random allocation of the specified job. The term `allocation` is a smaller unit in Nomad Job, which is beyond the scope of this document. To learn more, see link:https://www.nomadproject.io/docs/internals/scheduling.html[the official document]. + +Complete the following steps to get logs from the allocation of the specified job: + +. 
Get the job ID with `nomad status` command. +. Get the allocation ID of the job with `nomad status <job-id>` command. +. Get the logs from the allocation with `nomad logs -stderr <allocation-id>` + +[#accessing-the-nomad-web-ui] +=== Accessing the Nomad Web UI + +Nomad provides a web UI for inspecting your Nomad cluster. If you are using an internalized Nomad deployment, which is the default setup with CircleCI server, follow the instructions in this section to temporarily access the UI for troubleshooting purposes. For a more permanent solution, consider externalizing Nomad and consult the official Nomad documentation for setting up routing. + +. Nomad binds to its Pod IP. To port-forward to the Nomad service, you need to set up a lightweight tunneling mechanism within the cluster as follows: ++ +[source,bash] +---- +export NOMAD_IP=$(kubectl get svc nomad-server -o jsonpath='{.spec.clusterIP}' -n <server-namespace>) + +kubectl run nomad-tunnel --rm -it --restart=Never --image=alpine/socat -n <server-namespace> -- TCP-LISTEN:4646,fork,reuseaddr TCP:$NOMAD_IP:4646 +---- + +. In another terminal, run the following command to set up port forwarding: ++ +[source,bash] +---- +kubectl port-forward pod/nomad-tunnel 4646:4646 -n <server-namespace> +---- + +. Navigate to ++`http://localhost:4646/ui`++ in your browser to access the Nomad UI. For more information on utilizing the Nomad UI, refer to the link:https://developer.hashicorp.com/nomad/tutorials/web-ui[Nomad documentation]. + +[#shutting-down-a-nomad-client] +=== Shutting down a Nomad client + +When you want to shut down a Nomad client, you must first set the client to `drain` mode. In `drain` mode, the client will finish any jobs that have already been allocated but will not be allocated any new jobs. + +. To drain a client, log in to the client and set the client to drain mode with `node-drain` command as follows: ++ +[source,shell] +---- +nomad node-drain -self -enable +---- +. 
Then, make sure the client is in drain mode using the `node-status` command: ++ +[source,shell] +---- +nomad node-status -self +---- + +Alternatively, you can drain a remote node with the following command, substituting the node ID: + +[source,shell] +---- +nomad node-drain -enable -yes <node-id> +---- + +[#scaling-down-the-client-cluster] +=== Scaling down the client cluster + +To set up a mechanism for clients to shutdown, first enter `drain` mode, then wait for all jobs to be finished before terminating the client. You can also configure an link:https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html[ASG Lifecycle Hook] that triggers a script for scaling down instances. + +The script should use the commands in the section above to do the following: + +. Put the instance in drain mode. +. Monitor running jobs on the instance and wait for them to finish. +. Terminate the instance. + +[#externalize-servers] +== Externalize your Nomad Servers +From server version 4.8, Nomad Servers may now be deployed externally to your Kubernetes cluster that hosts your installation of CircleCI server. Externalization of Nomad Servers is optional. Externalization of Nomad Servers can improve their stability. If you already have a CircleCI server instance with _internal_ Nomad Servers, the process to switch to external Nomad Servers is as follows: + +. Stop all builds on your CircleCI server instance. +. Follow our installation instructions for deploying Nomad Servers on either xref:installation:phase-3-aws-execution-environments.adoc#nomad-servers[AWS] or xref:installation:phase-3-gcp-execution-environments.adoc#nomad-servers[GCP]. + +ifndef::pdf[] +[#next-steps] +== Next steps + +* Read the xref:managing-user-accounts.adoc#[Managing user accounts] guide. 
+endif::[] diff --git a/docs/server-admin-4.9/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc b/docs/server-admin-4.9/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc new file mode 100644 index 0000000000..9a60274b0f --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc @@ -0,0 +1,254 @@ += Manage virtual machines with machine provisioner +:page-platform: Server 4.9, Server Admin +:page-description: CircleCI server 4.9 machine provisioner service controls how machine executor (Linux and Windows images) and Remote Docker jobs are run. +:experimental: + +Machine provisioner controls how xref:reference:ROOT:configuration-reference.adoc#machine[`machine`] executor and xref:guides:execution-managed:building-docker-images.adoc#[Remote Docker] jobs are run. + +This section describes the available configuration options for machine provisioner. + +NOTE: Prescaling controls will be exposed in a future release for tuning. + +CAUTION: We recommend that you leave these options at their defaults until you have successfully configured and verified the core and build services of your server installation. Steps to set up machine provisioner are provided in the installation guide for xref:installation:phase-3-aws-execution-environments.adoc#aws-machine-provisioner[AWS] and xref:installation:phase-3-gcp-execution-environments.adoc#gcp-authentication[GCP]. + +[#provider] +== Provider +The following configuration options are for the machine provisioner provider, either AWS or GCP. + +[#aws] +=== AWS +Configure machine provisioner to work with AWS EC2 in your `values.yaml` file. During installation you will have set up a security group and authentication. See the xref:installation:phase-3-aws-execution-environments.adoc#machine-provisioner[Installation Phase 3 - Execution Environments] page for more information. 
+ +The information in this section describes post-installation configuration options for machine provisioner. + +[#windows-image-aws] +==== Windows image + +If you require Windows executors, you can supply an AMI ID in your `values.yaml` file. To create a Windows image, use the link:https://github.com/CircleCI-Public/circleci-server-windows-image-builder[CircleCI server Windows image builder]. + +[source,yaml] +---- +machine_provisioner: + providers: + ec2: + ... + windowsAMI: "<my-windows-ami>" +---- + +[#linux-image-aws] +==== Alternative Linux VM image + +If you wish to provide a custom AMI for Linux machine executors, you can supply an AMI ID in your `values.yaml` file. To create a Linux image, use the link:https://github.com/CircleCI-Public/circleci-server-linux-image-builder[CircleCI server Linux image builder]. + +[source,yaml] +---- +machine_provisioner: + providers: + ec2: + ... + linuxAMI: "<my-linux-ami>" +---- + +[#default-aws-ami-lists] +==== Default AWS AMI lists + +The default AMIs for server 4.9 are based on Ubuntu 22.04. 
+ +[tabs] +==== +x86 AMI list:: ++ +-- +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +| Region +| AMI + +| `us-east-1` +| `ami-060a1e5d499c85d12` + +| `us-east-2` +| `ami-036fa23a3bd3b5013` + +| `ca-central-1` +| `ami-0e323ff60823ceb5c` + +| `ap-south-1` +| `ami-072d2c41d591ee7a5` + +| `ap-southeast-2` +| `ami-0880195a4612a9d61` + +| `ap-southeast-1` +| `ami-0177e901826e7b422` + +| `eu-central-1` +| `ami-00588b75fa9a72eff` + +| `eu-west-1` +| `ami-08a090a8c770fe41f` + +| `eu-west-2` +| `ami-056ff1116a86d9e87` + +| `sa-east-1` +| `ami-0117daec76cc825e7` + +| `us-west-1` +| `ami-09bc39e1cf1850c79` + +| `us-west-2` +| `ami-01f4236b649731f59` + +| `ap-northeast-1` +| `ami-0d71a7594982c0233` + +| `ap-northeast-2` +| `ami-06b70b9f78e46f8b4` + +| `eu-west-3` +| `ami-0af2e6edf47f7b7cc` + +| `us-gov-east-1` +| `ami-00e0319619e88edf7` + +| `us-gov-west-1` +| `ami-0fdf4561a0d3b0346` +|=== +-- +Arm AMI list:: ++ +-- +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +| Region +| AMI + +|`us-east-1` +|`ami-09688117b98d2fc85` + +|`us-east-2` +|`ami-0f74df98c729f23da` + +|`ca-central-1` +|`ami-0c38cb9bafc2d2367` + +|`ap-south-1` +|`ami-0bd058bca7362651a` + +|`ap-southeast-2` +|`ami-063193aeb76899a04` + +|`ap-southeast-1` +|`ami-0b93f7ee96e0544c8` + +|`eu-central-1` +|`ami-0603edd8ca55f1aea` + +|`eu-west-1` +|`ami-06284417985a234a6` + +|`eu-west-2` +|`ami-042850328df475cf4` + +|`sa-east-1` +|`ami-065ea52e0a95d6097` + +|`us-west-1` +|`ami-008f62919e40a0607` + +|`us-west-2` +|`ami-0f02394a567f27c57` + +|`ap-northeast-1` +|`ami-0cd7ad99dc5c77394` + +|`ap-northeast-2` +|`ami-02c787546f70518d8` + +|`us-gov-east-1` +|`ami-0b0841c7bba30ab06` + +|`us-gov-west-1` +|`ami-02268e2fe5572a88c` +|=== +-- +==== + +[#gcp] +=== GCP +Configure machine provisioner to work with GCP in your `values.yaml` file. During installation you will have set up a security group and authentication. 
See the xref:installation:phase-3-gcp-execution-environments.adoc#machine-provisioner[Installation Phase 3 - Execution Environments] page for more information.
+
+The information in this section describes post-installation configuration options for machine provisioner.
+
+[#windows-image-gcp]
+==== Windows image
+
+If you require Windows executors, you can supply an image name in your `values.yaml` file. To create a Windows image, use the link:https://github.com/CircleCI-Public/circleci-server-windows-image-builder[CircleCI server Windows image builder].
+
+[source,yaml]
+----
+machine_provisioner:
+  providers:
+    gcp:
+      ...
+      windowsImage: "<my-windows-image>"
+----
+
+[#linux-image-gcp]
+==== Alternative Linux VM image
+
+If you wish to provide a custom image for Linux machine executors, you can supply an image name in your `values.yaml` file. To create a Linux image, use the link:https://github.com/CircleCI-Public/circleci-server-linux-image-builder[CircleCI server Linux image builder].
+
+[source,yaml]
+----
+machine_provisioner:
+  providers:
+    gcp:
+      ...
+      linuxImage: "<my-linux-image>"
+----
+
+[#instance-preallocation]
+== Instance preallocation
+
+CAUTION: When using preallocated instances, be aware that a cron job is scheduled to cycle through these instances once per minute to ensure they do not end up in an unworkable state.
+
+To configure server to keep instances preallocated, use the keys shown in the following `machine-provisioner-config.yaml` examples:
+
+[source,yaml]
+----
+# -- Configuration options for, and numbers of, prescaled instances for remote Docker jobs.
+preboot:
+  scheduled:
+    - executor: linux
+      class: medium
+      image: docker-default
+      cron: ""
+      count: 2
+----
+
+[source,yaml]
+----
+# -- Configuration options for, and numbers of, prescaled instances for machine jobs. 
+preboot: + scheduled: + - executor: linux + class: medium + image: default + cron: "" + count: 2 +---- + +[#apply-changes] +== Apply changes + +Apply the changes made to your `values.yaml` file: + +[source,shell,subs=attributes+] +---- +namespace=<your-namespace> +helm upgrade circleci-server oci://cciserver.azurecr.io/circleci-server -n $namespace --version {serverversion49} -f <path-to-values.yaml> +---- diff --git a/docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc b/docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc new file mode 100644 index 0000000000..98ec21698a --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc @@ -0,0 +1,100 @@ += Managing build artifacts +:page-platform: Server 4.9, Server Admin +:page-description: Learn how CircleCI server 4.9 build artifacts persist data after a job is completed and how they can be used for longer-term storage of your build process outputs. +:experimental: + +Build artifacts persist data after a job is completed. They can be used for longer-term storage of your build process outputs. For example, when a Java build/test process finishes, the output of the process is saved as a `.jar` file. CircleCI can store this file as an artifact, keeping it available long after the process has finished. + +[#safe-and-unsafe-content-types] +== Safe and unsafe content types +By default, only predefined artifact types are allowed to be rendered. This protects users from uploading, and potentially executing, malicious content. 
The 'allowed-list' is as follows: + +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +| Category +| Safe Type + +| Text +| Plain + +| Application +| JSON + +| Image +| PNG + +| Image +| JPG + +| Image +| GIF + +| Image +| BMP + +| Video +| WEBM + +| Video +| OGG + +| Video +| mp4 + +| Audio +| WEBM + +| Audio +| AAC + +| Audio +| mp4 + +| Audio +| MPEG + +| Audio +| OGG + +| Audio +| WAV +|=== +<<< + +Also, by default, the following types will be rendered as plain text: + +[.table.table-striped] +[cols=2*, options="header", stripes=even] +|=== +| Category +| Unsafe Type + +| Text +| HTML + +| Text +| CSS + +| Text +| JavaScript + +| Text +| ecmascript + +| Application +| JavaScript + +| Application +| ecmascript + +| Text +| XML +|=== + +[#allow-unsafe-types] +=== Allow unsafe types +You can choose to allow unsafe types to be rendered, if required. Add the following to your `values.yaml` file: + +[source,yaml] +serveUnsafeArtifacts: true diff --git a/docs/server-admin-4.9/modules/operator/pages/managing-load-balancers.adoc b/docs/server-admin-4.9/modules/operator/pages/managing-load-balancers.adoc new file mode 100644 index 0000000000..a79825851c --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/managing-load-balancers.adoc @@ -0,0 +1,30 @@ += Managing load balancers +:page-platform: Server 4.9, Server Admin +:page-description: Use this guide to make the frontend load balancer private for CircleCI server 4.9. +:experimental: + +CircleCI server uses a load balancer to manage network traffic entering and leaving the Kubernetes cluster. + +The load balancer manages all traffic coming into the application. The load balancer is public by default, but can be made private. 
+ +[#make-the-frontend-load-balancer-private] +== Make the frontend load balancer private + +**** +**Webhooks:** If you choose to make the frontend load balancer private, the following conditions must be met, depending on your VCS, for webhooks to work: + +* **GitHub Enterprise** – your CircleCI server installation must be in the same internal network as GHE. +* **GitHub.com** – set up a proxy for incoming webhooks and set it as override for the webhook host URL. This setting can be found in the CircleCI app UI under **Admin Settings** > **System Settings** > **Override webhook host URL**. +**** + +NOTE: The Private load balancers option only works with installations on CircleCI server on GKE or EKS. + +In your `values.yaml` override file, set the following parameter to true. The parameter is false (public) by default. + +[source,yaml] +---- +nginx: + private_load_balancers: true +---- + +NOTE: If you are changing this setting after the initial deployment of CircleCI server, you may need to delete the old public load balancer so that Kubernetes requests a new load balancer with the new configuration. diff --git a/docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc b/docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc new file mode 100644 index 0000000000..71e8129219 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc @@ -0,0 +1,67 @@ += Managing orbs +:page-platform: Server 4.9, Server Admin +:page-description: Use this page to learn about orbs and how to manage them within CircleCI server 4.9. +:experimental: + +This section describes how to manage orbs for an installation of server 4.9. Server installations include their own local orb registry. All orbs referenced in configs refer to the orbs in the server orb registry. You are responsible for maintaining orbs. 
This includes copying orbs from the public registry, updating orbs that may have been previously copied, and registering your company's private orbs, if they exist.
+
+For information on orbs and related use cases, see the xref:orbs:use:orb-intro.adoc#[orbs docs].
+
+If you are looking for information on creating an orb, see the xref:orbs:author:orb-author.adoc#[Introduction to Authoring Orbs].
+
+Orbs are accessed via the xref:guides:toolkit:local-cli.adoc#[CircleCI CLI]. Orbs require your CircleCI user to be an admin. They also require a xref:guides:toolkit:managing-api-tokens.adoc#[personal API token].
+
+Ensure that you are using a personal API token generated _after_ your user account is made an admin.
+
+Providing a local repository location using the `--host` option allows you to access your local server orbs, rather than public cloud orbs. For example, if your server installation is located at `\http://circleci.somehostname.com`, you can run orb commands local to that orb repository by passing `--host \http://circleci.somehostname.com`.
+
+[#list-available-orbs]
+== List available orbs
+To list available public orbs, visit the orb directory, or run the following command:
+
+[source,shell]
+----
+circleci orb list
+----
+
+To list available private orbs (registered in your local server orb repository), run the following command:
+
+[source,shell]
+----
+circleci orb list --host <your-server-install-domain> --token <your-api-token>
+----
+
+[#import-a-public-orb]
+== Import a public orb
+To import a public orb to your local server orb repository, run the following command:
+
+[source,bash]
+----
+circleci admin import-orb <namespace>/<orb-name>@<orb-version> --host <your-server-installation-domain> --token <your-api-token>
+----
+
+NOTE: `<orb-name>` and `<orb-version>` are optional. You can choose to only specify a namespace, in which case the most recent versions of all orbs in the namespace will be imported. 
+
+[#fetch-a-public-orbs-updates]
+== Fetch a public orb’s updates
+To update a public orb in your local server orb repository with a new version, run the following command:
+
+[source,bash]
+----
+circleci admin import-orb <namespace>/<orb-name>@<orb-version> --host <your-server-installation-domain> --token <your-api-token>
+----
+
+NOTE: `<orb-name>` and `<orb-version>` are optional. You can choose to only specify a namespace, in which case the most recent versions of all orbs in the namespace will be updated.
+
+[#using-orbs-behind-a-proxy]
+== Using orbs behind a proxy
+
+When importing orbs, the CLI must be able to talk to the server installation and to `circleci.com`. If you want to do this when using a server installation behind a proxy, the CLI needs to be configured to use the proxy to make those requests to `circleci.com`, rather than proxying requests to the server install. For example:
+
+[source,bash]
+----
+export NO_PROXY=server.example.com
+export HTTPS_PROXY=http://proxy.example.com:3128
+export HTTP_PROXY=http://proxy.example.com:3128
+circleci admin import-orb ns[orb[@version]] --host <your server installation domain> --token <your api token>
+----
diff --git a/docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc b/docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc
new file mode 100644
index 0000000000..b9cdd285c2
--- /dev/null
+++ b/docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc
@@ -0,0 +1,63 @@
+= Managing user accounts
+:page-platform: Server 4.9, Server Admin
+:page-description: This section provides information to help CircleCI server 4.9 operators manage user accounts.
+:experimental:
+
+This section provides information to help operators manage user accounts. For an overview of user accounts, see the Admin settings overview from the CircleCI app by clicking on your profile in the top right corner and selecting *Admin*. 
+
+[#suspending-accounts]
+== Suspending accounts
+This section covers how to suspend new, active, or inactive accounts.
+
+[#new-accounts]
+=== New accounts
+
+Any user associated with your GitHub organization can create a user account for your CircleCI server installation. To control who has access, you can choose to automatically suspend all new users, requiring an administrator to activate them before they can log in. To access this feature:
+
+. Navigate to your CircleCI Admin Settings.
+. Select *System Settings* from the Admin Settings menu.
+. Set *Suspend New Users* to *True*.
+
+[#active-accounts]
+=== Active accounts
+When an account is no longer required, you can suspend the account. It will no longer be active and will not count against your license quota. To suspend an account:
+
+. Navigate to your CircleCI Admin Settings.
+. Select *Users* from the Admin Settings menu.
+. Scroll to locate the account in either the Active or Inactive window.
+. Select *Suspend* next to the account name and the account will appear in the Suspended window.
+
+[#inactive-accounts]
+=== Inactive accounts
+Inactive accounts are those that have been approved by the administrator of the server installation but have not logged into the system successfully. These accounts do not count against your available server seats.
+
+[#reactivating-accounts]
+== Reactivating accounts
+This section covers how to reactivate new or previously active accounts.
+
+[#reactivate-a-new-account]
+=== Reactivate a new account
+To activate a new account that was automatically suspended and allow the associated user access to your installation of CircleCI server:
+
+. Navigate to your CircleCI Admin Settings.
+. Select *Users* from the Admin Settings menu.
+. View the *Suspended New Users* window.
+. Select *Activate* next to the User you wish to grant access and the account will appear in the Active window. 
+ +[#reactivate-an-account] +=== Reactivate an account +To reactivate an account that has been suspended: + +. Navigate to your CircleCI Admin Settings. +. Select *Users* from the Admin Settings menu. +. View the Suspended window. +. Select *Activate* next to the User you wish to grant access and the account will appear in the Active window. + +[#limiting-registration-by-github-organization] +== Limiting registration by GitHub organization +When using GitHub.com, you can limit who can register with your CircleCI install to people with some connection to your approved organizations list. To access this feature: + +. Navigate to your CircleCI Admin Settings page. +. Select *System Settings* from the Admin Setting menu. +. Scroll down to Required Org Membership List. +. Enter the organization(s) you wish to approve. If entering more than one organization, use a comma-delimited string. diff --git a/docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc b/docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc new file mode 100644 index 0000000000..782b504cd3 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc @@ -0,0 +1,11 @@ += Monitoring stack reference Helm chart +:page-platform: Server 4.9, Server Admin +:page-description: Learn how to deploy your own monitoring stack using our Helm chart as a reference. +:experimental: + +You may want to access metrics to gain insight into the performance of builds on your CircleCI server installation. This page provides a sample to show how observability can be set up using the most common tools via a Helm chart package. + +CircleCI does not support or provide any tooling to provide observability into the running services and infrastructure that CircleCI server operates in. +This guide is provided as a reference on how you could set up observability and is not an officially supported product. 
Our reference monitoring stack currently contains samples on how to set up and configure Grafana, Prometheus, and Telegraf, as well as some built-in dashboards for monitoring key service level indicators. + +You can find the link:https://github.com/CircleCI-Public/circleci-server-monitoring-reference?tab=readme-ov-file#server-monitoring-stack[reference monitoring stack on our public GitHub], as well as more information on how you could configure this setup. You will also find information on link:https://github.com/CircleCI-Public/circleci-server-monitoring-reference?tab=readme-ov-file#modifying-or-adding-grafana-dashboards[adding to or editing the existing dashboards]. diff --git a/docs/server-admin-4.9/modules/operator/pages/operator-overview.adoc b/docs/server-admin-4.9/modules/operator/pages/operator-overview.adoc new file mode 100644 index 0000000000..dc062b19b9 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/operator-overview.adoc @@ -0,0 +1,75 @@ += Operator overview +:page-platform: Server 4.9, Server Admin +:page-description: Learn about the various tasks and tools involved in administering an installation of CircleCI server 4.9. +:experimental: + +This guide contains information for CircleCI server operators, or those responsible for ensuring CircleCI server is running properly through maintenance and monitoring. Before reading this operator guide, ensure you have read the xref:overview:circleci-server-overview.adoc[CircleCI server 4.9 overview]. + +CircleCI server schedules CI/CD jobs using the link:https://www.nomadproject.io/[Nomad] scheduler. The Nomad Server (control plane) runs inside the Kubernetes cluster, while Nomad clients are provisioned outside the cluster. The Nomad clients need access to the Nomad control plane and Kong. + +CircleCI server can run Docker jobs on the Nomad clients, and also in dedicated virtual machines for remote Docker jobs. 
+ +Job artifacts and outputs are sent directly from jobs in Nomad to object storage (S3, GCS, or other supported options). + +Audit logs and other items from the application are also stored in object storage, so both the Kubernetes cluster and the Nomad clients need access to object storage. + +[#execution-environment] +== Execution environment + +CircleCI server 4.9 uses Nomad as the primary job scheduler. Refer to our xref:introduction-to-nomad-cluster-operation.adoc#[Introduction to Nomad Cluster Operation] to learn more about the job scheduler and how to perform basic client and cluster operations. + +CircleCI Nomad clients automatically provision compute resources according to the executors configured for each job in a project's `.circleci/config.yml` file. + +[#nomad-clients] +=== Nomad clients +Nomad Clients run without storing state, allowing you to increase or decrease the number of containers as needed. + +If a job's resource class requires more resources than the Nomad client's instance type has available, it will remain in a pending state. Choosing a smaller instance type for Nomad clients is a way to reduce cost, but limits the Docker resource classes CircleCI can use. Review the xref:reference:ROOT:configuration-reference.adoc#resourceclass[available resource classes] to decide what is best for you. The default instance type will run up to `xlarge` resource classes. + +See the link:https://www.nomadproject.io/docs/install/production/requirements#resources-ram-cpu-etc[Nomad Documentation] for options for optimizing the resource usage of Nomad clients. + +NOTE: The maximum machine size for a Nomad client is 128GB RAM/64 CPUs. Contact your CircleCI account representative to request use of larger machines for Nomad clients. + +For more information on Nomad port requirements, see the +xref:installation:hardening-your-cluster.adoc#[Hardening Your Cluster] +guide. + +[#github] +=== GitHub +CircleCI uses GitHub or GitHub Enterprise as an Identity Provider. 
GitHub Enterprise can, in turn, use +https://docs.github.com/en/github-ae@latest/admin/authentication/about-identity-and-access-management-for-your-enterprise[SAML or SCIM] to manage users from an external Identity Provider. + +NOTE: CircleCI does not support changing the URL or backend GitHub instance after it has been set up. + +The following table describes the ports used on machines running GitHub to communicate with the services and Nomad client instances. + +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Source +| Ports +| Use + +| Services +| 22 +| Git access + +| Services +| 80 or 443 +| API access + +| Nomad Client +| 22 +| Git access + +| Nomad Client +| 80 or 443 +| API access +|=== + +ifndef::pdf[] +[#next-steps] +== Next steps + +* Read the <<introduction-to-nomad-cluster-operation#,Introduction to Nomad Cluster Operation>>. +endif::[] diff --git a/docs/server-admin-4.9/modules/operator/pages/troubleshooting-and-support.adoc b/docs/server-admin-4.9/modules/operator/pages/troubleshooting-and-support.adoc new file mode 100644 index 0000000000..8d5c28e5f8 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/troubleshooting-and-support.adoc @@ -0,0 +1,125 @@ += Troubleshooting and support +:page-platform: Server 4.9, Server Admin +:page-description: Use this document to find troubleshooting steps if you are having problems with your CircleCI server 4.9 installation. +:experimental: + +This document describes an initial set of troubleshooting steps to take if you are experiencing problems with your CircleCI server 4.9 installation. If your issue is not addressed below, you can generate a support bundle or contact your CircleCI account team. + +toc::[] + +[#generate-support-bundle] +== Generate support bundle +A support bundle is used by CircleCI engineers to diagnose and fix any issues you are experiencing. They are typically requested when you open a ticket. + +To generate a support bundle, follow the steps below. 
+
+=== Prerequisites
+. First, make sure CircleCI server is deployed and you have access to the cluster/namespace through kubectl.
+
+[source,bash]
+----
+# To check if you have access to cluster/namespace
+kubectl -n <namespace> get pods
+----
+
+. Next, install link:https://krew.sigs.k8s.io/docs/user-guide/setup/install/[Krew].
+
+. Install link:https://github.com/replicatedhq/troubleshoot#support-bundle[support-bundle (kubectl plugin)] to your local development machine.
+
+[source,bash]
+----
+# To install support-bundle plugin
+kubectl krew install support-bundle
+----
+
+=== Generating support bundle
+
+When ready, run the support bundle from the current directory and wait for it to finish.
+
+[source,bash]
+----
+kubectl support-bundle https://raw.githubusercontent.com/CircleCI-Public/server-scripts/main/support/support-bundle.yaml
+----
+
+
+[#managing-pods]
+== Managing pods
+
+[#verify-pod-readiness-and-status]
+=== Verify pod readiness and status
+NOTE: Check the `READY` column as well as `STATUS`. Even if the `STATUS` is `Running`, pods may not be ready to serve user requests. Some pods may take some time to become ready.
+
+[source,bash]
+----
+kubectl get pods -n <namespace>
+NAME READY STATUS RESTARTS AGE
+api-service-5c8f557548-zjbsj 1/1 Running 0 6d20h
+audit-log-service-77c478f9d5-5dfzv 1/1 Running 0 6d20h
+builds-service-v1-5f8568c7f5-62h8n 1/1 Running 0 6d20h
+circleci-mongodb-0 1/1 Running 0 6d20h
+circleci-nomad-0 1/1 Running 6 6d20h
+…
+----
+
+To show only pods with a status besides `Running`, you can use the `--field-selector` option. 
+ +[source,bash] +---- +kubectl get pods --field-selector status.phase!=Running -n <namespace> +NAME READY STATUS RESTARTS AGE +nomad-server 0/1 Error 0 5d22h +---- + +[#verify-pod-settings-and-status] +=== Verify pod settings and status +To show detailed settings and status of pods, use the following command: + +[source,bash] +---- +kubectl describe pods <pod-name> -n <namespace> +---- + +[#get-pod-logs] +=== Get pod logs +To show logs of pods, use the following command: + +[source,bash] +---- +kubectl logs <pod-name> -n <namespace> +---- + +[#restart-pods] +=== Restart pods +To restart specific pods, the easiest way is remove the pod. Kubernetes automatically recreates the pod: + +[source,bash] +---- +kubectl delete pod <pod-name> -n <namespace> --now +---- + +[#air-gap-repl] +=== Utilizing REPL in an air-gapped environment +If you are in an air-gapped environment, you will first need to download the `lein` REPL dependencies, commit them, and upload it to your private Docker registry. + +[source,bash] +---- +docker run -it clojure lein repl :connect 6005 +docker commit $(docker ps -aq | head -1) <your-private-registry>/clojure +---- + +Now you are ready to access the REPL. From a system with access to the private Docker registry and `kubectl` access to the cluster, run the script below. Be sure to modify it to match your namespace if it is not `circleci-server`. + +[source,bash] +---- +SVC_NAME=frontend +INET_ADDR="$(ip -4 route get 192.0.2.1 | grep -o 'src [0-9.]\{1,\}' | awk '{ print $2 }')" + +# Start port-forwarding in background +kubectl port-forward "$(kubectl get po -l app="${SVC_NAME}" -n circleci-server -o jsonpath='{.items[0].metadata.name}')" --address "${INET_ADDR}" 6005 -n circleci-server & + +# Start nREPL +sudo docker run --rm -it clojure lein repl :connect "${INET_ADDR}":6005 + +# End port-forwarding +kill $! 
+---- diff --git a/docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc b/docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc new file mode 100644 index 0000000000..254e09efde --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc @@ -0,0 +1,95 @@ += Upgrade MongoDB +:page-platform: Server 4.9, Server Admin +:page-description: Learn how to upgrade MongoDB up to v4.4.15 in an installation of CircleCI server 4.9. +:experimental: + +MongoDB is a database service used by CircleCI server. This page describes how to upgrade MongoDB to version `4.4.15`. + +MongoDB `3.6.22` is shipped with CircleCI server 4.9. + +[#prerequisites] +== Prerequisites + +* Ensure backups have been taken. You will need a backup of MongoDB to restore to in case anything goes wrong during the upgrade process +* You are prepared to modify the `values.yaml` +* `helm upgrade` will work from your system to upgrade the cluster +* MongoDB root password is available + +[#script-upgrade] +== Scripted upgrade +We have created a shell script which may be used to upgrade your cluster's MongoDB instance link:https://github.com/CircleCI-Public/server-scripts/tree/main/upgrade-mongo-to-4.4[here]. +If you wish, you may use the following instructions to manually upgrade your cluster's MongoDB. + +[#manual-upgrade] +== Manual upgrade + +=== 1. Upgrade from MongoDB 3.6 to 4.0 + +. Your `values.yaml` should contain the following snippet: ++ +```yaml +mongodb: + image: + tag: 3.6.22-debian-9-r38 +``` ++ +To begin the upgrade process, change the tag to `4.0.27-debian-9-r118`: ++ +```yaml +mongodb: + image: + tag: 4.0.27-debian-9-r118 +``` + +. Run `helm upgrade` to update your installation. + +. Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. (Be sure to replace `<password>` with your MongoDB root password.)
++ +```bash +kubectl exec -it mongodb-0 -- mongo -u root -p <password> +db.adminCommand( { setFeatureCompatibilityVersion: "4.0" } ) +``` + +. You should get a `{ "ok" : 1 }` response from Mongo. Exit out of the MongoDB shell and pod. + +=== 2. Upgrade from MongoDB 4.0 to 4.2 + +. Change the tag to `4.2.21-debian-10-r8`: ++ +```yaml +mongodb: + image: + tag: 4.2.21-debian-10-r8 +``` + +. Run `helm upgrade` to update your installation. + +. Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. (Be sure to replace `<password>` with your MongoDB root password.) ++ +```bash +kubectl exec -it mongodb-0 -- mongo -u root -p <password> +db.adminCommand( { setFeatureCompatibilityVersion: "4.2" } ) +``` + +. You should get `{ "ok" : 1 }` again. Exit out of the shell and pod. + +=== 3. Upgrade from MongoDB 4.2 to 4.4 + +. Change the tag one more time to `4.4.15-debian-10-r8`: ++ +```yaml +mongodb: + image: + tag: 4.4.15-debian-10-r8 +``` + +. Run `helm upgrade` to update your installation. + +. Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. (Be sure to replace `<password>` with your MongoDB root password.) ++ +```bash +kubectl exec -it mongodb-0 -- mongo -u root -p <password> +db.adminCommand( { setFeatureCompatibilityVersion: "4.4" } ) +``` + +. Once you receive `{ "ok" : 1 }`, you have successfully upgraded your MongoDB to 4.4.15. 
diff --git a/docs/server-admin-4.9/modules/operator/pages/usage-data-collection.adoc b/docs/server-admin-4.9/modules/operator/pages/usage-data-collection.adoc new file mode 100644 index 0000000000..1f08f5b21c --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/usage-data-collection.adoc @@ -0,0 +1,12 @@ += Usage data collection +:page-platform: Server 4.9, Server Admin +:page-description: Learn about CircleCI server 4.9 usage data collection for the purpose of improving our product and services. +:experimental: + +CircleCI typically collects usage data, such as logs and other aggregated data, for the purpose of improving our products and services. We never collect personally identifiable information or information that is specific to your projects or accounts. + +[#current-data-collected] +== Current data collected +server 4.9 does not include the data collection service. + +If this changes in any future release, we will communicate any data that will be collected. diff --git a/docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc b/docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc new file mode 100644 index 0000000000..d22a64d338 --- /dev/null +++ b/docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc @@ -0,0 +1,10 @@ += User authentication +:page-platform: Server 4.9, Server Admin +:page-description: CircleCI server 4.9 supports OAuth through GitHub or GitHub Enterprise. +:experimental: + +CircleCI server currently supports OAuth through GitHub or GitHub Enterprise. + +The default method for user account authentication in CircleCI server is through GitHub.com/GitHub Enterprise OAuth. + +After your installation is up and running, provide users with a link to access the CircleCI application - for example, `<your-circleci-hostname>.com` – and they will be prompted to set up an account by running through the GitHub/GitHub Enterprise OAuth flow before being redirected to the CircleCI login screen. 
diff --git a/docs/server-admin-4.9/modules/overview/pages/circleci-server-overview.adoc b/docs/server-admin-4.9/modules/overview/pages/circleci-server-overview.adoc new file mode 100644 index 0000000000..a91df53dde --- /dev/null +++ b/docs/server-admin-4.9/modules/overview/pages/circleci-server-overview.adoc @@ -0,0 +1,411 @@ += CircleCI server 4.9 overview +:page-platform: Server 4.9, Server Admin +:page-description: CircleCI server 4.9 is a continuous integration and continuous delivery (CI/CD) platform that you can install on your GCP or AWS Kubernetes cluster. +:experimental: + +[#introduction] +== Introduction + +CircleCI server is an on-premises CI/CD platform for enterprise customers. Server is suited for organizations who have compliance or security needs that require them to operate within their firewall, in a private cloud, or in a data center. + +CircleCI server provides the same features as CircleCI’s cloud offering, but operates within your Kubernetes cluster. + +[#architecture] +== Architecture + +The following diagram presents an overview of the services architecture of CircleCI server. + +.CircleCI server 4.9 Architecture +image::guides:ROOT:server-4-architecture-diagram-2024.png[server 4.9 services architecture] + +[#ports-and-services] +=== Ports and services + +The CircleCI server application exposes two services, `nginx` and `nomad-server`, using a single load balancer. If required, the load balancer can be made private, separating it from the public internet. + +[.table.table-striped] +[cols=3*, options="header", stripes=even] +|=== +| Service +| Ports +| Description + +| Frontend GUI Proxy & API +| 80 and 443 +| Exposes the web application. + +| Nomad Control Plane +| 4647 +| Exposes an RPC protocol for Nomad clients. +|=== + +The application exposes a number of external ports. These ports are used for various functions as defined in the table below.
+ +[.table.table-striped] +[cols=6*, options="header", stripes=even] +|=== +| Port number +| Protocol +| Direction +| Source / Destination +| Use +| Notes + +| `80` +| TCP +| Inbound +| End users +| HTTP web app traffic +| + +| `443` +| TCP +| Inbound +| End users +| HTTP web app traffic +| + +| `22` +| TCP +| Inbound +| Administrators +| SSH +| Only required for the bastion host. + +| `64535`-`65535` +| TCP +| Inbound +| +| SSH into builds +| Only required for the Nomad clients. +|=== + +[#job-scheduling-and-artifact-storage] +=== Job scheduling and artifact storage + +CircleCI server schedules CI jobs using HashiCorp link:https://www.nomadproject.io/[Nomad]. Depending on the execution environment used for a job, the resources are provisioned through one of the following: + +* Nomad for the Docker executor +* Machine provisioner for virtual machines (VMs), the `machine` executor +* Runner, if used, for all executor types + +The Nomad control plane runs inside your Kubernetes cluster. Nomad clients, which are responsible for running scheduled CircleCI jobs, are provisioned outside the cluster. + +CircleCI server can run Docker jobs on the Nomad clients themselves or in a dedicated virtual machine (VM). + +For more information on Nomad architecture and use, see the xref:operator:introduction-to-nomad-cluster-operation.adoc#[Introduction to Nomad Cluster operation]. + +Job artifacts and outputs are sent directly from jobs in Nomad to object storage (S3, Google Cloud Storage, or other supported options). Audit logs and other items from the application are also stored in object storage, so both the Kubernetes cluster and the Nomad clients need access to object storage. + +[#list-of-services-and-their-function] +== List of services and their function + +CircleCI server 4.9 consists of the following services. 
Find their descriptions and failure implications below: + +[.table-scroll] +-- +[.datatable.cols="4,1,3,3,3"] +|=== +| Service | Component | Description | What happens if it fails? | Notes + +| `api-service` +| App Core +| Provides a GraphQL API that provides data to render the web frontend. +| Many parts of the UI (such as Contexts) will fail completely. +| + +| `audit-log-service` +| App Core +| Persists audit log events to blob storage for long-term storage. +| Some events may not be recorded. +| + +| `branch-service` +| App Core +| A service responsible for listening to the event stream. Detects branch deletions, job updates, pushes, workflow updates. +| +| + +| `builds-service` +| App Core +| Ingests from `www-api` and sends to plans-service, workflows-conductor, and to orbs-service. +| +| + +| `circleci-mongodb` +| Execution +| Primary datastore +| +| + +| `circleci-postgres` +| +| Data storage for microservices. +| +| + +| `circleci-rabbitmq` +| Pipelines and Execution +| Queuing for workflow messaging, test results, usage, cron tasks, output, notifications, and scheduler. +| +| + +| `circleci-redis` +| Execution +| Caches data that will not be stored permanently (such as build logs), for request caching, and for rate limit calculations. +| A failed cache can result in rate limiting from the VCS if too many calls are made to it. +| + +| `circleci-telegraf` +| +| Telegraf collects statsd metrics. All white-boxed metrics in our services publish statsd metrics that are sent to Telegraf, +but can also be configured to be exported to other places (such as Datadog or Prometheus). +| +| + +| `circleci-vault` +| +| HashiCorp Vault to run encryption and decryption as a service for secrets. +| +| + +| `contexts-service` +| App Core +| Stores and provides encrypted contexts. +| All builds using Contexts will fail. +| + +| `cron-service` +| Pipelines +| Triggers scheduled workflows. +| Scheduled workflows will not run.
+| + +| `distributor-*` +| App Core +| Responsible for accepting build requests and distributing the job to appropriate queues. +| +| + +| `docker-provisioner-*` +| Docker Compute Management +| Responsible for scheduling jobs on Nomad cluster(s). +| +| + +| `domain-service` +| App Core +| Stores and provides information about our domain model. Works with permissions and API. +| Workflows will fail to start and some REST API calls may fail, causing 500 errors in the CircleCI UI. If LDAP authentication is in use, all logins will fail. +| + +| `execution-gateway-*` +| Execution +| Boundary for execution to provide API to rest of CircleCI +| +| + +| `feature-flags-api` +| Execution +| Used to configure and fetch feature flags. +| Systems will use defaults in place of any flags. In the case of server it should have no effect, since no feature flags should be set. +| + +| `frontend` +| Frontend +| CircleCI web app and `www-api` proxy. +| The UI and REST API will be unavailable and no jobs will be triggered by GitHub/Enterprise. Running builds will be OK, but no updates will be seen. +| Rate limit of 150 requests per second with a single user instantaneous limit of 300 requests. + +| `insights-service` +| Metrics +| A service to aggregate build and usage metrics for exporting and analysis. +| +| + +| `kong` +| App Core +| API management. +| +| + + +| `legacy-notifier` +| App Core +| Handles notifications to external services (for example, Slack or email). +| +| + +| `machine-provisioner-*` +| Machine Compute Management +| Periodically requests machines from compute provider to run jobs for both `machine` and remote Docker. +| Periodically checks for stale machine and remote Docker instances and requests that provider removes them. +| + +| `nginx` +| App Core / Frontend +| Handles traffic redirection and ingress. +| +| + +| `nomad-autoscaler` +| Nomad +| Manages scaling of Nomad clusters in AWS and GCP environments. 
+| +| + +| `nomad-server` +| Nomad +| Responsible for managing Nomad Clients. +| +| + +| `oidc-service` +| App Core +| Mints OIDC tokens, serves OpenID configuration and public JSON web keys. +| OIDC tokens would not be present in jobs. Jobs depending on images present in AWS ECR and authenticated with OIDC will fail to run. +| + +| `oidc-tasks-service` +| App Core +| Provides interface for minting OIDC tokens with customizable claims. Integrates with the OIDC service. +| OIDC tokens would not be present in jobs. Jobs depending on images present in AWS ECR and authenticated with OIDC will fail to run. +| + +| `orb-service` +| Pipelines +| Handles communication between orb registry and config. +| +| + +| output +| Execution +| Receives job output and status updates and writes them to S3. Also provides an API to running jobs to access caches, workspaces, store caches, workspaces, artifacts, and test results. +| +| + +| `permissions-service` +| App Core +| Provides the CircleCI permissions interface. +| Workflows will fail to start and some REST API calls may fail, causing 500 errors in the UI. +| + +| `policy-service-*` +| App Core +| Core service of config policies framework. Allows management of policy documents and policy bundles, and evaluates inputs against these bundles using the `circle-policy-agent`. +| While config policies is enabled for your organization, all pipelines will fail to run. +| + +| `runner-admin` +| Runner Admin +| Manages Runner resource classes and tokens, & coordinates runner task handling +| +| + +| `soketi` +| Frontend +| WebSockets server. +| +| + +| `step` +| Frontend +| Provides UI output in the job view +| +| + +| `telegraf` +| Metrics +| Collection of metrics. +| +| + +| `web-ui-*` +| Frontend +| Micro Front End (MFE) services used to render the frontend web application GUI. +| The respective services page will fail to load. Example: A `web-ui-server-admin` failure means the server Admin page will fail to load. 
+| The MFEs are used to render the web application located at `app.<my domain here>` + +| `webhook-service` +| App Core +| Service responsible for all webhooks, including management of state and handling events. +| +| + +| `workflows-conductor-event-consumer` +| Pipelines +| Takes in information from VCS to kick off pipelines. +| New Pipelines will not be kicked off when there are changes in the VCS. +| + +| `workflows-conductor-grpc` +| Pipelines +| Helps translate the information through gRPC. +| +| + +|=== +-- + +[#supported-platforms] +== Supported platforms + +CircleCI server is designed to deploy within a Kubernetes cluster. The machine service (machine provisioner) is able to leverage unique EKS or GKE offerings to dynamically create VM images. + +If installing outside of EKS or GKE, additional work is required to access some of the same machine build features. Setting up CircleCI runner gives you access to the same feature set as machine provisioner across a much wider range of operating systems and machine types (for example, macOS). + +We do our best to support a wide range of platforms for installation. We use environment-agnostic solutions wherever possible. However, we do not test all platforms and options. For that reason, we provide a list of tested environments, which we will continue to expand. + +[.table.table-striped] +[cols="1,1,3", options="header", stripes=even] +|=== +| Environment +| Status +| Notes + +| EKS +| Tested +| + +| GKE +| Tested +| + +| Local installation +| Tested +| + +| Azure +| Untested +| Should work with MinIO and Runner. + +| Digital Ocean +| Untested +| Should work with MinIO and Runner. + +| OpenShift +| Untested +| Known not to work. + +| Rancher +| Untested +| Should work with MinIO and Runner. +|=== + +[#installation-options] +== Installation options + +CircleCI server can be installed using cloud resources (GCP or AWS), locally, and in an air-gapped environment. 
For installation instructions, refer to the following: + +* Cloud installation guides for xref:installation:phase-1-aws-prerequisites.adoc#[AWS] and xref:installation:phase-1-gcp-prerequisites.adoc#[GCP]. Follow the sections for your chosen cloud provider. +* xref:installation:phase-1-aws-prerequisites.adoc#[Local] installation. Notes are provided in the main installation guide to show which sections are optional, or different, for local installations. +* xref:air-gapped-installation:phase-1-prerequisites.adoc#[Air-gapped] installation guide. This guide is designed to be used alongside key sections of the main installation guides for xref:installation:phase-1-aws-prerequisites.adoc#[AWS] and xref:installation:phase-1-gcp-prerequisites.adoc#[GCP]. + +ifndef::pdf[] + +[#next-steps] +== Next steps + +* xref:release-notes.adoc#[CircleCI server 4.9 release notes]. +* server 4.9 installation prerequisites for xref:installation:phase-1-aws-prerequisites.adoc#[AWS] and xref:installation:phase-1-gcp-prerequisites.adoc#[GCP]. +endif::pdf[] diff --git a/docs/server-admin-4.9/modules/overview/pages/release-notes.adoc b/docs/server-admin-4.9/modules/overview/pages/release-notes.adoc new file mode 100644 index 0000000000..a2a6b80dcc --- /dev/null +++ b/docs/server-admin-4.9/modules/overview/pages/release-notes.adoc @@ -0,0 +1,19 @@ += Release notes +:page-platform: Server 4.9, Server Admin +:page-description: Details of the new features included in each CircleCI server 4.9 release. +:experimental: + +[#overview] +== Overview + +#TBC# + +[#upgrade] +== Upgrade +For upgrade steps see the xref:installation:upgrade-server.adoc#[Upgrade server 4.9 guide]. + +== Release 4.9.0 + +=== Changelog + +For full details of this release see the link:https://circleci.com/changelog/#server-release-4-9-0[changelog].
From 202cec7f0a31a1503a43889169cc83081d0d1025 Mon Sep 17 00:00:00 2001 From: rosie yohannan <rosie@circleci.com> Date: Tue, 4 Nov 2025 11:21:23 +0000 Subject: [PATCH 02/10] remove 4.8 from content that gets indexed for search --- extensions/export-content-extension.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/extensions/export-content-extension.js b/extensions/export-content-extension.js index a873a2ec24..5fd8944589 100644 --- a/extensions/export-content-extension.js +++ b/extensions/export-content-extension.js @@ -49,7 +49,8 @@ function collectPages(contentCatalog, siteUrl) { 'server-4.4', 'server-4.5', 'server-4.6', - 'server-4.7' + 'server-4.7', + 'server-4.8' ]; contentCatalog.getComponents().forEach(({ name: comp, versions }) => { From 3494b66cf45bb8d36710529cb58d6c13cbeb4e45 Mon Sep 17 00:00:00 2001 From: rosie yohannan <rosie@circleci.com> Date: Tue, 4 Nov 2025 11:29:13 +0000 Subject: [PATCH 03/10] no index 4.8 --- antora-playbook.yml | 2 ++ .../pages/additional-considerations.adoc | 1 + .../modules/air-gapped-installation/pages/example-values.adoc | 1 + .../air-gapped-installation/pages/phase-1-prerequisites.adoc | 1 + .../pages/phase-2-configure-object-storage.adoc | 1 + .../pages/phase-3-install-circleci-server.adoc | 1 + .../pages/phase-4-configure-nomad-clients.adoc | 1 + .../pages/phase-5-test-your-installation.adoc | 1 + .../modules/installation/pages/hardening-your-cluster.adoc | 1 + .../modules/installation/pages/installation-reference.adoc | 1 + .../installation/pages/installing-server-behind-a-proxy.adoc | 1 + .../modules/installation/pages/phase-1-aws-prerequisites.adoc | 1 + .../modules/installation/pages/phase-1-gcp-prerequisites.adoc | 1 + .../modules/installation/pages/phase-2-aws-core-services.adoc | 1 + .../modules/installation/pages/phase-2-gcp-core-services.adoc | 1 + .../installation/pages/phase-3-aws-execution-environments.adoc | 1 + .../installation/pages/phase-3-gcp-execution-environments.adoc | 1 + 
.../installation/pages/phase-4-aws-post-installation.adoc | 1 + .../installation/pages/phase-4-gcp-post-installation.adoc | 1 + .../modules/installation/pages/upgrade-server.adoc | 1 + .../modules/operator/pages/application-lifecycle.adoc | 1 + .../modules/operator/pages/backup-and-restore.adoc | 1 + .../operator/pages/circleci-server-security-features.adoc | 1 + .../modules/operator/pages/configuring-external-services.adoc | 1 + .../modules/operator/pages/data-retention.adoc | 1 + .../operator/pages/expanding-internal-database-volumes.adoc | 1 + docs/server-admin-4.8/modules/operator/pages/faq.adoc | 3 ++- .../pages/introduction-to-nomad-cluster-operation.adoc | 1 + .../manage-virtual-machines-with-machine-provisioner.adoc | 1 + .../modules/operator/pages/managing-build-artifacts.adoc | 1 + .../modules/operator/pages/managing-load-balancers.adoc | 1 + .../server-admin-4.8/modules/operator/pages/managing-orbs.adoc | 1 + .../modules/operator/pages/managing-user-accounts.adoc | 1 + .../operator/pages/monitoring-stack-reference-helm-chart.adoc | 1 + .../modules/operator/pages/operator-overview.adoc | 1 + .../modules/operator/pages/troubleshooting-and-support.adoc | 1 + .../server-admin-4.8/modules/operator/pages/upgrade-mongo.adoc | 1 + .../modules/operator/pages/usage-data-collection.adoc | 1 + .../modules/operator/pages/user-authentication.adoc | 1 + .../modules/overview/pages/circleci-server-overview.adoc | 1 + .../server-admin-4.8/modules/overview/pages/release-notes.adoc | 1 + 41 files changed, 43 insertions(+), 1 deletion(-) diff --git a/antora-playbook.yml b/antora-playbook.yml index f7a34ff46d..7337dd3749 100644 --- a/antora-playbook.yml +++ b/antora-playbook.yml @@ -14,6 +14,8 @@ content: start_path: docs/reference - url: . start_path: docs/orbs + - url: . + start_path: docs/server-admin-4.9 - url: . start_path: docs/server-admin-4.8 - url: . 
diff --git a/docs/server-admin-4.8/modules/air-gapped-installation/pages/additional-considerations.adoc b/docs/server-admin-4.8/modules/air-gapped-installation/pages/additional-considerations.adoc index c2afc8c361..d9e0587889 100644 --- a/docs/server-admin-4.8/modules/air-gapped-installation/pages/additional-considerations.adoc +++ b/docs/server-admin-4.8/modules/air-gapped-installation/pages/additional-considerations.adoc @@ -1,4 +1,5 @@ = Additional considerations +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: This page presents some items that should be considered when starting an air-gapped installation of CircleCI server v4.8. :experimental: diff --git a/docs/server-admin-4.8/modules/air-gapped-installation/pages/example-values.adoc b/docs/server-admin-4.8/modules/air-gapped-installation/pages/example-values.adoc index 76c8882b2c..deaee90b51 100644 --- a/docs/server-admin-4.8/modules/air-gapped-installation/pages/example-values.adoc +++ b/docs/server-admin-4.8/modules/air-gapped-installation/pages/example-values.adoc @@ -1,4 +1,5 @@ = Example `values.yaml` +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: This page presents an example values.yaml file to help with setting up an air-gapped installation of CircleCI server v4.8. :experimental: diff --git a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc index b9f5038747..a1586d0aae 100644 --- a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc +++ b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-1-prerequisites.adoc @@ -1,4 +1,5 @@ = Phase 1 - Prerequisites +:page-noindex: true :page-platform: Server v4.8, Server Admin :experimental: :page-description: A guide to installing CircleCI server v4.8 in an air-gapped environment. Requirements, images and Helm charts. 
diff --git a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc index 51aa9cb694..1481e2bf53 100644 --- a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc +++ b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-2-configure-object-storage.adoc @@ -1,4 +1,5 @@ = Phase 2 - Configure object storage +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: How to configure object storage through MinIO to run CircleCI server v4.8 in an air-gapped environment. :experimental: diff --git a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc index 31d8fec2e2..d3738e74fd 100644 --- a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc +++ b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-3-install-circleci-server.adoc @@ -1,4 +1,5 @@ = Phase 3 - install CircleCI server +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: How to install the CircleCI server v4.8 Helm deployment to an air-gapped environment. 
:experimental: diff --git a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc index 2c045f6e11..047d434cc3 100644 --- a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc +++ b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-4-configure-nomad-clients.adoc @@ -1,4 +1,5 @@ = Phase 4 - Configure Nomad clients +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: How to configure Nomad clients to run with CircleCI server v4.8 in an air-gapped environment. :experimental: diff --git a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc index 2362823a8e..c0635008cb 100644 --- a/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc +++ b/docs/server-admin-4.8/modules/air-gapped-installation/pages/phase-5-test-your-installation.adoc @@ -1,4 +1,5 @@ = Phase 5 - Test your installation +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: How to test your CircleCI server v4.8 installation in an air-gapped environment. :experimental: diff --git a/docs/server-admin-4.8/modules/installation/pages/hardening-your-cluster.adoc b/docs/server-admin-4.8/modules/installation/pages/hardening-your-cluster.adoc index edc116542c..ac21cd6314 100644 --- a/docs/server-admin-4.8/modules/installation/pages/hardening-your-cluster.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/hardening-your-cluster.adoc @@ -1,4 +1,5 @@ = Hardening your cluster +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: This section provides supplemental information on hardening your Kubernetes cluster for CircleCI server v4.8. 
:experimental: diff --git a/docs/server-admin-4.8/modules/installation/pages/installation-reference.adoc b/docs/server-admin-4.8/modules/installation/pages/installation-reference.adoc index a9d5c531c8..8152abe57d 100644 --- a/docs/server-admin-4.8/modules/installation/pages/installation-reference.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/installation-reference.adoc @@ -1,4 +1,5 @@ = Installation reference +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Reference documentation for installing CircleCI server v4.8. :experimental: diff --git a/docs/server-admin-4.8/modules/installation/pages/installing-server-behind-a-proxy.adoc b/docs/server-admin-4.8/modules/installation/pages/installing-server-behind-a-proxy.adoc index 40e6a734cc..d8a3fcdd1f 100644 --- a/docs/server-admin-4.8/modules/installation/pages/installing-server-behind-a-proxy.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/installing-server-behind-a-proxy.adoc @@ -1,4 +1,5 @@ = Installing server behind a proxy +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn how to install CircleCI server v4.8 behind a proxy. :experimental: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-1-aws-prerequisites.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-1-aws-prerequisites.adoc index 2ca5f1eab1..14c701015f 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-1-aws-prerequisites.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-1-aws-prerequisites.adoc @@ -1,4 +1,5 @@ = Phase 1 AWS - Prerequisites +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Find the general and infrastructure-specific requirements that are needed in order to configure the CircleCI server v4.8 application. 
:experimental: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-1-gcp-prerequisites.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-1-gcp-prerequisites.adoc index 6eda4fe56c..c506eb7895 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-1-gcp-prerequisites.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-1-gcp-prerequisites.adoc @@ -1,4 +1,5 @@ = Phase 1 GCP - Prerequisites +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Find the general and infrastructure-specific requirements that are needed in order to configure the CircleCI server v4.8 application. :env-gcp: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-2-aws-core-services.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-2-aws-core-services.adoc index 5642769f89..6715c22ce8 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-2-aws-core-services.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-2-aws-core-services.adoc @@ -1,4 +1,5 @@ = Phase 2 AWS - core services +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Installation guide for CircleCI server v4.8 core services. :env-aws: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-2-gcp-core-services.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-2-gcp-core-services.adoc index b03f229bb7..2c8fab6842 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-2-gcp-core-services.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-2-gcp-core-services.adoc @@ -1,4 +1,5 @@ = Phase 2 GCP - core services +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Installation guide for CircleCI server v4.8 core services. 
:env-gcp: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-3-aws-execution-environments.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-3-aws-execution-environments.adoc index 3b8a924e9c..937abcf5f1 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-3-aws-execution-environments.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-3-aws-execution-environments.adoc @@ -1,4 +1,5 @@ = Phase 3 AWS - execution environments +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Installation guide for CircleCI server v4.8 execution environments. :env-aws: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-3-gcp-execution-environments.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-3-gcp-execution-environments.adoc index fe9cb5e83e..3fa2c3cebf 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-3-gcp-execution-environments.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-3-gcp-execution-environments.adoc @@ -1,4 +1,5 @@ = Phase 3 GCP - execution environments +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Installation guide for CircleCI server v4.8 execution environments. :experimental: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-4-aws-post-installation.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-4-aws-post-installation.adoc index c55f369168..2b822dd527 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-4-aws-post-installation.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-4-aws-post-installation.adoc @@ -1,4 +1,5 @@ = Phase 4 AWS - post installation +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: CircleCI server v4.8 post installation steps. 
:env-aws: diff --git a/docs/server-admin-4.8/modules/installation/pages/phase-4-gcp-post-installation.adoc b/docs/server-admin-4.8/modules/installation/pages/phase-4-gcp-post-installation.adoc index c7d062700c..f7054281af 100644 --- a/docs/server-admin-4.8/modules/installation/pages/phase-4-gcp-post-installation.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/phase-4-gcp-post-installation.adoc @@ -1,4 +1,5 @@ = Phase 4 GCP - post installation +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: CircleCI server v4.8 post installation steps. :env-gcp: diff --git a/docs/server-admin-4.8/modules/installation/pages/upgrade-server.adoc b/docs/server-admin-4.8/modules/installation/pages/upgrade-server.adoc index 692a934c97..610a27d5b2 100644 --- a/docs/server-admin-4.8/modules/installation/pages/upgrade-server.adoc +++ b/docs/server-admin-4.8/modules/installation/pages/upgrade-server.adoc @@ -1,4 +1,5 @@ = Upgrade server +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: "This document lists the steps required to upgrade a CircleCI server v4.8 installation." :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/application-lifecycle.adoc b/docs/server-admin-4.8/modules/operator/pages/application-lifecycle.adoc index 7fcf47d391..b7357aaa00 100644 --- a/docs/server-admin-4.8/modules/operator/pages/application-lifecycle.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/application-lifecycle.adoc @@ -1,4 +1,5 @@ = Application lifecycle +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn about CircleCI server v4.8 semantic versioning and release schedules. 
:experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc b/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc index 8e0c7c7fde..5c11d5af8b 100644 --- a/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc @@ -1,4 +1,5 @@ = Backup and restore +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: This document outlines recommendations for how to back up and restore your CircleCI server v4.8 instance data and state. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/circleci-server-security-features.adoc b/docs/server-admin-4.8/modules/operator/pages/circleci-server-security-features.adoc index f18e80c201..be613af5ec 100644 --- a/docs/server-admin-4.8/modules/operator/pages/circleci-server-security-features.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/circleci-server-security-features.adoc @@ -1,4 +1,5 @@ = CircleCI server security features +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: This document outlines security features built into CircleCI server v4.8 and related integrations. 
:experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc b/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc index f61dc95320..83c4d07d70 100644 --- a/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc @@ -1,4 +1,5 @@ = Configuring external services +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: This document describes how to configure the following external services for use with a CircleCI server v4.8 installation :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/data-retention.adoc b/docs/server-admin-4.8/modules/operator/pages/data-retention.adoc index 0792bac899..2fd11cd1dd 100644 --- a/docs/server-admin-4.8/modules/operator/pages/data-retention.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/data-retention.adoc @@ -1,4 +1,5 @@ = Data retention in server +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn how to configure data retention policies for MongoDB, PostgreSQL, and object storage buckets in your CircleCI server installation. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc b/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc index d70ab0de37..adc44dd94c 100644 --- a/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc @@ -1,4 +1,5 @@ = Expanding internal database volumes +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Expanding internal database volumes for CircleCI server v4.8. 
:experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/faq.adoc b/docs/server-admin-4.8/modules/operator/pages/faq.adoc index 27236bef4d..dc3a1f9305 100644 --- a/docs/server-admin-4.8/modules/operator/pages/faq.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/faq.adoc @@ -1,10 +1,11 @@ = CircleCI server v4.8 FAQ +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Find answers about the CircleCI server v4.8 data retention policy, what control is granted over Nomad certificates. :experimental: ## Does server v4.8 have a data retention policy? -Data retention for MongoDB and PostgreSQL can be configured by following our guide xref:data-retention.adoc[Data Retention in Server]. +Data retention for MongoDB and PostgreSQL can be configured by following our guide xref:data-retention.adoc[Data Retention in Server]. ## What control is granted over Nomad certificates? Full control of the certificates, all the way down to mTLS for Nomad. diff --git a/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc b/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc index 4f9db27708..109aa2e074 100644 --- a/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc @@ -1,4 +1,5 @@ = Introduction to Nomad cluster operation +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn how to operate the Nomad Cluster in your CircleCI server v4.8 installation. 
:experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc b/docs/server-admin-4.8/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc index 474f713fe4..209ec0a255 100644 --- a/docs/server-admin-4.8/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/manage-virtual-machines-with-machine-provisioner.adoc @@ -1,4 +1,5 @@ = Manage virtual machines with machine provisioner +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: CircleCI server v4.8 machine provisioner service controls how machine executor (Linux and Windows images) and Remote Docker jobs are run. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc b/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc index 3e7d2b7ce4..a44c601b09 100644 --- a/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc @@ -1,4 +1,5 @@ = Managing build artifacts +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn how CircleCI server v4.8 build artifacts persist data after a job is completed and how they can be used for longer-term storage of your build process outputs. 
:experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/managing-load-balancers.adoc b/docs/server-admin-4.8/modules/operator/pages/managing-load-balancers.adoc index 57cf795441..4115eb339f 100644 --- a/docs/server-admin-4.8/modules/operator/pages/managing-load-balancers.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/managing-load-balancers.adoc @@ -1,4 +1,5 @@ = Managing load balancers +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Use this guide to make the frontend load balancer private for CircleCI server v4.8. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc b/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc index 7272d270b3..23e95f14dd 100644 --- a/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc @@ -1,4 +1,5 @@ = Managing orbs +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Use this page to learn about orbs and how to manage them within CircleCI server v4.8. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc b/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc index 3698326784..dc568c255f 100644 --- a/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc @@ -1,4 +1,5 @@ = Managing user accounts +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: This section provides information to help CircleCI server v4.8 operators manage user accounts. 
:experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc b/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc index 30504c2060..bd26839e58 100644 --- a/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc @@ -1,4 +1,5 @@ = Monitoring stack reference Helm chart +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn how to deploy your own monitoring stack using our Helm chart as a reference. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/operator-overview.adoc b/docs/server-admin-4.8/modules/operator/pages/operator-overview.adoc index f356d01ce0..610cd9bc29 100644 --- a/docs/server-admin-4.8/modules/operator/pages/operator-overview.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/operator-overview.adoc @@ -1,4 +1,5 @@ = Operator overview +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn about the various tasks and tools involved in administering an installation of CircleCI server v4.8. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/troubleshooting-and-support.adoc b/docs/server-admin-4.8/modules/operator/pages/troubleshooting-and-support.adoc index a8455c7f2c..d57f752a76 100644 --- a/docs/server-admin-4.8/modules/operator/pages/troubleshooting-and-support.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/troubleshooting-and-support.adoc @@ -1,4 +1,5 @@ = Troubleshooting and support +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Use this document to find troubleshooting steps if you are having problems with your CircleCI server v4.8 installation. 
:experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/upgrade-mongo.adoc b/docs/server-admin-4.8/modules/operator/pages/upgrade-mongo.adoc index b8421a068e..c59c92b487 100644 --- a/docs/server-admin-4.8/modules/operator/pages/upgrade-mongo.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/upgrade-mongo.adoc @@ -1,4 +1,5 @@ = Upgrade MongoDB +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn how to upgrade MongoDB up to v4.4.15 in an installation of CircleCI server v4.8. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/usage-data-collection.adoc b/docs/server-admin-4.8/modules/operator/pages/usage-data-collection.adoc index 8a456ff5eb..ecb383f140 100644 --- a/docs/server-admin-4.8/modules/operator/pages/usage-data-collection.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/usage-data-collection.adoc @@ -1,4 +1,5 @@ = Usage data collection +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Learn about CircleCI server v4.8 usage data collection for the purpose of improving our product and services. :experimental: diff --git a/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc b/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc index cf780a6e43..bcc0ea8ed3 100644 --- a/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc @@ -1,4 +1,5 @@ = User authentication +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: CircleCI server v4.8 supports OAuth through GitHub or GitHub Enterprise. 
:experimental: diff --git a/docs/server-admin-4.8/modules/overview/pages/circleci-server-overview.adoc b/docs/server-admin-4.8/modules/overview/pages/circleci-server-overview.adoc index accb56db8a..3f55fbe8a3 100644 --- a/docs/server-admin-4.8/modules/overview/pages/circleci-server-overview.adoc +++ b/docs/server-admin-4.8/modules/overview/pages/circleci-server-overview.adoc @@ -1,4 +1,5 @@ = CircleCI server v4.8 overview +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: CircleCI server v4.8 is a continuous integration and continuous delivery (CI/CD) platform that you can install on your GCP or AWS Kubernetes cluster. :experimental: diff --git a/docs/server-admin-4.8/modules/overview/pages/release-notes.adoc b/docs/server-admin-4.8/modules/overview/pages/release-notes.adoc index 64eaa0f57d..dbdd7ce9ef 100644 --- a/docs/server-admin-4.8/modules/overview/pages/release-notes.adoc +++ b/docs/server-admin-4.8/modules/overview/pages/release-notes.adoc @@ -1,4 +1,5 @@ = Release notes +:page-noindex: true :page-platform: Server v4.8, Server Admin :page-description: Details of the new features included in each CircleCI server v4.8 release. 
:experimental: From 3bff13ab4205fd063d473b20aa52272058b7dcfb Mon Sep 17 00:00:00 2001 From: rosie yohannan <rosie@circleci.com> Date: Tue, 4 Nov 2025 13:29:21 +0000 Subject: [PATCH 04/10] fix lint errors in 4.8 --- .../operator/pages/backup-and-restore.adoc | 5 ++-- .../pages/configuring-external-services.adoc | 26 +++++++++---------- .../expanding-internal-database-volumes.adoc | 2 +- ...troduction-to-nomad-cluster-operation.adoc | 4 +-- .../pages/managing-build-artifacts.adoc | 4 +-- .../modules/operator/pages/managing-orbs.adoc | 2 +- .../pages/managing-user-accounts.adoc | 2 +- ...monitoring-stack-reference-helm-chart.adoc | 2 +- .../operator/pages/user-authentication.adoc | 2 +- styles/config/vocabularies/Docs/accept.txt | 3 ++- 10 files changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc b/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc index 5c11d5af8b..fca9fddefa 100644 --- a/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/backup-and-restore.adoc @@ -7,10 +7,9 @@ [#overview-backup] == Overview -When operating and administering CircleCI server, you will need to consider how to maintain backups and recover your installation, should there be a need to migrate it to another cluster or recover from a critical event. +When operating and administering CircleCI server, you will need to maintain backups and recover your installation if you need to migrate to another cluster or recover from a critical event. -CircleCI recommends link:https://velero.io/[Velero] for backup and restore. The benefit of this approach is that it not only restores your application's data, -but it also restores the state of the Kubernetes cluster and its resources at the time of the backup. CirleCI server supports backup and restore with Velero `1.12`. 
This document outlines recommendations for how to back up and restore your CircleCI server instance data and state using link:https://velero.io/[Velero]. +CircleCI recommends link:https://velero.io/[Velero] for backup and restore. The benefit of this approach is in restoring your application's data along with the state of the Kubernetes cluster and its resources at the time of the backup. CircleCI server supports backup and restore with Velero `1.12`. This document outlines recommendations for how to back up and restore your CircleCI server instance data and state using link:https://velero.io/[Velero]. NOTE: Backup and restore of the CircleCI services is dependent on Velero. If your cluster is lost, you will not be able to restore CircleCI until you have successfully started Velero in the cluster. From there you can recover the CircleCI services. diff --git a/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc b/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc index 83c4d07d70..5b2257c3fc 100644 --- a/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/configuring-external-services.adoc @@ -4,7 +4,7 @@ :page-description: This document describes how to configure the following external services for use with a CircleCI server v4.8 installation :experimental: -This page describes how to configure external services for use with either a new CircleCI server v4.8 installation or migrating internal PostgreSQL and MongoDB data from existing CircleCI server v4.8 installation to your externalized datastores. +This page describes how to configure external services for use with a new CircleCI server v4.8 installation. Migration to externalized PostgreSQL and MongoDB is also supported and described in this guide. 
[#postgresql] == PostgreSQL @@ -67,7 +67,7 @@ Consider running at least two PostgreSQL replicas to allow recovery from primary NOTE: If you are doing a fresh install of CircleCI server, then you can skip this section and head to <<connecting-your-external-postgres>> -When a CircleCI server instance is deployed, Postgres is deployed internally by default via its helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external Postgres, you may use the guide below to migrate your Postgres data to your external database. +When a CircleCI server instance is deployed, PostgreSQL is deployed internally by default via its Helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external PostgreSQL, you may use the guide below to migrate your PostgreSQL data to your external database. CAUTION: This process requires downtime. @@ -102,31 +102,31 @@ kubectl exec -it -n "$namespace" "$PG_POD" -- bash psql -h <your-external-postgres-host> -U postgres -p <your-external-postgres-port> ---- -You should be able to connect to your external Postgres at this point. If not, resolve any issues before proceeding. +You should be able to connect to your external PostgreSQL at this point. If not, resolve any issues before proceeding. TIP: You may use `helm upgrade ...` to restore your CircleCI server instance to a running state. ==== 3. Generate export of your internal PostgreSQL -. Retrieve your internal Postgres credentials: +. Retrieve your internal PostgreSQL credentials: + [source,shell] ---- PG_PASSWORD=$(kubectl -n "$namespace" get secrets postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) ---- + -NOTE: The username for your internal Postgres is `postgres`. The password is randomly generated unless directly set at installation. 
+NOTE: The username for your internal PostgreSQL is `postgres`. The password is randomly generated unless directly set at installation. -. Connect to your Postgres pod and perform a Postgres dump: +. Connect to your PostgreSQL pod and perform a PostgreSQL dump: + [source,shell] ---- kubectl -n "$namespace" exec -it "$PG_POD" -- bash -c "export PGPASSWORD='$PG_PASSWORD' && pg_dumpall -U postgres -c" > circle.sql ---- + -NOTE: This backup is created in the filesystem used by the Postgres pod. If you wish to store it locally, you may use `kubectl cp -n "$namespace" "$PG_POD":circle.sql /local/dir/circle.sql` +NOTE: This backup is created in the filesystem used by the PostgreSQL pod. If you wish to store it locally, you may use `kubectl cp -n "$namespace" "$PG_POD":circle.sql /local/dir/circle.sql` -. Clean up the Postgres Dump. Your internally deployed Postgres uses the username `postgres`. However, during the restore, the Postgres dump will drop all resources before trying to create new ones, including the `postgres` user. Access the Postgres pod where the dump is stored and run the following commands on the Postgres dump file to remove the lines that would delete the Postgres user. +. Clean up the PostgreSQL Dump. Your internally deployed PostgreSQL uses the username `postgres`. However, during the restore, the PostgreSQL dump will drop all resources before trying to create new ones, including the `postgres` user. Access the PostgreSQL pod where the dump is stored and run the following commands on the PostgreSQL dump file to remove the lines that would delete the PostgreSQL user. + [source,shell] ---- @@ -140,14 +140,14 @@ sed -i".bak" '/ALTER ROLE postgres WITH SUPERUSER INHERIT CREATEROLE CREATEDB LO ==== 4. 
Restore your data in your external PostgreSQL -While still connected to your the internally deployed Postgres, restore the dumped data to your external Postgres: +While still connected to the internally deployed PostgreSQL, restore the dumped data to your external PostgreSQL: [source,shell] ---- psql -h <your-external-postgres-host> -U postgres -p <your-external-postgres-port> < circle.sql ---- -Now your external Postgres will have your CircleCI server data. In the next section you will update CircleCI server to point to your external Postgres. +Now your external PostgreSQL will have your CircleCI server data. In the next section you will update CircleCI server to point to your external PostgreSQL. [#connecting-your-external-postgres] === Connecting your external PostgreSQL instance to CircleCI server @@ -199,7 +199,7 @@ postgresql: ---- -- -The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized PostgreSQL instance then when you perform `helm upgrade`, the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. +The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized PostgreSQL instance, when you perform `helm upgrade` the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. [#backing-up-postgresql] @@ -223,7 +223,7 @@ NOTE: If using your own MongoDB instance, it needs to be version 3.6 or higher. NOTE: If you are doing a fresh install of CircleCI server, then you can skip this section and head to <<connecting-your-external-mongodb>> -When a CircleCI server instance deployed, MongoDB is deployed internally by default via its helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. 
Once you have configured your external MongoDB, you may use the guide below to migrate your Mongo data to your external database. +When a CircleCI server instance is deployed, MongoDB is deployed internally by default via its Helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external MongoDB, you may use the guide below to migrate your Mongo data to your external database. CAUTION: This process requires downtime. @@ -360,4 +360,4 @@ mongodb: ---- -- -The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized MongoDB instance then when you perform `helm upgrade`, the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. +The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized MongoDB instance, when you perform `helm upgrade` the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. diff --git a/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc b/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc index adc44dd94c..32d03c5932 100644 --- a/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/expanding-internal-database-volumes.adoc @@ -7,7 +7,7 @@ [#overview] == Overview -If you have chosen to deploy either of the CircleCI databases (MongoDB or PostgreSQL) within the cluster, rather than externally provisioning these databases, there may come a point at which the storage space initially made available to these databases is no longer sufficient. 
Internal databases in your Kubernetes cluster make use of link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/[persistent volumes] for persistent storage. The size of these volumes is determined by persistence volume claims (PVCs). These PVCs request storage space based on what has been made available to the nodes in your cluster. +If you deployed CircleCI databases (MongoDB or PostgreSQL) internally within the cluster, their storage may eventually become insufficient. Internal databases in your Kubernetes cluster make use of link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/[persistent volumes] for persistent storage. The size of these volumes is determined by persistence volume claims (PVCs). These PVCs request storage space based on what has been made available to the nodes in your cluster. This document runs through the steps required to increase PVCs to expand the space available to your internally deployed databases. This operation should not require any downtime, unless you need to restart your database pods. diff --git a/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc b/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc index 109aa2e074..33b08823a3 100644 --- a/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc @@ -107,7 +107,7 @@ kubectl run nomad-tunnel --rm -it --restart=Never --image=alpine/socat -n <serve kubectl port-forward pod/nomad-tunnel 4646:4646 -n <server-namespace> ---- -. Navigate to ++`http://localhost:4646/ui`++ in your browser to access the Nomad UI. For more information on utilizing the Nomad UI, refer to the link:https://developer.hashicorp.com/nomad/tutorials/web-ui[Nomad documentation]. +. Navigate to `++http://localhost:4646/ui++` in your browser to access the Nomad UI. 
For more information on utilizing the Nomad UI, refer to the link:https://developer.hashicorp.com/nomad/tutorials/web-ui[Nomad documentation]. [#shutting-down-a-nomad-client] === Shutting down a Nomad client @@ -146,7 +146,7 @@ The script should use the commands in the section above to do the following: . Terminate the instance. [#externalize-servers] -== Externalize your Nomad Servers +== Externalize your Nomad servers From server v4.8, Nomad Servers may now be deployed externally to your Kubernetes cluster that hosts your installation of CircleCI server. Externalization of Nomad Servers is optional. Externalization of Nomad Servers can improve their stability. If you already have a CircleCI server instance with _internal_ Nomad Servers, the process to switch to external Nomad Servers is as follows: . Stop all builds on your CircleCI server instance. diff --git a/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc b/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc index a44c601b09..2aded528c8 100644 --- a/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/managing-build-artifacts.adoc @@ -81,13 +81,13 @@ Also, by default, the following types will be rendered as plain text: | JavaScript | Text -| ecmascript +| ECMAscript | Application | JavaScript | Application -| ecmascript +| ECMAscript | Text | XML diff --git a/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc b/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc index 23e95f14dd..5e0b90552c 100644 --- a/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/managing-orbs.adoc @@ -57,7 +57,7 @@ NOTE: `<orb-name>` and `<orb-version>` are optional. 
You can choose to only spec [using-orbs-behind-a-proxy] == Using orbs behind a proxy -When importing orbs, the CLI must be able to talk to the server installation and to `circleci.com`. If you want to do this when using a server installation behind a proxy, the CLI needs to be configured to use the proxy to make those requests to `circleci.com`, rather than proxying requests to the server install. For example: +When importing orbs, the CLI must be able to talk to the server installation and to `circleci.com`. To import orbs for a server installation behind a proxy, configure the CLI to use the proxy to make requests to `circleci.com`, rather than proxying requests to the server installation. For example: [source,bash] ---- diff --git a/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc b/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc index dc568c255f..1fedae08ba 100644 --- a/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/managing-user-accounts.adoc @@ -60,5 +60,5 @@ When using GitHub.com, you can limit who can register with your CircleCI install . Navigate to your CircleCI Admin Settings page. . Select *System Settings* from the Admin Setting menu. -. Scroll down to Required Org Membership List. +. Scroll down to Required Organization Membership List. . Enter the organization(s) you wish to approve. If entering more than one organization, use a comma-delimited string. 
diff --git a/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc b/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc index bd26839e58..b84ab153c2 100644 --- a/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc @@ -7,6 +7,6 @@ You may want to access metrics to gain insight into the performance of builds on your CircleCI server installation. This page provides a sample to show how observability can be set up using the most common tools via a Helm chart package. CircleCI does not support or provide any tooling to provide observability into the running services and infrastructure that CircleCI server operates in. -This guide is provided as a reference on how you could set up observability and is not an officially supported product. Our reference monitoring stack currently contains samples on how to set up and configure Grafana, Prometheus, and Telegraf, as well as some built-in dashboards for monitoring key service level indicators. +This guide is provided as a reference on how you can set up observability and is not an officially supported product. Our reference monitoring stack contains samples on how to set up and configure Grafana, Prometheus, and Telegraf, as well as some built-in dashboards for monitoring key service level indicators. You can find the link:https://github.com/CircleCI-Public/circleci-server-monitoring-reference?tab=readme-ov-file#server-monitoring-stack[reference monitoring stack on our public GitHub], as well as more information on how you could configure this setup. You will also find information on link:https://github.com/CircleCI-Public/circleci-server-monitoring-reference?tab=readme-ov-file#modifying-or-adding-grafana-dashboards[adding to or editing the existing dashboards]. 
diff --git a/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc b/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc index bcc0ea8ed3..61a753e82c 100644 --- a/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc +++ b/docs/server-admin-4.8/modules/operator/pages/user-authentication.adoc @@ -8,4 +8,4 @@ CircleCI server currently supports OAuth through GitHub or GitHub Enterprise. The default method for user account authentication in CircleCI server is through GitHub.com/GitHub Enterprise OAuth. -After your installation is up and running, provide users with a link to access the CircleCI application - for example, `<your-circleci-hostname>.com` – and they will be prompted to set up an account by running through the GitHub/GitHub Enterprise OAuth flow before being redirected to the CircleCI login screen. +Once your installation is up and running, provide users with a link to access the CircleCI application. For example, share `<your-circleci-hostname>.com`. Your users will be prompted to set up an account by running through the GitHub/GitHub Enterprise OAuth flow before being redirected to the CircleCI login screen. diff --git a/styles/config/vocabularies/Docs/accept.txt b/styles/config/vocabularies/Docs/accept.txt index 71387d9461..5245ef0188 100644 --- a/styles/config/vocabularies/Docs/accept.txt +++ b/styles/config/vocabularies/Docs/accept.txt @@ -128,6 +128,7 @@ Docker\sHub Dockerfiles? dockerize [Dd]otfiles? +ECMAscript [Ee]nablement EKS Elixir @@ -269,7 +270,7 @@ PowerShell [Pp]roxied [Pp]roxying Pusher -PVC +PVCs? 
Python pytest RAM From e907b6218d1e90feb47e126f8dc0e10af1c67f37 Mon Sep 17 00:00:00 2001 From: rosie yohannan <rosie@circleci.com> Date: Tue, 4 Nov 2025 13:47:21 +0000 Subject: [PATCH 05/10] fix lint errors --- .../ROOT/partials/installation/phase-2.adoc | 12 ++++----- .../ROOT/partials/installation/phase-2.adoc | 12 ++++----- .../operator/pages/backup-and-restore.adoc | 5 ++-- .../pages/configuring-external-services.adoc | 27 ++++++++++--------- .../expanding-internal-database-volumes.adoc | 2 +- ...troduction-to-nomad-cluster-operation.adoc | 6 ++--- .../pages/managing-build-artifacts.adoc | 4 +-- .../modules/operator/pages/managing-orbs.adoc | 2 +- .../pages/managing-user-accounts.adoc | 2 +- ...monitoring-stack-reference-helm-chart.adoc | 2 +- .../operator/pages/user-authentication.adoc | 2 +- 11 files changed, 38 insertions(+), 38 deletions(-) diff --git a/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-2.adoc b/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-2.adoc index 2241230a6a..e6efc54f76 100644 --- a/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-2.adoc +++ b/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-2.adoc @@ -79,7 +79,7 @@ NOTE: During the installation process, you may use the following command to gene [#api-token] === a. API token -The application requires a Kubernetes Secret containing an API token. This API token is used to facilitate internal API communication to api-service. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. There are two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. +The application requires a Kubernetes Secret containing an API token. This API token is used to facilitate internal API communication to `api-service`. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. 
You have two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. [tabs] ==== @@ -107,7 +107,7 @@ CircleCI creates Secret:: -- **Option 2:** CircleCI creates the Kubernetes Secret for you. -CircleCI will create the Kubernetes Secret "api-token" automatically. +CircleCI will create the Kubernetes Secret `api-token` automatically. -- ==== @@ -115,7 +115,7 @@ CircleCI will create the Kubernetes Secret "api-token" automatically. [#session-cookie] === b. Session cookie -The application requires a session cookie key Kubernetes Secret, which CircleCI uses to sign session cookies. The Secret must be exactly 16 characters long. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. There are two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. +The application requires a session cookie key Kubernetes Secret, which CircleCI uses to sign session cookies. The Secret must be exactly 16 characters long. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. You have two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. [tabs] ==== @@ -188,7 +188,7 @@ keyset: [#postgres-credentials] ==== i. Credentials -The application requires a Kubernetes Secret containing PostgreSQL credentials. This is true when using either the internal (default) or an externally hosted instance of PostgreSQL. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. +The application requires a Kubernetes Secret containing PostgreSQL credentials. This Secret is required when using either the internal (default) or an externally hosted instance of PostgreSQL. CircleCI will not be able to recover the values if lost. 
Based on how you prefer to manage Kubernetes Secrets there are two options. [tabs] ==== @@ -264,7 +264,7 @@ postgresql: === e. MongoDB credentials -The application requires a Kubernetes Secret containing MongoDB credentials. This is true when using either the internal (default) or an externally hosted instance of MongoDB. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. +The application requires a Kubernetes Secret containing MongoDB credentials. This Secret is required true when using either the internal (default) or an externally hosted instance of MongoDB. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. [tabs] ==== @@ -479,7 +479,7 @@ Lets Encrypt:: -- *Let's Encrypt* -https://letsencrypt.org/[Let's Encrypt] will request and manage certificates for you. This is a good option when the load balancer is publicly accessible. The following snippet (using your own email) can be added to `values.yaml`: +https://letsencrypt.org/[Let's Encrypt] will request and manage certificates for you. Let's Encrypt is a good option when the load balancer is publicly accessible. The following snippet (using your own email) can be added to `values.yaml`: [source,yaml] ---- diff --git a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc index ca9442ecce..9bb6b279dd 100644 --- a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc +++ b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-2.adoc @@ -79,7 +79,7 @@ NOTE: During the installation process, you may use the following command to gene [#api-token] === a. API token -The application requires a Kubernetes Secret containing an API token. This API token is used to facilitate internal API communication to api-service. 
Use a random string and store it securely. CircleCI will not be able to recover this value if lost. There are two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. +The application requires a Kubernetes Secret containing an API token. This API token is used to facilitate internal API communication to `api-service`. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. You have two options depending on whether you want to create the Kubernetes Secret yourself, or if you want CircleCI to create it for you. [tabs] ==== @@ -107,7 +107,7 @@ CircleCI creates Secret:: -- **Option 2:** CircleCI creates the Kubernetes Secret for you. -CircleCI will create the Kubernetes Secret "api-token" automatically. +CircleCI will create the Kubernetes Secret `api-token` automatically. -- ==== @@ -115,7 +115,7 @@ CircleCI will create the Kubernetes Secret "api-token" automatically. [#session-cookie] === b. Session cookie -The application requires a session cookie key Kubernetes Secret, which CircleCI uses to sign session cookies. The Secret must be exactly 16 characters long. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. There are two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. +The application requires a session cookie key Kubernetes Secret, which CircleCI uses to sign session cookies. The Secret must be exactly 16 characters long. Use a random string and store it securely. CircleCI will not be able to recover this value if lost. You have two options depending on whether you want to create the Kubernetes Secret, or if you want CircleCI to create it for you. [tabs] ==== @@ -188,7 +188,7 @@ keyset: [#postgres-credentials] ==== i. Credentials -The application requires a Kubernetes Secret containing PostgreSQL credentials. 
This is true when using either the internal (default) or an externally hosted instance of PostgreSQL. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. +The application requires a Kubernetes Secret containing PostgreSQL credentials. This Secret is required when using either the internal (default) or an externally hosted instance of PostgreSQL. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. [tabs] ==== @@ -264,7 +264,7 @@ postgresql: === e. MongoDB credentials -The application requires a Kubernetes Secret containing MongoDB credentials. This is true when using either the internal (default) or an externally hosted instance of MongoDB. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. +The application requires a Kubernetes Secret containing MongoDB credentials. This Secret is required when using either the internal (default) or an externally hosted instance of MongoDB. CircleCI will not be able to recover the values if lost. Based on how you prefer to manage Kubernetes Secrets there are two options. [tabs] ==== @@ -479,7 +479,7 @@ Lets Encrypt:: -- *Let's Encrypt* -https://letsencrypt.org/[Let's Encrypt] will request and manage certificates for you. This is a good option when the load balancer is publicly accessible. The following snippet (using your own email) can be added to `values.yaml`: +https://letsencrypt.org/[Let's Encrypt] will request and manage certificates for you. Let's Encrypt is a good option when the load balancer is publicly accessible. 
The following snippet (using your own email) can be added to `values.yaml`: [source,yaml] ---- diff --git a/docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc b/docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc index 3101c05979..cf4a59b829 100644 --- a/docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/backup-and-restore.adoc @@ -6,10 +6,9 @@ [#overview-backup] == Overview -When operating and administering CircleCI server, you will need to consider how to maintain backups and recover your installation, should there be a need to migrate it to another cluster or recover from a critical event. +When operating and administering CircleCI server, you will need to maintain backups and recover your installation if you need to migrate to another cluster or recover from a critical event. -CircleCI recommends link:https://velero.io/[Velero] for backup and restore. The benefit of this approach is that it not only restores your application's data, -but it also restores the state of the Kubernetes cluster and its resources at the time of the backup. CirleCI server supports backup and restore with Velero `1.12`. This document outlines recommendations for how to back up and restore your CircleCI server instance data and state using link:https://velero.io/[Velero]. +CircleCI recommends link:https://velero.io/[Velero] for backup and restore. The benefit of this approach is in restoring your application's data along with the state of the Kubernetes cluster and its resources at the time of the backup. CirleCI server supports backup and restore with Velero `1.12`. This document outlines recommendations for how to back up and restore your CircleCI server instance data and state using link:https://velero.io/[Velero]. NOTE: Backup and restore of the CircleCI services is dependent on Velero. 
If your cluster is lost, you will not be able to restore CircleCI until you have successfully started Velero in the cluster. From there you can recover the CircleCI services. diff --git a/docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc b/docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc index abf3297357..0f77cca105 100644 --- a/docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/configuring-external-services.adoc @@ -3,7 +3,8 @@ :page-description: This document describes how to configure the following external services for use with a CircleCI server 4.9 installation :experimental: -This page describes how to configure external services for use with either a new CircleCI server 4.9 installation or migrating internal PostgreSQL and MongoDB data from existing CircleCI server 4.9 installation to your externalized datastores. +This page describes how to configure external services for use with a new CircleCI server v4.9 installation. Migration to externalized PostgreSQL and MongoDB is also supported and described in this guide. + [#postgresql] == PostgreSQL @@ -66,7 +67,7 @@ Consider running at least two PostgreSQL replicas to allow recovery from primary NOTE: If you are doing a fresh install of CircleCI server, then you can skip this section and head to <<connecting-your-external-postgres>> -When a CircleCI server instance is deployed, Postgres is deployed internally by default via its helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external Postgres, you may use the guide below to migrate your Postgres data to your external database. +When a CircleCI server instance is deployed, PostgreSQL is deployed internally by default via its Helm chart. 
However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external PostgreSQL, you may use the guide below to migrate your PostgreSQL data to your external database. CAUTION: This process requires downtime. @@ -101,31 +102,31 @@ kubectl exec -it -n "$namespace" "$PG_POD" -- bash psql -h <your-external-postgres-host> -U postgres -p <your-external-postgres-port> ---- -You should be able to connect to your external Postgres at this point. If not, resolve any issues before proceeding. +You should be able to connect to your external PostgreSQL at this point. If not, resolve any issues before proceeding. TIP: You may use `helm upgrade ...` to restore your CircleCI server instance to a running state. ==== 3. Generate export of your internal PostgreSQL -. Retrieve your internal Postgres credentials: +. Retrieve your internal PostgreSQL credentials: + [source,shell] ---- PG_PASSWORD=$(kubectl -n "$namespace" get secrets postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) ---- + -NOTE: The username for your internal Postgres is `postgres`. The password is randomly generated unless directly set at installation. +NOTE: The username for your internal PostgreSQL is `postgres`. The password is randomly generated unless directly set at installation. -. Connect to your Postgres pod and perform a Postgres dump: +. Connect to your PostgreSQL pod and perform a PostgreSQL dump: + [source,shell] ---- kubectl -n "$namespace" exec -it "$PG_POD" -- bash -c "export PGPASSWORD='$PG_PASSWORD' && pg_dumpall -U postgres -c" > circle.sql ---- + -NOTE: This backup is created in the filesystem used by the Postgres pod. If you wish to store it locally, you may use `kubectl cp -n "$namespace" "$PG_POD":circle.sql /local/dir/circle.sql` +NOTE: This backup is created in the filesystem used by the PostgreSQL pod. 
If you wish to store it locally, you may use `kubectl cp -n "$namespace" "$PG_POD":circle.sql /local/dir/circle.sql` -. Clean up the Postgres Dump. Your internally deployed Postgres uses the username `postgres`. However, during the restore, the Postgres dump will drop all resources before trying to create new ones, including the `postgres` user. Access the Postgres pod where the dump is stored and run the following commands on the Postgres dump file to remove the lines that would delete the Postgres user. +. Clean up the PostgreSQL Dump. Your internally deployed PostgreSQL uses the username `postgres`. However, during the restore, the PostgreSQL dump will drop all resources before trying to create new ones, including the `postgres` user. Access the PostgreSQL pod where the dump is stored and run the following commands on the PostgreSQL dump file to remove the lines that would delete the PostgreSQL user. + [source,shell] ---- @@ -139,14 +140,14 @@ sed -i".bak" '/ALTER ROLE postgres WITH SUPERUSER INHERIT CREATEROLE CREATEDB LO ==== 4. Restore your data in your external PostgreSQL -While still connected to your the internally deployed Postgres, restore the dumped data to your external Postgres: +While still connected to your the internally deployed PostgreSQL, restore the dumped data to your external PostgreSQL: [source,shell] ---- psql -h <your-external-postgres-host> -U postgres -p <your-external-postgres-port> < circle.sql ---- -Now your external Postgres will have your CircleCI server data. In the next section you will update CircleCI server to point to your external Postgres. +Now your external PostgreSQL will have your CircleCI server data. In the next section you will update CircleCI server to point to your external PostgreSQL. [#connecting-your-external-postgres] === Connecting your external PostgreSQL instance to CircleCI server @@ -198,7 +199,7 @@ postgresql: ---- -- -The changes will take effect upon running `helm install/upgrade`. 
If you are completing a migration to an externalized PostgreSQL instance then when you perform `helm upgrade`, the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. +The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized PostgreSQL instance, when you perform `helm upgrade` the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. [#backing-up-postgresql] @@ -222,7 +223,7 @@ NOTE: If using your own MongoDB instance, it needs to be version 3.6 or higher. NOTE: If you are doing a fresh install of CircleCI server, then you can skip this section and head to <<connecting-your-external-mongodb>> -When a CircleCI server instance deployed, MongoDB is deployed internally by default via its helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external MongoDB, you may use the guide below to migrate your Mongo data to your external database. +When a CircleCI server instance deployed, MongoDB is deployed internally by default via its Helm chart. However, as an operator, you may wish to externalize this database to have better control over scalability and availability. Once you have configured your external MongoDB, you may use the guide below to migrate your Mongo data to your external database. CAUTION: This process requires downtime. @@ -359,4 +360,4 @@ mongodb: ---- -- -The changes will take effect upon running `helm install/upgrade`. If you are completing a migration to an externalized MongoDB instance then when you perform `helm upgrade`, the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. +The changes will take effect upon running `helm install/upgrade`. 
If you are completing a migration to an externalized MongoDB instance, when you perform `helm upgrade`, the scaled down pods will be scaled back to their replica numbers as defined by your `values.yaml`. diff --git a/docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc b/docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc index 8940d58a92..f9824720e0 100644 --- a/docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/expanding-internal-database-volumes.adoc @@ -6,7 +6,7 @@ [#overview] == Overview -If you have chosen to deploy either of the CircleCI databases (MongoDB or PostgreSQL) within the cluster, rather than externally provisioning these databases, there may come a point at which the storage space initially made available to these databases is no longer sufficient. Internal databases in your Kubernetes cluster make use of link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/[persistent volumes] for persistent storage. The size of these volumes is determined by persistence volume claims (PVCs). These PVCs request storage space based on what has been made available to the nodes in your cluster. +If you deployed CircleCI databases (MongoDB or PostgreSQL) internally within the cluster, their storage may eventually become insufficient. Internal databases in your Kubernetes cluster make use of link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/[persistent volumes] for persistent storage. The size of these volumes is determined by persistence volume claims (PVCs). These PVCs request storage space based on what has been made available to the nodes in your cluster. This document runs through the steps required to increase PVCs to expand the space available to your internally deployed databases. This operation should not require any downtime, unless you need to restart your database pods. 
diff --git a/docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc b/docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc index 2f838607b0..9327ff4f93 100644 --- a/docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/introduction-to-nomad-cluster-operation.adoc @@ -106,7 +106,7 @@ kubectl run nomad-tunnel --rm -it --restart=Never --image=alpine/socat -n <serve kubectl port-forward pod/nomad-tunnel 4646:4646 -n <server-namespace> ---- -. Navigate to ++`http://localhost:4646/ui`++ in your browser to access the Nomad UI. For more information on utilizing the Nomad UI, refer to the link:https://developer.hashicorp.com/nomad/tutorials/web-ui[Nomad documentation]. +. Navigate to `++http://localhost:4646/ui++` in your browser to access the Nomad UI. For more information on utilizing the Nomad UI, refer to the link:https://developer.hashicorp.com/nomad/tutorials/web-ui[Nomad documentation]. [#shutting-down-a-nomad-client] === Shutting down a Nomad client @@ -145,8 +145,8 @@ The script should use the commands in the section above to do the following: . Terminate the instance. [#externalize-servers] -== Externalize your Nomad Servers -From server version 4.8, Nomad Servers may now be deployed externally to your Kubernetes cluster that hosts your installation of CircleCI server. Externalization of Nomad Servers is optional. Externalization of Nomad Servers can improve their stability. If you already have a CircleCI server instance with _internal_ Nomad Servers, the process to switch to external Nomad Servers is as follows: +== Externalize your Nomad servers +From server version 4.8, Nomad servers may now be deployed externally to your Kubernetes cluster that hosts your installation of CircleCI server. Externalization of Nomad servers is optional. Externalization of Nomad Servers can improve their stability. 
If you already have a CircleCI server instance with _internal_ Nomad Servers, the process to switch to external Nomad servers is as follows: . Stop all builds on your CircleCI server instance. . Follow our installation instructions for deploying Nomad Servers on either xref:installation:phase-3-aws-execution-environments.adoc#nomad-servers[AWS] or xref:installation:phase-3-gcp-execution-environments.adoc#nomad-servers[GCP]. diff --git a/docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc b/docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc index 98ec21698a..18012b71b3 100644 --- a/docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/managing-build-artifacts.adoc @@ -80,13 +80,13 @@ Also, by default, the following types will be rendered as plain text: | JavaScript | Text -| ecmascript +|ECMAscript | Application | JavaScript | Application -| ecmascript +| ECMAscript | Text | XML diff --git a/docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc b/docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc index 71e8129219..823aee6b45 100644 --- a/docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/managing-orbs.adoc @@ -56,7 +56,7 @@ NOTE: `<orb-name>` and `<orb-version>` are optional. You can choose to only spec [using-orbs-behind-a-proxy] == Using orbs behind a proxy -When importing orbs, the CLI must be able to talk to the server installation and to `circleci.com`. If you want to do this when using a server installation behind a proxy, the CLI needs to be configured to use the proxy to make those requests to `circleci.com`, rather than proxying requests to the server install. For example: +When importing orbs, the CLI must be able to talk to the server installation and to `circleci.com`. 
To import orbs for a server installation behind a proxy, configure the CLI to use the proxy to make requests to `circleci.com`, rather than proxying requests to the server installation. For example: [source,bash] ---- diff --git a/docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc b/docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc index b9cdd285c2..f79792bd54 100644 --- a/docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/managing-user-accounts.adoc @@ -59,5 +59,5 @@ When using GitHub.com, you can limit who can register with your CircleCI install . Navigate to your CircleCI Admin Settings page. . Select *System Settings* from the Admin Setting menu. -. Scroll down to Required Org Membership List. +. Scroll down to Required organization Membership List. . Enter the organization(s) you wish to approve. If entering more than one organization, use a comma-delimited string. diff --git a/docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc b/docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc index 782b504cd3..474b4987fc 100644 --- a/docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/monitoring-stack-reference-helm-chart.adoc @@ -6,6 +6,6 @@ You may want to access metrics to gain insight into the performance of builds on your CircleCI server installation. This page provides a sample to show how observability can be set up using the most common tools via a Helm chart package. CircleCI does not support or provide any tooling to provide observability into the running services and infrastructure that CircleCI server operates in. -This guide is provided as a reference on how you could set up observability and is not an officially supported product. 
Our reference monitoring stack currently contains samples on how to set up and configure Grafana, Prometheus, and Telegraf, as well as some built-in dashboards for monitoring key service level indicators. +This guide is provided as a reference on how you could set up observability and is not an officially supported product. Our reference monitoring stack contains samples on how to set up and configure Grafana, Prometheus, and Telegraf, as well as some built-in dashboards for monitoring key service level indicators. You can find the link:https://github.com/CircleCI-Public/circleci-server-monitoring-reference?tab=readme-ov-file#server-monitoring-stack[reference monitoring stack on our public GitHub], as well as more information on how you could configure this setup. You will also find information on link:https://github.com/CircleCI-Public/circleci-server-monitoring-reference?tab=readme-ov-file#modifying-or-adding-grafana-dashboards[adding to or editing the existing dashboards]. diff --git a/docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc b/docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc index d22a64d338..fca016f51b 100644 --- a/docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/user-authentication.adoc @@ -7,4 +7,4 @@ CircleCI server currently supports OAuth through GitHub or GitHub Enterprise. The default method for user account authentication in CircleCI server is through GitHub.com/GitHub Enterprise OAuth. -After your installation is up and running, provide users with a link to access the CircleCI application - for example, `<your-circleci-hostname>.com` – and they will be prompted to set up an account by running through the GitHub/GitHub Enterprise OAuth flow before being redirected to the CircleCI login screen. +Once your installation is up and running, provide users with a link to access the CircleCI application. 
For example, share `<your-circleci-hostname>.com`. Your users will be prompted to set up an account by running through the GitHub/GitHub Enterprise OAuth flow before being redirected to the CircleCI login screen. From 143def27d8453b8173efb27750d7b61e8b97ec72 Mon Sep 17 00:00:00 2001 From: rosie yohannan <rosie@circleci.com> Date: Tue, 11 Nov 2025 11:14:18 +0000 Subject: [PATCH 06/10] bring over change from 4.8 change PR 9717 --- .../modules/operator/pages/data-retention.adoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/server-admin-4.9/modules/operator/pages/data-retention.adoc b/docs/server-admin-4.9/modules/operator/pages/data-retention.adoc index 6041f84613..a64f7c5b2f 100644 --- a/docs/server-admin-4.9/modules/operator/pages/data-retention.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/data-retention.adoc @@ -15,14 +15,15 @@ You can set up retention policies for both Mongo and PostgreSQL to clean up data + [source,bash] ---- -kubectl exec -it <frontend-xxx> -- /bin/bash +kubectl port-forward $(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}' -n <namespace>) -n <namespace> 16000:6005 & ---- + Then, connect to the REPL: + [source,bash] ---- -lein repl :connect 6005 +docker run --rm -it clojure bash +lein repl :connect host.docker.internal:16000 ---- . 
Once connected, the current setting can be verified using the following command: From 300e848560c4dac0e48339725cd9d491ac2ddf1f Mon Sep 17 00:00:00 2001 From: soulchips <akil@circleci.com> Date: Wed, 19 Nov 2025 10:22:55 -0500 Subject: [PATCH 07/10] adding note about the supported version of docker --- antora-playbook.yml | 1 + .../modules/ROOT/partials/installation/phase-1.adoc | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/antora-playbook.yml b/antora-playbook.yml index 7337dd3749..687104538e 100644 --- a/antora-playbook.yml +++ b/antora-playbook.yml @@ -54,6 +54,7 @@ asciidoc: serverversion49: 4.9.0 serverversion: 3.4.8 terraformversion: 0.15.4 + dockerversion: v28 kubectlversion: 1.19 helmversion: 3.9.2 helmdiffversion: 3.5.0 diff --git a/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-1.adoc b/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-1.adoc index 2a70c7a641..a9544a96b9 100644 --- a/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-1.adoc +++ b/docs/server-admin-4.8/modules/ROOT/partials/installation/phase-1.adoc @@ -53,6 +53,11 @@ endif::env-aws[] | TBC | Required for installations outside AWS and GCP, for example, local installation. 
+| link:https://docs.docker.com/engine/install/[Docker] +| {dockerversion} +| Running containerized workloads on Nomad clients +| At this time Docker v29 is not supported + |=== [#create-a-vpc] From bda2cc5f744b768019b82e187fc7fca1a0f462ea Mon Sep 17 00:00:00 2001 From: soulchips <akil@circleci.com> Date: Wed, 19 Nov 2025 10:27:22 -0500 Subject: [PATCH 08/10] adding supported docker version to 4.9 docs --- .../modules/ROOT/partials/installation/phase-1.adoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc index c70eaf745d..739e473276 100644 --- a/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc +++ b/docs/server-admin-4.9/modules/ROOT/partials/installation/phase-1.adoc @@ -53,6 +53,11 @@ endif::env-aws[] | TBC | Required for installations outside AWS and GCP, for example, local installation. +| link:https://docs.docker.com/engine/install/[Docker] +| {dockerversion} +| Running containerized workloads on Nomad clients +| At this time Docker v29 is not supported + |=== [#create-a-vpc] From 51cfed77907ca8f0cf9e29f156023e17251a8ac0 Mon Sep 17 00:00:00 2001 From: Chris Stephen <chris.stephen@circleci.com> Date: Fri, 21 Nov 2025 06:33:51 -0400 Subject: [PATCH 09/10] Point server 4.9 upgrade steps to newest server-terraform patch (#9772) --- .../modules/installation/pages/upgrade-server.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc b/docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc index 03a8e5dc40..11c2bd8644 100644 --- a/docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc +++ b/docs/server-admin-4.9/modules/installation/pages/upgrade-server.adoc @@ -38,7 +38,7 @@ Customers that do not perform this step may have issues restoring Vault from bac [source,shell,subs=attributes+] 
helm diff upgrade circleci-server oci://cciserver.azurecr.io/circleci-server -n $namespace --version {serverversion49} -f <path-to-values.yaml> --username $USERNAME --password $PASSWORD -. Upgrade your Nomad clients and servers (if externalized) Terraform modules to the link:https://github.com/CircleCI-Public/server-terraform/releases/tag/4.9.0[4.9 release]. Follow the documentation to plan and apply the Terraform changes for your xref:phase-3-aws-execution-environments.adoc#create-your-cluster-with-terraform[AWS] or xref:phase-3-gcp-execution-environments.adoc#create-your-cluster-with-terraform[GCP] environment. +. Upgrade your Nomad clients and servers (if externalized) Terraform modules to the link:https://github.com/CircleCI-Public/server-terraform/releases/tag/{serverversion49}[4.9 release]. Follow the documentation to plan and apply the Terraform changes for your xref:phase-3-aws-execution-environments.adoc#create-your-cluster-with-terraform[AWS] or xref:phase-3-gcp-execution-environments.adoc#create-your-cluster-with-terraform[GCP] environment. . 
Perform the upgrade: + From 829a5979b4366258430d23bec6cc03e180edb254 Mon Sep 17 00:00:00 2001 From: Logan G <37604784+imlogang@users.noreply.github.com> Date: Fri, 21 Nov 2025 11:37:06 -0500 Subject: [PATCH 10/10] [ONPREM-2564] - Add docs on how to go from 4.4 to 7.0 (#9776) * Add docs on how to go from 4.4 to 7.0 * style and formatting * Add curly brackets around manual response --------- Co-authored-by: rosie yohannan <rosie@circleci.com> --- .../modules/operator/pages/upgrade-mongo.adoc | 209 +++++++++++++++--- 1 file changed, 175 insertions(+), 34 deletions(-) diff --git a/docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc b/docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc index 254e09efde..314030cb27 100644 --- a/docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc +++ b/docs/server-admin-4.9/modules/operator/pages/upgrade-mongo.adoc @@ -1,95 +1,236 @@ = Upgrade MongoDB :page-platform: Server 4.9, Server Admin -:page-description: Learn how to upgrade MongoDB up to v4.4.15 in an installation of CircleCI server 4.9. +:page-description: Learn how to upgrade MongoDB up to v7.0.15 in an installation of CircleCI server 4.9. :experimental: -MongoDB is a database service used by CircleCI server. This page describes how to upgrade MongoDB to version `4.4.15`. +MongoDB is a database service used by CircleCI server. This page describes how to upgrade MongoDB from version `3.6.22` to `7.0.15`. MongoDB `3.6.22` is shipped with CircleCI server 4.9. -[#prerequisites] -== Prerequisites +[#upgrade-mongodb-to-4.4] +== Upgrade MongoDB to 4.4 -* Ensure backups have been taken. 
You will need a backup of MongoDB to restore to in case anything goes wrong during the upgrade progress -* You are prepared to modify the `values.yaml` -* `helm upgrade` will work from your system to upgrade the cluster -* MongoDB root password is available +[#prerequisites-4.4] +=== Prerequisites -[#script-upgrade] -== Scripted upgrade -We have created a shell script which may be used to upgrade your cluster's MongoDB instance link:https://github.com/CircleCI-Public/server-scripts/tree/main/upgrade-mongo-to-4.4[here]. -If you wish, you may use the following instructions to manually upgrade your cluster's MongoDB +* Ensure backups have been taken. You need a backup of MongoDB to restore in case anything goes wrong during the upgrade progress. +* You are able to modify your `values.yaml` file. +* `helm upgrade` works from your system to upgrade the cluster. +* Your MongoDB root password is available -[#manual-upgrade] -== Manual upgrade +[#script-upgrade-4.4] +=== Scripted upgrade +We have created a shell script that you can use to upgrade your cluster's MongoDB instance link:https://github.com/CircleCI-Public/server-scripts/tree/main/upgrade-mongo-to-4.4[here]. -=== 1. Upgrade from MongoDB 3.6 to 4.0 +Alternatively, you can use the following instructions to manually upgrade your cluster's MongoDB: + +[#manual-upgrade-4.4] +=== Manual upgrade + +==== 1. Upgrade from MongoDB 3.6 to 4.0 . Your `values.yaml` should contain the following snippet: + -```yaml +[source,yaml] +---- mongodb: image: tag: 3.6.22-debian-9-r38 -``` +---- + To begin the upgrade process, change the tag to `4.0.27-debian-9-r118`: + -```yaml +[source,yaml] +---- mongodb: image: tag: 4.0.27-debian-9-r118 -``` +---- . Run `helm upgrade` to update your installation. . Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. 
(Be sure to replace `<password>` with your MongoDB root password.) + -```bash -kubectl exec -it mongodb-0 -- mongo -u root -p <password> +[source,console] +---- +$ kubectl exec -it mongodb-0 -- mongo -u root -p <password> db.adminCommand( { setFeatureCompatibilityVersion: "4.0" } ) -``` +---- . You should get a `{ "ok" : 1 }` response from Mongo. Exit out of the MongoDB shell and pod. -=== 2. Upgrade from MongoDB 4.0 to 4.2 +==== 2. Upgrade from MongoDB 4.0 to 4.2 . Change the tag to `4.2.21-debian-10-r8`: + -```yaml +[source,yaml] +---- mongodb: image: tag: 4.2.21-debian-10-r8 -``` +---- . Run `helm upgrade` to update your installation. . Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. (Be sure to replace `<password>` with your MongoDB root password.) + -```bash -kubectl exec -it mongodb-0 -- mongo -u root -p <password> +[source,console] +---- +$ kubectl exec -it mongodb-0 -- mongo -u root -p <password> db.adminCommand( { setFeatureCompatibilityVersion: "4.2" } ) -``` +---- . You should get `{ "ok" : 1 }` again. Exit out of the shell and pod. -=== 3. Upgrade from MongoDB 4.2 to 4.4 +==== 3. Upgrade from MongoDB 4.2 to 4.4 . Change the tag one more time to `4.4.15-debian-10-r8`: + -```yaml +[source,yaml] +---- mongodb: image: tag: 4.4.15-debian-10-r8 -``` +---- . Run `helm upgrade` to update your installation. . Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. (Be sure to replace `<password>` with your MongoDB root password.) + -```bash -kubectl exec -it mongodb-0 -- mongo -u root -p <password> +[source,console] +---- +$ kubectl exec -it mongodb-0 -- mongo -u root -p <password> db.adminCommand( { setFeatureCompatibilityVersion: "4.4" } ) -``` +---- . 
Once you receive `{ "ok" : 1 }`, you have successfully upgraded your MongoDB to 4.4.15. + +[#upgrade-mongodb-to-7.0] +== Upgrade MongoDB to 7.0 + +[#prerequisites-7.0] +=== Prerequisites + +* You have completed the `4.4.15` upgrade above. +* Ensure backups have been taken. You need a backup of MongoDB to restore in case anything goes wrong during the upgrade process. +* You are able to modify your `values.yaml` file. +* `helm upgrade` works from your system to upgrade the cluster. +* Your MongoDB root password is available. + +[#script-upgrade-7.0] +=== Scripted upgrade +We have created a shell script that you can use to upgrade your cluster's MongoDB instance link:https://github.com/CircleCI-Public/server-scripts/tree/main/upgrade-mongo-to-7.0[here]. + +Alternatively, you can use the following instructions to manually upgrade your cluster's MongoDB: + +[#manual-upgrade-7.0] +=== Manual upgrade + +==== 1. Upgrade from MongoDB 4.4 to 5.0 + +. Your `values.yaml` should contain the following snippet: ++ +[source,yaml] +---- +mongodb: + image: + tag: 4.4.15-debian-10-r8 +---- ++ +To begin the upgrade process, change the tag to `5.0.24-debian-11-r20` and update the probes to use `mongosh`: ++ +[source,yaml] +---- +mongodb: + image: + tag: 5.0.24-debian-11-r20 + livenessProbe: + enabled: false + readinessProbe: + enabled: false + customLivenessProbe: + exec: + command: + - mongosh + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + customReadinessProbe: + exec: + command: + - bash + - -ec + - | + mongosh --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true' + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 +---- + +. Run `helm upgrade` to update your installation. + +. 
Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. Note that MongoDB 5.0+ uses `mongosh` instead of `mongo`. (Be sure to replace `<password>` with your MongoDB root password.) ++ +[source,console] +---- +$ kubectl exec -it mongodb-0 -- mongosh -u root -p <password> +db.adminCommand( { setFeatureCompatibilityVersion: "5.0" } ) +---- + +. You should get an `{ "ok" : 1 }` response from MongoDB. Exit out of the MongoDB shell and pod. + +==== 2. Upgrade from MongoDB 5.0 to 6.0 + +. Change the tag to `6.0.13-debian-11-r21` while being sure to keep the new `customLivenessProbe` and `customReadinessProbe` parameters: ++ +[source,yaml] +---- +mongodb: + image: + tag: 6.0.13-debian-11-r21 +---- + +. Run `helm upgrade` to update your installation. + +. Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. (Be sure to replace `<password>` with your MongoDB root password.) ++ +[source,console] +---- +$ kubectl exec -it mongodb-0 -- mongosh -u root -p <password> +db.adminCommand( { setFeatureCompatibilityVersion: "6.0" } ) +---- + +. You should get `{ "ok" : 1 }` again. Exit out of the shell and pod. + +==== 3. Upgrade from MongoDB 6.0 to 7.0 + +. Change the tag to `7.0.15-debian-12-r2`: ++ +[source,yaml] +---- +mongodb: + image: + tag: 7.0.15-debian-12-r2 +---- + +. Run `helm upgrade` to update your installation. + +. Once the `helm upgrade` has completed and MongoDB has rolled, you will need to `exec` into the pod (with the root password handy) to modify the compatibility version. (Be sure to replace `<password>` with your MongoDB root password.) ++ +CAUTION: MongoDB 7.0+ upgrade is one-way and cannot be downgraded. ++ +NOTE: MongoDB 7.0+ requires the `confirm: true` parameter. 
++ +[source,console] +---- +$ kubectl exec -it mongodb-0 -- mongosh -u root -p <password> +db.adminCommand( { setFeatureCompatibilityVersion: "7.0", confirm: true } ) +---- + +. Once you receive `{ "ok" : 1 }`, you have successfully upgraded your MongoDB to 7.0.15.