diff --git a/.github/workflows/ci-chart.yml b/.github/workflows/ci-chart.yml new file mode 100644 index 0000000..b5acad6 --- /dev/null +++ b/.github/workflows/ci-chart.yml @@ -0,0 +1,53 @@ +name: Publish Chart + +on: + release: + types: + - published + tags: + - v* + +permissions: + contents: read + +jobs: + publish-charts: + env: + HELM_CHART_DIR: helm/charts/kdp-infra + + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 + + - name: Get Version + id: get_version + run: | + CHART_VERSION=${GITHUB_REF#refs/tags/} + echo "CHART_VERSION=${CHART_VERSION}" >> $GITHUB_OUTPUT + + - name: Install Helm + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 + with: + version: v3.8.2 + + - name: Setup node + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 + with: + node-version: "20" + - name: Generate helm doc + run: | + make helm-doc-gen + + - name: Docker Login + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + with: + username: ${{ secrets.REG_USER }} + password: ${{ secrets.REG_PASSWD }} + + - name: Tag helm chart and push to registry + run: | + chart_version=${{ steps.get_version.outputs.CHART_VERSION }} + sed -i "s/1.0.0/${chart_version}/g" $HELM_CHART_DIR/Chart.yaml + sed -i "s/1.0.0/${chart_version}/g" $HELM_CHART_DIR/values.yaml + helm package ./$HELM_CHART_DIR + helm push kdp-infra-${chart_version}.tgz oci://${{ secrets.CONTAINER_REGISTRY }}/linktimecloud \ No newline at end of file diff --git a/.github/workflows/ci-infra-build.yaml b/.github/workflows/ci-infra-build.yaml new file mode 100644 index 0000000..38c8f80 --- /dev/null +++ b/.github/workflows/ci-infra-build.yaml @@ -0,0 +1,129 @@ +name: CI-Infra-Build + +on: + release: + types: + - published + tags: + - v* + +permissions: + contents: read + +env: + # Common versions + GO_VERSION: '1.21' + GOLANG_CI_VERSION: 'v1.50' + +jobs: + detect-noop: + runs-on: ubuntu-22.04 + outputs: + noop: ${{ steps.noop.outputs.should_skip }} + permissions: + actions: write + steps: + - name: Detect No-op Changes + id: noop + uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + paths_ignore: '["**.md", "**.mdx", "**.png", "**.jpg"]' + do_not_skip: '["workflow_dispatch", "schedule", "push"]' + continue-on-error: true + + staticcheck: + runs-on: ubuntu-22.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + + steps: + - name: Setup Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Checkout + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 + with: + submodules: true + + - name: Static Check + run: make static-check + + lint: + runs-on: ubuntu-22.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + permissions: + contents: read # for actions/checkout to fetch code + pull-requests: read # for golangci/golangci-lint-action to fetch pull requests + + steps: + - name: Setup Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Checkout + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 + with: + submodules: true + + # This action uses its own setup-go, which always seems to use the latest + # stable version of Go. We could run 'make lint' to ensure our desired Go + # version, but we prefer this action because it leaves 'annotations' (i.e. 
+ # it comments on PRs to point out linter violations). + - name: Lint + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 + with: + version: ${{ env.GOLANGCI_VERSION }} + + check-cli-build: + runs-on: ubuntu-22.04 + needs: detect-noop + if: needs.detect-noop.outputs.noop != 'true' + + steps: + - name: Checkout + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 + with: + submodules: true + + - name: Get Version + id: get_version + run: | + VERSION=${GITHUB_REF#refs/tags/} + echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT + + - name: Setup Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Cache Go Dependencies + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 + with: + path: .work/pkg + key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-pkg- + - name: Set up QEMU + uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v3.1.0 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v3.4.0 + + - uses: actions/checkout@v4 + - name: Docker Login + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + with: + username: ${{ secrets.REG_USER }} + password: ${{ secrets.REG_PASSWD }} + + - name: Run Build Images + run: | + version=${{ steps.get_version.outputs.VERSION }} + make multi-arch-builder IMG_REGISTRY=${{ secrets.CONTAINER_REGISTRY }} VERSION=${version} + make publish IMG_REGISTRY=${{ secrets.CONTAINER_REGISTRY }} VERSION=${version} + + - name: Cleanup binary + run: make kdp-cli-clean \ No newline at end of file diff --git a/.gitignore b/.gitignore index a216047..c838cd8 100644 --- a/.gitignore +++ b/.gitignore @@ -70,6 +70,4 @@ bin # Output of the go coverage tool, specifically when used with LiteIDE *.out -# build -kdp diff --git a/Makefile b/Makefile index aeaf556..a3d46cf 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ include makefiles/const.mk include makefiles/build.mk +include makefiles/build-helm-package.mk include makefiles/dependency.mk ##@ General diff --git a/catalog/mysql/metadata.yaml b/catalog/mysql/metadata.yaml index aa30d8d..5cb2707 100644 --- a/catalog/mysql/metadata.yaml +++ b/catalog/mysql/metadata.yaml @@ -1,6 +1,7 @@ name: MySQL category: 系统/大数据开发工具 description: MySQL主从架构的容器化实现,默认创建一套MySQL主从服务,MySQL exporter和备份服务 +group: Middleware i18n: en: category: system.dataManagement diff --git a/catalog/operator/metadata.yaml b/catalog/operator/metadata.yaml index d8402d6..26e5b21 100644 --- a/catalog/operator/metadata.yaml +++ b/catalog/operator/metadata.yaml @@ -1,6 +1,7 @@ name: Operator category: 系统/大数据开发工具 description: Operator 是 Kubernetes 的扩展软件, 它利用定制资源管理应用及其组件。 +group: GlobalService i18n: en: category: system.dataManagement diff --git a/docker/hooks/infra-hook.py b/docker/hooks/infra-hook.py new file mode 100644 index 0000000..b2aa37a --- /dev/null +++ b/docker/hooks/infra-hook.py @@ -0,0 +1,462 @@ +#!/usr/bin/env python3 + +import json +import os +import shutil +import subprocess +import time +import traceback + +import sys +from deepdiff import DeepDiff +from functools import reduce +from jsonpath import jsonpath + +from kubernetes import client, config + + +BASE_DIR = os.environ["HOME"] + "/.kdp" +OPERATOR_LOG_FILE = f"{BASE_DIR}/operator.log" +INFRA_PROJECT_DIR = f"{BASE_DIR}/src/infra" +HOOK_RESULT_FILE = os.environ.get("BINDING_CONTEXT_PATH") + +LABELS_TERMINATED = "installer.kdp.io/terminated" 
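+# Annotations written back to the Infrastructure CR by this hook:
+#  - ANNOTATIONS_LAST_COMMAND keeps the most recently executed `kdp` command so the
+#    schedule handler can re-run it after a workflow failure.
+#  - ANNOTATIONS_LAST_SPEC keeps the last applied spec so that Modified events can be
+#    diffed against it (see check_infra_spec_data) and unchanged specs are skipped.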
+ANNOTATIONS_LAST_COMMAND = "installer.kdp.io/last-command" +ANNOTATIONS_LAST_SPEC = "installer.kdp.io/spec-data" + +RETRY_NUM = 0 +MAX_RETRY_NUM = 10 +MAX_INSTALL_NUM = 3 + +# +RunningWorkflow = "runningWorkflow" + +WorkflowFailed = "workflowFailed" +# Component insufficiency +Insufficiency = "insufficiency" +# All components are in the correct status +Running = "running" +# The component is complete, but the status is incorrect +Unhealthy = "unhealthy" +# Too many runs still failed +Terminated = "terminated" + + +ADDON_NAMES = ["addon-fluxcd", "addon-openebs", "addon-plg-stack", "addon-kong", "addon-mysql", "addon-kdp-core"] +STEPS = [] + +config.load_incluster_config() +api_instance = client.CustomObjectsApi() + + +def get_items(obj, items, default=None): + """递归获取数据 + """ + if isinstance(items, str): + items = items.strip(".").split(".") + try: + return reduce(lambda x, i: x[i], items, obj) + except (IndexError, KeyError, TypeError): + return default + + +def transform_data(data): + transformed_list = [] + if data: + for item in data: + name = item["name"] + value = item["value"] + transformed_list.append(f"--set {name}={value}") + return transformed_list + + +def execute_cmd(command, save_file=True): + try: + output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT) + if save_file: + with open(OPERATOR_LOG_FILE, 'w') as f: + f.write(output.decode("utf-8")) + return output.decode("utf-8") + except subprocess.CalledProcessError as e: + print(e) + raise + except Exception as e: + print(e) + raise + + +def deal_operator_message(): + with open(OPERATOR_LOG_FILE, 'r') as f: + lines = f.readlines() + line_data = lines[-10:] + operator_log_data = ''.join(line_data) + message_data = operator_log_data.replace('"', '\"') + message_data = message_data.replace("[\x1b[36m", '') + message_data = message_data.replace("\x1b[0m]", '') + return message_data + + +def get_set_parameters(context_data): + kdp_repo = get_items(context_data, ["object", "spec", "kdpRepo"]) + kdp_repo_ref = get_items(context_data, ["object", "spec", "kdpRepoRef"]) + docker_registry = get_items(context_data, ["object", "spec", "dockerRegistry"]) + helm_repository = get_items(context_data, ["object", "spec", "helmRepository"]) + force_reinstall = get_items(context_data, ["object", "spec", "forceReinstall"]) + artifact_server = get_items(context_data, ["object", "spec", "artifactServer"]) + spec_parameters = get_items(context_data, ["object", "spec", "setParameters"]) + set_cmd = "" + if kdp_repo != "nil": + set_cmd = f"{set_cmd} --kdp-repo {kdp_repo}" + + if kdp_repo_ref != "nil": + set_cmd = f"{set_cmd} --kdp-repo-ref {kdp_repo_ref}" + + if force_reinstall: + set_cmd = f"{set_cmd} --force-reinstall" + + if artifact_server != "nil": + set_cmd = f"{set_cmd} --artifact-server {artifact_server}" + + if docker_registry != "nil": + set_cmd = f"{set_cmd} --set docker-registry={docker_registry}" + + if helm_repository != "nil": + set_cmd = f"{set_cmd} --set helm-repository={helm_repository}" + + set_parameters = " ".join(transform_data(spec_parameters)) + if set_parameters: + set_cmd = f"{set_cmd} {set_parameters}" + + return set_cmd + + +def check_infra_spec_data(context_data): + last_spec_data = get_items(context_data, ["object", "metadata", "annotations", ANNOTATIONS_LAST_SPEC]) + current_spec_data = get_items(context_data, ["object", "spec"]) + file_cmp_result = DeepDiff(json.loads(last_spec_data), current_spec_data, ignore_order=True) + if not file_cmp_result: + print(f"infra spec not change, not operator") + 
return True + return False + + +def check_infra_step_status(check_step_retry=0): + steps = [] + status = Running + for addon_name in ADDON_NAMES: + try: + addon_application = VelaApplication(addon_name).get_application() + except Exception as e: + print(e) + steps.append({ + "name": addon_name, + "status": "", + "message": "", + "lastExecuteTime": "" + }) + status = Insufficiency + continue + addon_status = jsonpath(addon_application, '$.status.status') + message = jsonpath(addon_application, '$.status.services[*].message') + last_execute_time = jsonpath(addon_application, '$.status.workflow.endTime') + steps.append({ + "name": addon_name, + "status": addon_status[0] if addon_status else "", + "message": message[0] if message else "", + "lastExecuteTime": last_execute_time[0] if last_execute_time else "" + }) + if addon_status[0] != "running": + status = Unhealthy + continue + # if status is unhealthy, retry MAX_INSTALL_NUM times + if status == Unhealthy: + if check_step_retry < MAX_INSTALL_NUM: + check_step_retry += 1 + time.sleep(3) + return check_infra_step_status(check_step_retry) + return status, steps + + +class KubernetesCrdController(object): + def __init__(self): + self.api_instance = api_instance + + +class VelaApplication(KubernetesCrdController): + def __init__(self, name): + super().__init__() + self.group = "core.oam.dev" + self.version = "v1beta1" + self.plural = "applications" + self.nameSpace = "vela-system" + self.name = name + + def get_application(self): + try: + return self.api_instance.get_namespaced_custom_object( + self.group, self.version, self.nameSpace, self.plural, self.name) + except Exception as e: + print(e) + raise + + +class InfraKubernetes(KubernetesCrdController): + def __init__(self, name): + super().__init__() + self.group = "installer.kdp.io" + self.version = "v1alpha1" + self.plural = "infrastructures" + self.name = name + + def get_infra_status(self): + try: + infra_data = self.api_instance.get_cluster_custom_object( + self.group, self.version, self.plural, self.name) + return get_items(infra_data, ["status", "status"]) + except Exception as e: + print(e) + raise + + def path_status(self, status=None, message=None, steps=None): + body_status = {} + if status: + body_status["status"] = status + if message: + body_status["message"] = message + if steps: + body_status["subSteps"] = steps + body = { + "status": body_status + } + api_instance.patch_cluster_custom_object_status( + self.group, self.version, self.plural, self.name, body) + print("patch infra status success") + + def path_annotations_command(self, command): + try: + infra_data = self.api_instance.get_cluster_custom_object( + self.group, self.version, self.plural, self.name) + infra_data["metadata"]["annotations"] = { + ANNOTATIONS_LAST_COMMAND: command + } + self.api_instance.patch_cluster_custom_object( + self.group, self.version, self.plural, self.name, infra_data) + except Exception as e: + print(e) + raise + + def path_label_terminated(self): + try: + infra_data = self.api_instance.get_cluster_custom_object( + self.group, self.version, self.plural, self.name) + infra_data["metadata"]["labels"] = { + LABELS_TERMINATED: "infra" + } + self.api_instance.patch_cluster_custom_object(self.group, self.version, self.plural, self.name, infra_data) + except Exception as e: + print(e) + raise + + def path_annotations_last_spec(self, spec_data): + try: + infra_data = self.api_instance.get_cluster_custom_object( + self.group, self.version, self.plural, self.name) + infra_data["metadata"]["annotations"] 
= { + ANNOTATIONS_LAST_SPEC: json.dumps(spec_data) + } + self.api_instance.patch_cluster_custom_object( + self.group, self.version, self.plural, self.name, infra_data) + except Exception as e: + print(traceback.format_exc()) + print(e) + raise + + +class InfraController(InfraKubernetes): + def __init__(self, name, infra_spec): + super().__init__(name) + self.name = name + self.infra_spec = infra_spec + + def pre_infra_operator(self): + self.path_status(RunningWorkflow, "nil", ["nil"]) + if os.path.exists(OPERATOR_LOG_FILE): + os.remove(OPERATOR_LOG_FILE) + + def create_infra(self, set_cmd): + self.pre_infra_operator() + # Store the spec data + self.path_annotations_last_spec(self.infra_spec) + create_cmd = f"kdp install {set_cmd}" + try: + self.handle_infra("create", create_cmd) + except Exception as e: + print(e) + + def upgrade_infra(self, set_cmd): + self.pre_infra_operator() + upgrade_cmd = f"kdp upgrade {set_cmd}" + try: + self.handle_infra("upgrade", upgrade_cmd) + # Store the spec data after the update is successful + self.path_annotations_last_spec(self.infra_spec) + except Exception as e: + print(traceback.format_exc()) + print(e) + + def schedule_infra(self, schedule_cmd): + try: + self.handle_infra("schedule", schedule_cmd) + except Exception as e: + print(e) + + def handle_infra(self, handle_type, handle_cmd): + global RETRY_NUM + try: + self.path_annotations_command(handle_cmd) + if RETRY_NUM > MAX_RETRY_NUM: + self.path_label_terminated() + + print(f"{handle_type} {self.name} with command: {handle_cmd}") + _ = execute_cmd(handle_cmd) + message_data = deal_operator_message() + status, steps = check_infra_step_status() + self.path_status(status, message_data, steps) + if handle_type == "schedule": + if status == Running: + RETRY_NUM = 0 + if status != Running: + RETRY_NUM += 1 + except Exception as e: + self.path_status(status=WorkflowFailed, message=str(e)) + if handle_type == "schedule": + RETRY_NUM += 1 + raise + + +def check_labels(context_data): + labels = get_items(context_data, ["object", "metadata", "labels"]) + if labels: + for label in labels: + if label == LABELS_TERMINATED: + return True + return False + + +def get_last_command(context_data): + return get_items(context_data, ["object", "metadata", "annotations", ANNOTATIONS_LAST_COMMAND]) + + +def get_last_spec(context_data): + return get_items(context_data, ["object", "metadata", "annotations", ANNOTATIONS_LAST_SPEC]) + + +def infra_operator(): + hook_result_file = os.environ.get("BINDING_CONTEXT_PATH") + shutil.copy(hook_result_file, "/tmp/modify.json") + with open(hook_result_file, 'r') as hook_file: + context_file_data = hook_file.read() + json_data = json.loads(context_file_data) + + for context_data in json_data: + object_type = get_items(context_data, ["type"]) + if object_type == "Event": + object_watch_type = get_items(context_data, ["watchEvent"]) + object_name = get_items(context_data, ["object", "metadata", "name"]) + object_kind = get_items(context_data, ["object", "kind"]) + object_spec = get_items(context_data, ["object", "spec"]) + set_cmd = get_set_parameters(context_data) + + shutil.copy(HOOK_RESULT_FILE, "/tmp/Event.json") + + # check labels + if check_labels(context_data): + print(f"{object_kind}/{object_name} labels is terminated, disallowed operation") + continue + + if object_watch_type == "Added": + InfraController(object_name, object_spec).create_infra(set_cmd) + if object_watch_type == "Modified": + + # check spec data change or not + if check_infra_spec_data(context_data): + continue + 
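+                # the spec differs from the stored ANNOTATIONS_LAST_SPEC, so re-run
+                # `kdp upgrade` with the merged --set flags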
InfraController(object_name, object_spec).upgrade_infra(set_cmd) + if object_watch_type == "Deleted": + print(f"{object_kind}/{object_name} delete, not support operator") + continue + if object_type == "Schedule": + shutil.copy(HOOK_RESULT_FILE, "/tmp/Schedule.json") + schedule_infra = get_items(context_data, ["snapshots", "infra-labels"]) + for infra_line in schedule_infra: + object_name = get_items(infra_line, ["object", "metadata", "name"]) + object_kind = get_items(infra_line, ["object", "kind"]) + object_status = get_items(infra_line, ["object", "status", "status"]) + object_spec = get_items(infra_line, ["object", "spec"]) + set_cmd = get_set_parameters(infra_line) + + if not object_spec: + print(f"{object_kind}/{object_name} spec is empty, not support operator") + continue + + if check_labels(infra_line): + print(f"{object_kind}/{object_name} labels is terminated, disallowed operation") + continue + + # object_status = InfraKubernetes(object_name).get_infra_status() + if object_status == RunningWorkflow: + continue + + if object_status == WorkflowFailed: + command = get_last_command(infra_line) + InfraController(object_name, object_spec).schedule_infra(command) + + if object_status == Insufficiency or object_status == Unhealthy: + command = f"kdp install {set_cmd}" + if RETRY_NUM > MAX_INSTALL_NUM: + command = f"kdp install --force-reinstall {set_cmd}" + InfraController(object_name, object_spec).schedule_infra(command) + + command = f"kdp install {set_cmd}" + InfraController(object_name, object_spec).schedule_infra(command) + if object_type == "Synchronization": +# object_watch_type = get_items(context_data, ["watchEvent"]) + shutil.copy(HOOK_RESULT_FILE, "/tmp/Synchronization.json") + sync_infra = get_items(context_data, ["objects"]) + for infra_line in sync_infra: + object_name = get_items(infra_line, ["object", "metadata", "name"]) + object_kind = get_items(infra_line, ["object", "kind"]) + object_spec = get_items(infra_line, ["object", "spec"]) + set_cmd = get_set_parameters(infra_line) + + set_cmd = f" --force-reinstall {set_cmd}" + if check_labels(infra_line): + print(f"{object_kind}/{object_name} labels is terminated, disallowed operation") + continue + InfraController(object_name, object_spec).create_infra(set_cmd) + # print("synchronization, not support operator") + + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "--config": + print("""configVersion: v1 +schedule: + - name: 'schedule-infra' + crontab: '*/4 * * * *' + allowFailure: true + queue: 'schedule-queue' + includeSnapshotsFrom: ['infra-labels'] +kubernetes: + - name: 'infra-labels' + apiVersion: 'installer.kdp.io/v1alpha1' + kind: 'Infrastructure' + allowFailure: true + jqFilter: '.metadata.labels' + - name: 'infra-spec' + apiVersion: 'installer.kdp.io/v1alpha1' + kind: 'Infrastructure' + allowFailure: true + jqFilter: '.spec'""") + else: + infra_operator() diff --git a/helm/CHANGELOG.md b/helm/CHANGELOG.md new file mode 100644 index 0000000..e69de29 diff --git a/helm/CHANGELOG_zh.md b/helm/CHANGELOG_zh.md new file mode 100644 index 0000000..e69de29 diff --git a/helm/README.md b/helm/README.md new file mode 100644 index 0000000..a1436e4 --- /dev/null +++ b/helm/README.md @@ -0,0 +1,24 @@ + + Full documentation is available at [KDP Website](https://linktimecloud.github.io/kubernetes-data-platform/). + +## Introduction +KDP(Kubernetes Data Platform) delivers a modern, hybrid and cloud-native data platform based on Kubernetes. 
It leverages the cloud-native capabilities of Kubernetes to manage data platform effectively. + +![kdp-arch](https://linktime-public.oss-cn-qingdao.aliyuncs.com/linktime-homepage/kdp/kdp-archi-en.png) + +## Highlights +* Out-of-the-box Kubernetes data platform with: + * K8s-native integration and optimization of mainstream big data computing and storage engines + * The standardized configuration management of big data components which simplifies the complexity of configuration dependency management of big data components +* Standardized big data application integration framework with: + * The application delivery engine based on [OAM](https://oam.dev/) which simplifies the delivery and development of big data applications + * Scalable application layer operation and maintenance capabilities: observability, elastic scaling, gray scale publishing, etc +* Model concept of big data cluster and application catalog: + * Big Data cluster: Manage big data components in the form of "cluster" on K8s, providing unified life cycle management of big data applications in the same big data cluster + * Application Catalog: Combines individual big data components into an application catalog, providing a unified management view from the application layer to the container layer + + +## Usage Plan +* Obtain the storageClass name of the Kubernetes cluster storage and update the extension component configuration persistence.storageClass. +* Update the extension component configuration persistence.accessModes according to the operations supported by the storageClass of the Kubernetes cluster storage. +* Update the extension component configuration global.ingress.domain according to the domain name of the Kubernetes cluster ingress. diff --git a/helm/README_zh.md b/helm/README_zh.md new file mode 100644 index 0000000..174b4a4 --- /dev/null +++ b/helm/README_zh.md @@ -0,0 +1,24 @@ + +完整的文档可访问 [KDP 网站](https://linktimecloud.github.io/kubernetes-data-platform/README_zh.html). + +## 简介 + +KDP(Kubernetes Data Platform) 提供了一个基于 Kubernetes 的现代化混合云原生数据平台。它能够利用 Kubernetes 的云原生能力来有效地管理数据平台。 + +![kdp-arch](https://linktime-public.oss-cn-qingdao.aliyuncs.com/linktime-homepage/kdp/kdp-archi-en.png) + +## 亮点 +* 开箱即用的 Kubernetes 大数据平台: + * 主流大数据计算、存储引擎的 K8s 化改造及优化 + * 大数据组件的标准化配置管理,简化了大数据组件配置依赖管理的复杂性 +* 提供标准化的大数据应用集成框架: + * 基于[OAM](https://oam.dev/)的应用交付引擎,简化大数据应用的交付和开发 + * 可扩展的应用层运维能力:可观测性、弹性伸缩、灰度发布等 +* 大数据集群及应用目录的模型概念: + * 大数据集群:在K8s上以“集群”的形式管理大数据组件,提供同一个大数据集群下大数据应用统一的生命周期管理 + * 应用目录:将相关的单体大数据组件组合成一个应用目录,提供从应用层到容器层的统一管理视图 + +## 使用方案 +* 获取Kubernetes集群存储storageClass名称,更新扩展组件配置配置 persistence.storageClass +* 依据Kubernetes集群存储storageClass支持的操作, 更新扩展组件配置配置persistence.accessModes +* 依据Kubernetes集群ingress的域名,更新扩展组件配置配置global.ingress.domain diff --git a/helm/charts/kdp-infra/.helmignore b/helm/charts/kdp-infra/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/helm/charts/kdp-infra/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/charts/kdp-infra/Chart.yaml b/helm/charts/kdp-infra/Chart.yaml new file mode 100644 index 0000000..a62fab5 --- /dev/null +++ b/helm/charts/kdp-infra/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: kdp-infra +description: A Helm chart for Kubernetes-data-platform infra installer + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 1.0.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 1.0.0 diff --git a/helm/charts/kdp-infra/README.md b/helm/charts/kdp-infra/README.md new file mode 100644 index 0000000..f631041 --- /dev/null +++ b/helm/charts/kdp-infra/README.md @@ -0,0 +1,79 @@ +[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +# KDP helm chart + +kdp is the installation service for the kdp (Kubernetes Data Platform) management platform. Using the kdp chart, kdp can be deployed in a Kubernetes cluster. 
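+As a rough sketch, an install that overrides the storage class, access mode and ingress
+domain (the settings called out in the Usage Plan) could look like the following; the
+release name, chart reference, version and values here are illustrative only:
+
+```bash
+# Illustrative only: registry path, version and values are examples, not published coordinates.
+helm install kdp-infra oci://registry-1.docker.io/linktimecloud/kdp-chart \
+  --version 1.0.0-rc1 \
+  --set global.ingress.domain=kdp.example.com \
+  --set persistence.storageClass=standard \
+  --set "persistence.accessModes[0]=ReadWriteOnce"
+```
+
+The same keys can also be collected in a values file and passed with `-f`.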
+ +To pull this chart from the repository, + +```bash +helm pull oci://registry-1.docker.io/linktimecloud/kdp-chart --version 1.0.0-rc1 +``` + +Other Commands, + +```bash +helm show all oci://registry-1.docker.io/linktimecloud/kdp --version 1.0.0-rc1 +helm template oci://registry-1.docker.io/linktimecloud/kdp --version 1.0.0-rc1 +helm install oci://registry-1.docker.io/linktimecloud/kdp --version 1.0.0-rc1 +helm upgrade oci://registry-1.docker.io/linktimecloud/kdp --version +``` + +## Prerequisites +- Kubernetes 1.23+ + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------------ | --------------------------- | ------------ | +| `global.ingress.class` | Ingress class | `kong` | +| `global.ingress.domain` | Domain name for ingress | `kdp-e2e.io` | +| `global.ingress.tlsSecretName` | TLS secret name for ingress | `""` | + +### kdp parameters + +| Name | Description | Value | +| ----------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| `image.registry` | Image registry | `od-registry.linktimecloud.com` | +| `image.repository` | Image repository | `kdp` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.tag` | Image tag | `""` | +| `image.digest` | Image digest | `sha256:5e00383433dbe9d05807dfa67cacad03cf0248960868bcc61d6c86e2876dacbf` | +| `imagePullSecrets` | Image pull secrets | `[]` | +| `nameOverride` | String to partially override kdp.fullname | `""` | +| `fullnameOverride` | String to fully override kdp.name | `""` | +| `serviceAccount.create` | Create serviceAccount | `true` | +| `serviceAccount.automount` | Automatically mount a ServiceAccount's API credentials? | `true` | +| `serviceAccount.annotations` | Annotations to add to the service account | `{}` | +| `serviceAccount.name` | The name of the service account to use. 
| `""` | +| `podAnnotations` | | `{}` | +| `podLabels` | | `{}` | +| `service.port` | Service port | `9115` | +| `resources` | | `{}` | +| `volumes` | Volumes for the output Deployment definition | `[]` | +| `volumeMounts` | Volume mounts for the output Deployment definition | `[]` | +| `nodeSelector` | Node selector for pod assignment | `{}` | +| `affinity` | Affinity for pod assignment | `{}` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for the liveness probe | `5` | +| `livenessProbe.periodSeconds` | Period seconds for the liveness probe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for the liveness probe | `1` | +| `livenessProbe.failureThreshold` | Failure threshold for the liveness probe | `3` | +| `livenessProbe.successThreshold` | Success threshold for the liveness probe | `1` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for the readiness probe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for the readiness probe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for the readiness probe | `1` | +| `readinessProbe.failureThreshold` | Failure threshold for the readiness probe | `3` | +| `readinessProbe.successThreshold` | Success threshold for the readiness probe | `1` | +| `metrics.serviceMonitor.enabled` | Enable the ServiceMonitor resource for Prometheus Operator | `true` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `persistence.enabled` | Enable persistence using Persistent Volume Claims | `true` | +| `persistence.storageClass` | PVC Storage Class for MinIO® data volume | `default` | +| `persistence.accessModes` | PVC Access Modes for MinIO® data volume | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for MinIO® data volume | `1Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `installConfig.kdpRepo` | kdp repo url | `https://gitee.com/linktime-cloud/kubernetes-data-platform.git` | +| `installConfig.kdpRepoRef` | kdp repo ref | `release-1.2` | +| `installConfig.setParameters` | setParameters | `[]` | diff --git a/helm/charts/kdp-infra/crds/installer.kdp.io.yaml b/helm/charts/kdp-infra/crds/installer.kdp.io.yaml new file mode 100644 index 0000000..64036e6 --- /dev/null +++ b/helm/charts/kdp-infra/crds/installer.kdp.io.yaml @@ -0,0 +1,89 @@ +--- +# See: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/ +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . + name: infrastructures.installer.kdp.io + labels: + installer: infra +spec: + # group name to use for REST API: /apis// + group: installer.kdp.io + # list of versions supported by this CustomResourceDefinition + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: status + type: string + + name: v1alpha1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + subresources: + status: { } + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + properties: + kdpRepo: + type: string + default: nil + kdpRepoRef: + type: string + default: nil + artifactServer: + type: string + default: nil + helmRepository: + type: string + default: nil + dockerRegistry: + type: string + default: nil + forceReinstall: + type: boolean + default: false + setParameters: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + # either Cluster + scope: Cluster + names: + # plural name to be used in the URL: /apis/// + plural: infrastructures + # singular name to be used as an alias on the CLI and for display + singular: infrastructure + # kind is normally the CamelCased singular type. Your resource manifests use this. + kind: Infrastructure + # shortNames allow shorter string to match your resource on the CLI + shortNames: + - infra \ No newline at end of file diff --git a/helm/charts/kdp-infra/templates/NOTES.txt b/helm/charts/kdp-infra/templates/NOTES.txt new file mode 100644 index 0000000..e7bb484 --- /dev/null +++ b/helm/charts/kdp-infra/templates/NOTES.txt @@ -0,0 +1 @@ +Kubernetes data platform installation has triggered, please wait for the installation to complete diff --git a/helm/charts/kdp-infra/templates/PersistentVolumeClaim.yaml b/helm/charts/kdp-infra/templates/PersistentVolumeClaim.yaml new file mode 100644 index 0000000..5a403ea --- /dev/null +++ b/helm/charts/kdp-infra/templates/PersistentVolumeClaim.yaml @@ -0,0 +1,33 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "kdp-infra.fullname" .}}-pvc + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "kdp-infra.name" .}} + chart: {{ include "kdp-infra.name" .}} + release: {{ .Release.Name }} + monitoring: "true" + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if or .Values.persistence.annotations}} + annotations: + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + storageClassName: {{ .Values.persistence.storageClass | quote }} + diff --git a/helm/charts/kdp-infra/templates/Service.yaml b/helm/charts/kdp-infra/templates/Service.yaml new file mode 100644 index 0000000..cdc296f --- /dev/null +++ b/helm/charts/kdp-infra/templates/Service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kdp-infra.fullname" .}} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "kdp-infra.name" .}} + chart: {{ include "kdp-infra.name" .}} + release: {{ .Release.Name }} + monitoring: "true" + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: metrics + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ include "kdp-infra.name" . }} + release: {{ .Release.Name }} \ No newline at end of file diff --git a/helm/charts/kdp-infra/templates/_helpers.tpl b/helm/charts/kdp-infra/templates/_helpers.tpl new file mode 100644 index 0000000..461bb31 --- /dev/null +++ b/helm/charts/kdp-infra/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "kdp-infra.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-"}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kdp-infra.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kdp-infra.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kdp-infra.labels" -}} +helm.sh/chart: {{ include "kdp-infra.chart" . }} +{{ include "kdp-infra.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kdp-infra.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kdp-infra.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kdp-infra.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "kdp-infra.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + + +{{- /* +Create the image to use +*/}} +{{- define "kdp-infra.image" -}} +{{- $registry := .Values.image.registry -}} +{{- $tag := ternary (printf ":%s" (.Values.image.tag | default "latest" | toString)) (printf "@%s" .Values.image.digest) (eq .Values.image.digest "") -}} +{{- if eq .Values.image.registry "" -}} + {{- printf "%s%s" .Values.image.repository $tag -}} +{{- else }} + {{- printf "%s/%s%s" .Values.image.registry .Values.image.repository $tag -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/helm/charts/kdp-infra/templates/deployment.yaml b/helm/charts/kdp-infra/templates/deployment.yaml new file mode 100644 index 0000000..ce0499e --- /dev/null +++ b/helm/charts/kdp-infra/templates/deployment.yaml @@ -0,0 +1,74 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kdp-infra.fullname" . }}-controller + labels: + app: {{ include "kdp-infra.name" . }} + release: {{ .Release.Name }} + {{- include "kdp-infra.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "kdp-infra.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kdp-infra.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kdp-infra.serviceAccountName" . }} + containers: + - name: {{ .Chart.Name }} + image: {{ include "kdp-infra.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + port: {{ .Values.service.port }} + path: /metrics + scheme: HTTP + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + httpGet: + port: {{ .Values.service.port }} + path: /metrics + scheme: HTTP + {{- toYaml .Values.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: kdp-src + mountPath: /root/.kdp + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + volumes: + - name: kdp-src + persistentVolumeClaim: + claimName: {{ include "kdp-infra.fullname" .}}-pvc + readOnly: false + {{- with .Values.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/helm/charts/kdp-infra/templates/installer.yaml b/helm/charts/kdp-infra/templates/installer.yaml new file mode 100644 index 0000000..513d22a --- /dev/null +++ b/helm/charts/kdp-infra/templates/installer.yaml @@ -0,0 +1,24 @@ +apiVersion: installer.kdp.io/v1alpha1 +kind: Infrastructure +metadata: + name: kdp-installer +spec: + kdpRepo: {{ .Values.installConfig.kdpRepo | quote }} + kdpRepoRef: {{ .Values.installConfig.kdpRepoRef | quote }} + setParameters: + {{- $staticParams := dict + "ingress.class" .Values.global.ingress.class + "ingress.domain" .Values.global.ingress.domain }} + {{- if ne .Values.global.ingress.tlsSecretName "" }} + {{- $_ := set $staticParams "ingress.tlsSecretName" .Values.global.ingress.tlsSecretName }} + {{- end }} + + {{- $allParams := list }} + {{- range .Values.installConfig.setParameters }} + {{- $allParams = append $allParams . }} + {{- end }} + {{- range $k, $v := $staticParams }} + {{- $allParams = append $allParams (dict "name" $k "value" $v) }} + {{- end }} + + {{- toYaml $allParams | nindent 2 }} diff --git a/helm/charts/kdp-infra/templates/rbac.yaml b/helm/charts/kdp-infra/templates/rbac.yaml new file mode 100644 index 0000000..196456f --- /dev/null +++ b/helm/charts/kdp-infra/templates/rbac.yaml @@ -0,0 +1,38 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kdp-infra.serviceAccountName" . }} + labels: + {{- include "kdp-infra.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kdp-infra.serviceAccountName" . }}-cr +rules: + - verbs: + - '*' + apiGroups: + - '*' + resources: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-crd-kdp-crb +subjects: + - kind: ServiceAccount + name: {{ include "kdp-infra.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "kdp-infra.serviceAccountName" . }}-cr +{{- end }} diff --git a/helm/charts/kdp-infra/values.yaml b/helm/charts/kdp-infra/values.yaml new file mode 100644 index 0000000..9d15987 --- /dev/null +++ b/helm/charts/kdp-infra/values.yaml @@ -0,0 +1,158 @@ +## @section Global parameters +global: + ingress: + ## @param global.ingress.class Ingress class + class: "kong" + ## @param global.ingress.domain Domain name for ingress + domain: "kdp-e2e.io" + ## @param global.ingress.tlsSecretName TLS secret name for ingress + tlsSecretName: "" +# Default values for kdp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
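+## Note: templates/installer.yaml folds global.ingress.class and global.ingress.domain
+## (plus global.ingress.tlsSecretName when non-empty) into the generated Infrastructure
+## CR's spec.setParameters, together with installConfig.setParameters defined below.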
+ +## @section kdp parameters +image: + ## @param image.registry Image registry + registry: "od-registry.linktimecloud.com" + ## @param image.repository Image repository + repository: kdp + ## @param image.pullPolicy Image pull policy + # IfNotPresent, Always + pullPolicy: Always + ## @param image.tag Image tag + tag: "1.0.0" + ## @param image.digest Image digest + digest: "" + +## @param imagePullSecrets Image pull secrets +imagePullSecrets: [] +## @param nameOverride String to partially override kdp.fullname +nameOverride: "" +## @param fullnameOverride String to fully override kdp.name +fullnameOverride: "" + +## @param serviceAccount.create Create serviceAccount +## @param serviceAccount.automount Automatically mount a ServiceAccount's API credentials? +## @param serviceAccount.annotations Annotations to add to the service account +## @param serviceAccount.name The name of the service account to use. +serviceAccount: + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + annotations: {} + # If not set and create is true, a name is generated using the fullname template + name: "" + +## @param podAnnotations +podAnnotations: {} +## @param podLabels +podLabels: {} + +## @param service.port Service port +service: + port: "9115" + +## @param resources +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +## @param volumes Volumes for the output Deployment definition +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +## @param volumeMounts Volume mounts for the output Deployment definition +volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +## @param nodeSelector Node selector for pod assignment +nodeSelector: {} + +## @param affinity Affinity for pod assignment +affinity: {} + +## @param livenessProbe.initialDelaySeconds Initial delay seconds for the liveness probe +## @param livenessProbe.periodSeconds Period seconds for the liveness probe +## @param livenessProbe.timeoutSeconds Timeout seconds for the liveness probe +## @param livenessProbe.failureThreshold Failure threshold for the liveness probe +## @param livenessProbe.successThreshold Success threshold for the liveness probe +livenessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + +## @param readinessProbe.initialDelaySeconds Initial delay seconds for the readiness probe +## @param readinessProbe.periodSeconds Period seconds for the readiness probe +## @param readinessProbe.timeoutSeconds Timeout seconds for the readiness probe +## @param readinessProbe.failureThreshold Failure threshold for the readiness probe +## @param readinessProbe.successThreshold Success threshold for the readiness probe +readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + +## @param metrics.serviceMonitor.enabled Enable the ServiceMonitor resource for Prometheus Operator +## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus +## 
@param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped +metrics: + serviceMonitor: + enabled: true + additionalLabels: {} + interval: 30s + +persistence: + ## @param persistence.enabled Enable persistence using Persistent Volume Claims + enabled: true + ## @param persistence.storageClass PVC Storage Class for MinIO® data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "default" + ## @param persistence.accessModes PVC Access Modes for MinIO® data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for MinIO® data volume + ## + size: 1Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + +installConfig: + ## @param installConfig.kdpRepo kdp repo url + kdpRepo: "https://gitee.com/linktime-cloud/kubernetes-data-platform.git" + ## @param installConfig.kdpRepoRef kdp repo ref + kdpRepoRef: "release-1.2" + ## @param installConfig.setParameters setParameters + setParameters: [] + ## e.g: + ## - name: "dnsService.name" + ## value: "kube-dns" + ## - name: "kong.enabled" + ## value: "true" + ## - name: "prometheus.enabled" + ## value: "true" + ## - name: "loki.enabled" + ## value: "true" \ No newline at end of file diff --git a/helm/charts/web/.helmignore b/helm/charts/web/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/helm/charts/web/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/charts/web/Chart.yaml b/helm/charts/web/Chart.yaml new file mode 100644 index 0000000..1c81eaf --- /dev/null +++ b/helm/charts/web/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: web +description: A Helm chart for Kubernetes-data-platform web + +type: application + +version: 1.2.0 diff --git a/helm/charts/web/templates/_helpers.tpl b/helm/charts/web/templates/_helpers.tpl new file mode 100644 index 0000000..cc3055d --- /dev/null +++ b/helm/charts/web/templates/_helpers.tpl @@ -0,0 +1,35 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "web.name" -}} +{{- default .Chart.Name | trunc 63 | trimSuffix "-"}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "web.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "web.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "web.ingress.domain" -}} +{{- default "kdp-e2e.io" .Values.global.ingress.domain | trunc 63 | trimSuffix "-" }} +{{- end }} diff --git a/helm/charts/web/templates/configmap.yaml b/helm/charts/web/templates/configmap.yaml new file mode 100644 index 0000000..8531767 --- /dev/null +++ b/helm/charts/web/templates/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kdp-{{ include "web.fullname" . }} +data: + index.js: | + System.register(["@kubed/components","react","react-router-dom"],(function(t,e){var r={},n={},o={};return{setters:[function(t){r.Loading=t.Loading},function(t){n.default=t.default,n.useEffect=t.useEffect},function(t){o.useNavigate=t.useNavigate}],execute:function(){t(function(){var t={189:function(t,e,r){var n={"./base.json":133};function o(t){var e=i(t);return r(e)}function i(t){if(!r.o(n,t)){var e=new Error("Cannot find module '"+t+"'");throw e.code="MODULE_NOT_FOUND",e}return n[t]}o.keys=function(){return Object.keys(n)},o.resolve=i,t.exports=o,o.id=189},134:function(t,e,r){var n={"./base.json":366};function o(t){var e=i(t);return r(e)}function i(t){if(!r.o(n,t)){var e=new Error("Cannot find module '"+t+"'");throw e.code="MODULE_NOT_FOUND",e}return n[t]}o.keys=function(){return Object.keys(n)},o.resolve=i,t.exports=o,o.id=134},725:function(t,e,r){var n=r(825).y;e.w=function(t){if(t||(t=1),!r.y.meta||!r.y.meta.url)throw console.error("__system_context__",r.y),Error("systemjs-webpack-interop was provided an unknown SystemJS context. Expected context.meta.url, but none was provided");r.p=n(r.y.meta.url,t)}},825:function(t,e,r){function n(t,e){var r=document.createElement("a");r.href=t;for(var n="/"===r.pathname[0]?r.pathname:"/"+r.pathname,o=0,i=n.length;o!==e&&i>=0;){"/"===n[--i]&&o++}if(o!==e)throw Error("systemjs-webpack-interop: rootDirectoryLevel ("+e+") is greater than the number of directories ("+o+") in the URL path "+t);var c=n.slice(0,i+1);return r.protocol+"//"+r.host+c}e.y=n;var o=Number.isInteger||function(t){return"number"==typeof t&&isFinite(t)&&Math.floor(t)===t}},761:function(t){"use strict";t.exports=r},726:function(t){"use strict";t.exports=n},965:function(t){"use strict";t.exports=o},133:function(t){"use strict";t.exports={name:"Name"}},366:function(t){"use strict";t.exports={name:"名称"}}},i={};function c(e){var r=i[e];if(void 0!==r)return r.exports;var n=i[e]={exports:{}};return t[e](n,n.exports,c),n.exports}c.y=e,c.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),c.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},function(){var t;c.g.importScripts&&(t=c.g.location+"");var e=c.g.document;if(!t&&e&&(e.currentScript&&(t=e.currentScript.src),!t)){var r=e.getElementsByTagName("script");if(r.length)for(var n=r.length-1;n>-1&&(!t||!/^http(s?):/.test(t));)t=r[n--].src}if(!t)throw new Error("Automatic publicPath is not supported in this browser");t=t.replace(/#.*$/,"").replace(/\?.*$/,"").replace(/\/[^\/]+$/,"/"),c.p=t}();return(0,c(725).w)(1),function(){"use strict";var t=c(726),e=c(965),r=c(761),n="http://kdp-ux.{{ include "web.ingress.domain" .}}/#/bigDataClusterOverview";function o(){console.log("Rendering component with user:",n);var o=(0,e.useNavigate)();return(0,t.useEffect)((function(){window.open(n),o(-1,{replace:!0})}),[]),t.default.createElement(r.Loading,{className:"page-loading"})}var i=[{path:"/kdp",element:t.default.createElement(o,null)}];function u(t){return 
u="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},u(t)}function s(t){var e=function(t,e){if("object"!=u(t)||!t)return t;var r=t[Symbol.toPrimitive];if(void 0!==r){var n=r.call(t,e||"default");if("object"!=u(n))return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===e?String:Number)(t)}(t,"string");return"symbol"==u(e)?e:e+""}function a(t,e,r){return(e=s(e))in t?Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}):t[e]=r,t}function f(t,e){var r=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);e&&(n=n.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),r.push.apply(r,n)}return r}function p(t){for(var e=1;e=1.26.3-0" +ksVersion: ">=4.0.0-0" +maintainers: + - name: "linktimecloud" + email: "info@linktime.cloud" + url: "https://www.linktimecloud.com/" +provider: + zh: + name: "武汉智领云科技有限公司" + email: "info@linktime.cloud" + url: "https://en.linktimecloud.com/" + en: + name: "LinkTime Cloud" + email: "info@linktime.cloud" + url: "https://www.linktimecloud.com/" +icon: ./static/kdp.svg +screenshots: # 扩展组件截图(可选项) + - ./static/screenshots/kdp.png + - ./static/screenshots/kdp-ux.png + - ./static/screenshots/kdp-bdc.png +dependencies: + - name: web + tags: + - extension + - name: kdp-infra + tags: + - extension +# installationMode describes how to install subcharts, it can be HostOnly or Multicluster. +# In Multicluster mode, the subchart with tag `extension` will only be deployed to the host cluster, +# and the subchart with tag `agent` will be deployed to all selected clusters. +installationMode: HostOnly +# Custom namespace example: If not specified, it will be installed in the namespace named extension-{name}. 
+#namespace: "" + diff --git a/helm/permissions.yaml b/helm/permissions.yaml new file mode 100644 index 0000000..2e19643 --- /dev/null +++ b/helm/permissions.yaml @@ -0,0 +1,55 @@ +kind: ClusterRole +rules: + - verbs: + - '*' + apiGroups: + - 'extensions.kubesphere.io' + resources: + - '*' + - apiGroups: + - 'apiextensions.k8s.io' + resources: + - 'customresourcedefinitions' + verbs: + - '*' + - apiGroups: + - 'rbac.authorization.k8s.io' + resources: + - 'clusterroles' + - 'clusterrolebindings' + verbs: + - '*' + - apiGroups: + - '' + resources: + - 'namespaces' + verbs: + - '*' + - apiGroups: + - 'installer.kdp.io' + resources: + - 'infrastructures' + verbs: + - '*' + + +--- +kind: Role +rules: + - verbs: + - '*' + apiGroups: + - '' + - 'apps' + - 'batch' + - 'app.k8s.io' + - 'autoscaling' + resources: + - '*' + - verbs: + - '*' + apiGroups: + - 'networking.k8s.io' + resources: + - 'ingresses' + - 'networkpolicies' diff --git a/helm/static/kdp.svg b/helm/static/kdp.svg new file mode 100644 index 0000000..374edc0 --- /dev/null +++ b/helm/static/kdp.svg @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/helm/static/screenshots/kdp-bdc.png b/helm/static/screenshots/kdp-bdc.png new file mode 100644 index 0000000..953ec9e Binary files /dev/null and b/helm/static/screenshots/kdp-bdc.png differ diff --git a/helm/static/screenshots/kdp-ux.png b/helm/static/screenshots/kdp-ux.png new file mode 100644 index 0000000..f45dbd9 Binary files /dev/null and b/helm/static/screenshots/kdp-ux.png differ diff --git a/helm/static/screenshots/kdp.png b/helm/static/screenshots/kdp.png new file mode 100644 index 0000000..8612b93 Binary files /dev/null and b/helm/static/screenshots/kdp.png differ diff --git a/helm/values.yaml b/helm/values.yaml new file mode 100644 index 0000000..e1e30cb --- /dev/null +++ b/helm/values.yaml @@ -0,0 +1,153 @@ +global: + ingress: + class: "kong" + domain: "kdp-e2e.io" + tlsSecretName: "" + +kdp-infra: + # Default values for kdp. + + ## @section kdp parameters + ## @param image.registry Image registry + ## @param image.repository Image repository + ## @param image.pullPolicy Image pull policy + ## @param image.tag Image tag + ## @param image.digest Image digest + image: + registry: "od-registry.linktimecloud.com" + repository: kdp + # IfNotPresent, Always + pullPolicy: Always + tag: "" + digest: "" + + ## @param imagePullSecrets Image pull secrets + imagePullSecrets: [] + ## @param nameOverride String to partially override kdp.fullname + nameOverride: "" + ## @param fullnameOverride String to fully override kdp.name + fullnameOverride: "" + + ## @param serviceAccount.create Create serviceAccount + ## @param serviceAccount.automount Automatically mount a ServiceAccount's API credentials? + ## @param serviceAccount.annotations Annotations to add to the service account + ## @param serviceAccount.name The name of the service account to use. + serviceAccount: + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + annotations: {} + # If not set and create is true, a name is generated using the fullname template + name: "" + + ## @param podAnnotations + podAnnotations: {} + ## @param podLabels + podLabels: {} + + ## @param service.port Service port + service: + port: "9115" + + ## @param resources + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
diff --git a/helm/static/kdp.svg b/helm/static/kdp.svg
new file mode 100644
index 0000000..374edc0
--- /dev/null
+++ b/helm/static/kdp.svg
@@ -0,0 +1,31 @@
+<!-- 31 lines of SVG markup (KDP logo); the element content is not recoverable from this extract -->
diff --git a/helm/static/screenshots/kdp-bdc.png b/helm/static/screenshots/kdp-bdc.png
new file mode 100644
index 0000000..953ec9e
Binary files /dev/null and b/helm/static/screenshots/kdp-bdc.png differ
diff --git a/helm/static/screenshots/kdp-ux.png b/helm/static/screenshots/kdp-ux.png
new file mode 100644
index 0000000..f45dbd9
Binary files /dev/null and b/helm/static/screenshots/kdp-ux.png differ
diff --git a/helm/static/screenshots/kdp.png b/helm/static/screenshots/kdp.png
new file mode 100644
index 0000000..8612b93
Binary files /dev/null and b/helm/static/screenshots/kdp.png differ
diff --git a/helm/values.yaml b/helm/values.yaml
new file mode 100644
index 0000000..e1e30cb
--- /dev/null
+++ b/helm/values.yaml
@@ -0,0 +1,153 @@
+global:
+  ingress:
+    class: "kong"
+    domain: "kdp-e2e.io"
+    tlsSecretName: ""
+
+kdp-infra:
+  # Default values for kdp.
+
+  ## @section kdp parameters
+  ## @param image.registry Image registry
+  ## @param image.repository Image repository
+  ## @param image.pullPolicy Image pull policy
+  ## @param image.tag Image tag
+  ## @param image.digest Image digest
+  image:
+    registry: "od-registry.linktimecloud.com"
+    repository: kdp
+    # IfNotPresent, Always
+    pullPolicy: Always
+    tag: ""
+    digest: ""
+
+  ## @param imagePullSecrets Image pull secrets
+  imagePullSecrets: []
+  ## @param nameOverride String to partially override kdp.fullname
+  nameOverride: ""
+  ## @param fullnameOverride String to fully override kdp.name
+  fullnameOverride: ""
+
+  ## @param serviceAccount.create Create serviceAccount
+  ## @param serviceAccount.automount Automatically mount a ServiceAccount's API credentials?
+  ## @param serviceAccount.annotations Annotations to add to the service account
+  ## @param serviceAccount.name The name of the service account to use.
+  serviceAccount:
+    create: true
+    # Automatically mount a ServiceAccount's API credentials?
+    automount: true
+    annotations: {}
+    # If not set and create is true, a name is generated using the fullname template
+    name: ""
+
+  ## @param podAnnotations Annotations to add to the pod
+  podAnnotations: {}
+  ## @param podLabels Labels to add to the pod
+  podLabels: {}
+
+  ## @param service.port Service port
+  service:
+    port: "9115"
+
+  ## @param resources Resource requests and limits for the container
+  resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+  ## @param volumes Volumes for the output Deployment definition
+  volumes: []
+  # - name: foo
+  #   secret:
+  #     secretName: mysecret
+  #     optional: false
+
+  ## @param volumeMounts Volume mounts for the output Deployment definition
+  volumeMounts: []
+  # - name: foo
+  #   mountPath: "/etc/foo"
+  #   readOnly: true
+
+  ## @param nodeSelector Node selector for pod assignment
+  nodeSelector: {}
+
+  ## @param affinity Affinity for pod assignment
+  affinity: {}
+
+  ## @param livenessProbe.initialDelaySeconds Initial delay seconds for the liveness probe
+  ## @param livenessProbe.periodSeconds Period seconds for the liveness probe
+  ## @param livenessProbe.timeoutSeconds Timeout seconds for the liveness probe
+  ## @param livenessProbe.failureThreshold Failure threshold for the liveness probe
+  ## @param livenessProbe.successThreshold Success threshold for the liveness probe
+  livenessProbe:
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 1
+    failureThreshold: 3
+    successThreshold: 1
+
+  ## @param readinessProbe.initialDelaySeconds Initial delay seconds for the readiness probe
+  ## @param readinessProbe.periodSeconds Period seconds for the readiness probe
+  ## @param readinessProbe.timeoutSeconds Timeout seconds for the readiness probe
+  ## @param readinessProbe.failureThreshold Failure threshold for the readiness probe
+  ## @param readinessProbe.successThreshold Success threshold for the readiness probe
+  readinessProbe:
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 1
+    failureThreshold: 3
+    successThreshold: 1
+
+  ## @param metrics.serviceMonitor.enabled Enable the ServiceMonitor resource for Prometheus Operator
+  ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+  ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped
+  ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
+  metrics:
+    serviceMonitor:
+      enabled: true
+      additionalLabels: {}
+      interval: 30s
+      scrapeTimeout: ""
+
+  ## @param persistence.enabled Enable persistence using a PVC
+  persistence:
+    enabled: true
+    ## @param persistence.storageClass PVC Storage Class for the data volume
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    storageClass: "standard"
+    ## @param persistence.accessModes PVC Access Modes for the data volume
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param persistence.size PVC Storage Request for the data volume
+    ##
+    size: 1Gi
+    ## @param persistence.annotations Annotations for the PVC
+    ##
+    annotations: {}
+
+  installConfig:
+    ## @param installConfig.kdpRepo kdp repo url
+    kdpRepo: "https://gitee.com/linktime-cloud/kubernetes-data-platform.git"
+    ## @param installConfig.kdpRepoRef kdp repo ref
+    kdpRepoRef: "release-1.2"
+    ## @param installConfig.setParameters Name/value parameters applied to the KDP installation
+    setParameters:
+      - name: "dnsService.name"
+        value: "kube-dns"
+      - name: "kong.enabled"
+        value: "true"
+      - name: "prometheus.enabled"
+        value: "true"
+      - name: "loki.enabled"
+        value: "true"
\ No newline at end of file
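# Usage sketch (illustrative, not part of the patch): helm/values.yaml carries the extension's
# defaults, with the kdp-infra subchart values nested under the `kdp-infra` key. Rendering the
# subchart directly with a few overrides might look like this; the release name and the
# unnested override keys follow the @param annotations above and are assumptions.
helm template kdp-infra ./helm/charts/kdp-infra \
  --set image.tag=v1.2.1 \
  --set persistence.storageClass=standard \
  --set installConfig.kdpRepoRef=release-1.2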
diff --git a/kdp.Dockerfile b/kdp.Dockerfile
new file mode 100644
index 0000000..aa27bbb
--- /dev/null
+++ b/kdp.Dockerfile
@@ -0,0 +1,26 @@
+ARG TARGETARCH
+FROM --platform=linux/${TARGETARCH} ghcr.io/flant/shell-operator:latest AS builder
+RUN apk --no-cache add python3 \
+    && python3 -m venv --copies /venv
+
+FROM builder AS pip-package
+ENV PATH="/venv/bin:$PATH"
+
+RUN pip install --no-cache --upgrade pip kubernetes==26.1.0 jsonpath==0.82.2 deepdiff==7.0.1
+
+
+FROM builder AS image
+ARG TARGETARCH
+ARG VERSION
+ENV KDP_ROOT_DIR=${KDP_ROOT_DIR:-.kdp}
+ENV PATH="/venv/bin:$PATH"
+
+ADD docker/hooks/* /hooks
+ADD cmd/output/${VERSION}/kdp-linux-$TARGETARCH /usr/local/bin/kdp
+
+COPY --from=pip-package /venv /venv
+
+
+RUN chmod +x /hooks/* \
+    && cd $HOME \
+    && mkdir $KDP_ROOT_DIR
\ No newline at end of file
diff --git a/makefiles/build-helm-package.mk b/makefiles/build-helm-package.mk
new file mode 100644
index 0000000..ab1ef4d
--- /dev/null
+++ b/makefiles/build-helm-package.mk
@@ -0,0 +1,26 @@
+##@ Helm package
+HELM_CHART ?= kdp-infra
+HELM_CHART_VERSION ?= $(VERSION)
+KUBESPHERE_EXTENSION ?= helm
+
+.PHONY: helm-package
+helm-package: ## Helm package
+	cd helm/charts && $(HELMBIN) package $(HELM_CHART) --version $(HELM_CHART_VERSION) --app-version $(HELM_CHART_VERSION)
+
+
+.PHONY: kubesphere-helm-package
+kubesphere-helm-package: ## Kubesphere Helm package
+	@echo $(VERSION)
+	@sed -i.bak "s/1\.2\.0/$(VERSION)/g" ./helm/extension.yaml
+	$(KSBUILDBIN) package $(KUBESPHERE_EXTENSION)
+	mv ./helm/extension.yaml.bak ./helm/extension.yaml
+
+
+.PHONY: helm-doc
+helm-doc: ## Helm doc
+	cd helm/charts && $(HELMBIN) docs $(HELM_CHART)
+
+
+.PHONY: helm-doc-gen
+helm-doc-gen: helm-doc ## helm-doc-gen: Generate helm chart README.md
+	readme-generator -v helm/charts/$(HELM_CHART)/values.yaml -r helm/charts/$(HELM_CHART)/README.md
\ No newline at end of file
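# Usage sketch (illustrative, not part of the patch): the packaging targets above expect an
# explicit VERSION and rely on helm, readme-generator and ksbuilder being available (see
# makefiles/dependency.mk below). The version value is a placeholder.
make helm-doc-gen
make helm-package VERSION=1.2.1
make kubesphere-helm-package VERSION=1.2.1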
\"kdp/cmd.CliBuiltAt=$(BUILD_DATE)\" -X \"kdp/cmd.CliOSArch=linux/$(TARGETARCH)\"" -o ./cmd/output/$(VERSION)/kdp-linux-$(TARGETARCH); \ + docker buildx build \ + --output=type=$(OUTPUT_TYPE) \ + --platform linux/$(TARGETARCH) \ + --provenance false \ + --build-arg VERSION=$(VERSION) \ + --build-arg TARGETARCH=$(TARGETARCH) \ + -t $(IMG_REGISTRY)/$(KDP_IMG)-linux-$(TARGETARCH) \ + -f kdp.Dockerfile . + @$(OK) + + +##@ push infra image +.PHONY: publish +publish: + docker manifest create --amend $(IMG_REGISTRY)/$(KDP_IMG) $(foreach osarch, $(ALL_OS_ARCH), $(IMG_REGISTRY)/$(KDP_IMG)-${osarch}) + docker manifest push --purge $(IMG_REGISTRY)/$(KDP_IMG) + docker manifest inspect $(IMG_REGISTRY)/$(KDP_IMG) + + +##@ Build multi-arch image +.PHONY: multi-arch-builder +multi-arch-builder: + for arch in $(TARGETARCHS); do \ + TARGETARCH=$${arch} $(MAKE) kdp-infra-build;\ + done diff --git a/makefiles/dependency.mk b/makefiles/dependency.mk index c780149..14197eb 100644 --- a/makefiles/dependency.mk +++ b/makefiles/dependency.mk @@ -50,3 +50,58 @@ STATIC_CHECK=$(GOBIN)/staticcheck else STATIC_CHECK=$(shell which staticcheck) endif + +HELM_VERSION ?= helm-v3.6.0-linux-amd64.tar.gz +.PHONY: helm +helm: ## Download helm cli locally if necessary. +ifeq (, $(shell which helm)) + @{ \ + set -e ;\ + echo 'installing $(HELM_VERSION)' ;\ + wget $(ARTIFACTS_SERVER)/$(HELM_VERSION) ;\ + tar -zxvf $(HELM_VERSION) ;\ + mv linux-amd64/helm /bin/helm ;\ + rm -f $(HELM_VERSION) ;\ + rm -rf linux-amd64 ;\ + echo 'Successfully installed' ;\ + } +else + @$(OK) Helm CLI is already installed +HELMBIN=$(shell which helm) +endif + + +.PHONY: helm-doc +helm-doc: ## Install helm-doc locally if necessary. +ifeq (, $(shell which readme-generator)) + @{ \ + set -e ;\ + echo 'installing readme-generator-for-helm' ;\ + npm install -g @bitnami/readme-generator-for-helm ;\ + } +else + @$(OK) readme-generator-for-helm is already installed +HELM_DOC=$(shell which readme-generator) +endif + + +KSBUILDER_VERSION ?= ksbuilder_0.4.2_linux_amd64.tar.gz +.PHONY: ksbuilder +ksbuilder: +## Download helm cli locally if necessary. +ifeq (, $(shell which ksbuilder)) + @{ \ + set -e ;\ + echo 'installing $(KSBUILDER_VERSION)' ;\ + wget https://github.com/kubesphere/ksbuilder/releases/download/v0.4.2/$(KSBUILDER_VERSION) ;\ + mkdir -p ./ksbuilder + tar -zxvf $(KSBUILDER_VERSION) -C ./ksbuilder;\ + mv ./ksbuilder/ksbuilder /bin/ksbuilder ;\ + rm -f $(HELM_VERSION) ;\ + rm -rf ./ksbuilder ;\ + echo 'Successfully installed' ;\ + } +else + @$(OK) ksbuilder CLI is already installed +KSBUILDBIN=$(shell which ksbuilder) +endif