From 0c160f5c217cac00f878182bc3fd6a04758acff3 Mon Sep 17 00:00:00 2001
From: Sahil Prasad
Date: Wed, 23 Aug 2017 17:03:41 -0700
Subject: [PATCH] Bumping versions to v2.2.0-kubernetes-0.3.0

---
 conf/kubernetes-resource-staging-server.yaml |  2 +-
 conf/kubernetes-shuffle-service.yaml         |  8 ++---
 docs/running-on-kubernetes.md                | 34 ++++++++++----------
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/conf/kubernetes-resource-staging-server.yaml b/conf/kubernetes-resource-staging-server.yaml
index 025b9b125d9e0..80d59b8091903 100644
--- a/conf/kubernetes-resource-staging-server.yaml
+++ b/conf/kubernetes-resource-staging-server.yaml
@@ -32,7 +32,7 @@ spec:
             name: spark-resource-staging-server-config
       containers:
         - name: spark-resource-staging-server
-          image: kubespark/spark-resource-staging-server:v2.1.0-kubernetes-0.2.0
+          image: kubespark/spark-resource-staging-server:v2.2.0-kubernetes-0.3.0
           resources:
             requests:
               cpu: 100m
diff --git a/conf/kubernetes-shuffle-service.yaml b/conf/kubernetes-shuffle-service.yaml
index 55c170b01a4f5..8ab0b362ea32e 100644
--- a/conf/kubernetes-shuffle-service.yaml
+++ b/conf/kubernetes-shuffle-service.yaml
@@ -20,14 +20,14 @@ kind: DaemonSet
 metadata:
   labels:
     app: spark-shuffle-service
-    spark-version: 2.1.0
+    spark-version: 2.2.0
   name: shuffle
 spec:
   template:
     metadata:
       labels:
         app: spark-shuffle-service
-        spark-version: 2.1.0
+        spark-version: 2.2.0
     spec:
       volumes:
         - name: temp-volume
@@ -38,7 +38,7 @@ spec:
           # This is an official image that is built
           # from the dockerfiles/shuffle directory
           # in the spark distribution.
-          image: kubespark/spark-shuffle:v2.1.0-kubernetes-0.2.0
+          image: kubespark/spark-shuffle:v2.2.0-kubernetes-0.3.0
           imagePullPolicy: IfNotPresent
           volumeMounts:
             - mountPath: '/tmp'
@@ -51,4 +51,4 @@ spec:
             requests:
               cpu: "1"
             limits:
-              cpu: "1"
\ No newline at end of file
+              cpu: "1"
diff --git a/docs/running-on-kubernetes.md b/docs/running-on-kubernetes.md
index 4286ab19eb3ad..fd69fb371103f 100644
--- a/docs/running-on-kubernetes.md
+++ b/docs/running-on-kubernetes.md
@@ -36,15 +36,15 @@ If you wish to use pre-built docker images, you may use the images published in
   <tr><th>Component</th><th>Image</th></tr>
   <tr>
     <td>Spark Driver Image</td>
-    <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.2.0</code></td>
+    <td><code>kubespark/spark-driver:v2.2.0-kubernetes-0.3.0</code></td>
   </tr>
   <tr>
     <td>Spark Executor Image</td>
-    <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.2.0</code></td>
+    <td><code>kubespark/spark-executor:v2.2.0-kubernetes-0.3.0</code></td>
   </tr>
   <tr>
     <td>Spark Initialization Image</td>
-    <td><code>kubespark/spark-init:v2.1.0-kubernetes-0.2.0</code></td>
+    <td><code>kubespark/spark-init:v2.2.0-kubernetes-0.3.0</code></td>
   </tr>
 </table>

@@ -80,9 +80,9 @@ are set up as described above:
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
       local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar

 The Spark master, specified either via passing the `--master` command line argument to `spark-submit` or by setting
@@ -129,9 +129,9 @@ and then you can compute the value of Pi as follows:
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
       --conf spark.kubernetes.resourceStagingServer.uri=http://<address-of-any-cluster-node>:31000 \
       examples/jars/spark_examples_2.11-2.2.0.jar

@@ -170,9 +170,9 @@ If our local proxy were listening on port 8001, we would have our submission loo
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
       local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar

 Communication between Spark and Kubernetes clusters is performed using the fabric8 kubernetes-client library.
@@ -220,7 +220,7 @@ service because there may be multiple shuffle service instances running in a clu
 a way to target a particular shuffle service.

 For example, if the shuffle service we want to use is in the default namespace, and
-has pods with labels `app=spark-shuffle-service` and `spark-version=2.1.0`, we can
+has pods with labels `app=spark-shuffle-service` and `spark-version=2.2.0`, we can
 use those tags to target that particular shuffle service at job launch time. In order to run a job with
 dynamic allocation enabled, the command may then look like the following:

@@ -235,7 +235,7 @@ the command may then look like the following:
       --conf spark.dynamicAllocation.enabled=true \
       --conf spark.shuffle.service.enabled=true \
       --conf spark.kubernetes.shuffle.namespace=default \
-      --conf spark.kubernetes.shuffle.labels="app=spark-shuffle-service,spark-version=2.1.0" \
+      --conf spark.kubernetes.shuffle.labels="app=spark-shuffle-service,spark-version=2.2.0" \
       local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar 10 400000 2

 ## Advanced
@@ -312,9 +312,9 @@ communicate with the resource staging server over TLS. The trustStore can be set
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
       --conf spark.kubernetes.resourceStagingServer.uri=https://<address-of-any-cluster-node>:31000 \
       --conf spark.ssl.kubernetes.resourceStagingServer.enabled=true \
      --conf spark.ssl.kubernetes.resourceStagingServer.clientCertPem=/home/myuser/cert.pem \
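
A quick sanity check after applying the patch is to confirm that no stale tags survive in the tree. A minimal sketch, assuming the patch is saved as `bump-versions.patch` (a hypothetical filename) and run from the repository root:

    # Hypothetical filename; preview which files the patch touches first.
    git apply --stat bump-versions.patch

    # Apply while preserving the original author and commit message.
    git am bump-versions.patch

    # The old tag should no longer appear in the files the patch touches.
    grep -rn "v2.1.0-kubernetes-0.2.0" conf/ docs/ || echo "no stale tags"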
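Because the shuffle-service image tag and its `spark-version` pod label move together, the deployed DaemonSet and the `spark.kubernetes.shuffle.labels` value passed at submit time have to be bumped in lockstep, which is what this patch does. A hedged sketch of rolling out the updated DaemonSet and checking that its pods carry the label spark-submit will select on (assumes `kubectl` already points at the target cluster):

    # Roll out the bumped shuffle-service DaemonSet from this patch.
    kubectl apply -f conf/kubernetes-shuffle-service.yaml

    # Pods should match the labels passed via spark.kubernetes.shuffle.labels.
    kubectl get pods -l app=spark-shuffle-service,spark-version=2.2.0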