Commit 58749d2

Run PR check for guided notebooks

1 parent f100ba1
6 files changed (+114 -24 lines)
.github/resources/wait_for_job_cell.json (new file, +20)

@@ -0,0 +1,20 @@
+{
+  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+    "from time import sleep\n",
+    "\n",
+    "finished = False\n",
+    "while not finished:\n",
+    "    sleep(5)\n",
+    "    status = client.get_job_status(submission_id)\n",
+    "    finished = (status == \"SUCCEEDED\" or status == \"FAILED\" or status == \"STOPPED\")\n",
+    "    print(status)\n",
+    "print(\"Job status \" + status)\n",
+    "print(\"Logs: \")\n",
+    "print(client.get_job_logs(submission_id))\n",
+    "assert status == \"SUCCEEDED\", \"Job failed or was stopped!\""
+  ]
+}
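
The injected cell polls the job every five seconds until it reaches a terminal state, then prints the logs and asserts success. It assumes the executing notebook has already created `client` and `submission_id`; as a rough, hypothetical sketch (not part of this commit, with placeholder values), that context comes from a Ray job submission along these lines:

    # Hypothetical sketch of the state the wait-for-job cell relies on.
    # The guided demo obtains its client from the CodeFlare cluster object;
    # a plain Ray JobSubmissionClient with placeholder values is shown here instead.
    from ray.job_submission import JobSubmissionClient

    client = JobSubmissionClient("http://127.0.0.1:8265")  # placeholder dashboard URL

    submission_id = client.submit_job(
        entrypoint="python mnist_fashion.py",  # placeholder entrypoint
        runtime_env={"working_dir": "./"},     # placeholder runtime environment
    )

SUCCEEDED, FAILED, and STOPPED are the terminal values reported by `get_job_status`, which is why the loop exits on any of them.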

.github/workflows/e2e_tests.yaml renamed to .github/workflows/guided_notebook_tests.yaml (+61 -15)
@@ -1,4 +1,4 @@
-name: e2e
+name: Guided notebooks tests
 
 on:
   pull_request:
@@ -76,6 +76,8 @@ jobs:
 
       - name: Install NVidia GPU operator for KinD
         uses: ./common/github-actions/nvidia-gpu-operator
+        with:
+          enable-time-slicing: 'true'
 
       - name: Deploy CodeFlare stack
         id: deploy
@@ -113,46 +115,90 @@ jobs:
           kubectl create clusterrolebinding sdk-user-list-secrets --clusterrole=list-secrets --user=sdk-user
           kubectl config use-context sdk-user
 
-      - name: Run e2e tests
+      - name: Setup Guided notebooks execution
         run: |
-          export CODEFLARE_TEST_OUTPUT_DIR=${{ env.TEMP_DIR }}
-          echo "CODEFLARE_TEST_OUTPUT_DIR=${CODEFLARE_TEST_OUTPUT_DIR}" >> $GITHUB_ENV
+          echo "Installing papermill and dependencies..."
+          pip install poetry papermill ipython ipykernel
+          # Disable virtualenv due to problems using packaged in virtualenv in papermill
+          poetry config virtualenvs.create false
 
-          set -euo pipefail
-          pip install poetry
+          echo "Installing SDK..."
           poetry install --with test,docs
-          echo "Running e2e tests..."
-          poetry run pytest -v -s ./tests/e2e -m 'kind and nvidia_gpu' > ${CODEFLARE_TEST_OUTPUT_DIR}/pytest_output.log 2>&1
+
+      - name: Run 0_basic_ray.ipynb
+        run: |
+          set -euo pipefail
+
+          # Remove login/logout cells, as KinD doesn't support authentication using token
+          jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' 0_basic_ray.ipynb > 0_basic_ray.ipynb.tmp && mv 0_basic_ray.ipynb.tmp 0_basic_ray.ipynb
+          jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' 0_basic_ray.ipynb > 0_basic_ray.ipynb.tmp && mv 0_basic_ray.ipynb.tmp 0_basic_ray.ipynb
+          # Run notebook
+          # poetry run papermill 0_basic_ray.ipynb 0_basic_ray_out.ipynb --log-output --execution-timeout 600
+        working-directory: demo-notebooks/guided-demos
+
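
The two jq filters above delete every cell whose source mentions the authentication object, since a KinD cluster does not support the token-based login the demos use. A rough Python equivalent of that transformation (illustrative only; the workflow itself sticks with jq) is:

    # Illustrative only: the same cell removal expressed with the json module.
    # The match strings mirror the two jq filters above.
    import json

    with open("0_basic_ray.ipynb") as f:
        nb = json.load(f)

    drop_markers = ("Create authentication object for user permissions", "auth.logout()")
    nb["cells"] = [
        cell
        for cell in nb["cells"]
        if not any(m in line for line in cell.get("source", []) for m in drop_markers)
    ]

    with open("0_basic_ray.ipynb", "w") as f:
        json.dump(nb, f, indent=1)
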
+      - name: Run 1_cluster_job_client.ipynb
+        run: |
+          set -euo pipefail
+
+          # Remove login/logout cells, as KinD doesn't support authentication using token
+          jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' 1_cluster_job_client.ipynb > 1_cluster_job_client.ipynb.tmp && mv 1_cluster_job_client.ipynb.tmp 1_cluster_job_client.ipynb
+          jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' 1_cluster_job_client.ipynb > 1_cluster_job_client.ipynb.tmp && mv 1_cluster_job_client.ipynb.tmp 1_cluster_job_client.ipynb
+          # Replace async logs with waiting for job to finish, async logs don't work properly in papermill
+          JOB_WAIT=$(jq -r '.' ${GITHUB_WORKSPACE}/.github/resources/wait_for_job_cell.json)
+          jq --argjson job_wait "$JOB_WAIT" -r '(.cells[] | select(.source[] | contains("async for lines in client.tail_job_logs"))) |= $job_wait' 1_cluster_job_client.ipynb > 1_cluster_job_client.ipynb.tmp && mv 1_cluster_job_client.ipynb.tmp 1_cluster_job_client.ipynb
+          # Run notebook
+          # poetry run papermill 1_cluster_job_client.ipynb 1_cluster_job_client_out.ipynb --log-output --execution-timeout 1200
+        working-directory: demo-notebooks/guided-demos
+
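
Because papermill cannot drive the `async for lines in client.tail_job_logs(...)` cell reliably, that cell is swapped wholesale for the polling cell added in `.github/resources/wait_for_job_cell.json`. A hypothetical Python rendering of the same jq `--argjson` substitution:

    # Illustrative only: equivalent of the jq --argjson substitution above.
    import json

    with open(".github/resources/wait_for_job_cell.json") as f:
        job_wait_cell = json.load(f)

    with open("1_cluster_job_client.ipynb") as f:
        nb = json.load(f)

    nb["cells"] = [
        job_wait_cell
        if any("async for lines in client.tail_job_logs" in line for line in cell.get("source", []))
        else cell
        for cell in nb["cells"]
    ]

    with open("1_cluster_job_client.ipynb", "w") as f:
        json.dump(nb, f, indent=1)
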
+      - name: Run 2_basic_interactive.ipynb
+        run: |
+          set -euo pipefail
+
+          # Remove login/logout cells, as KinD doesn't support authentication using token
+          jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' 2_basic_interactive.ipynb > 2_basic_interactive.ipynb.tmp && mv 2_basic_interactive.ipynb.tmp 2_basic_interactive.ipynb
+          jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' 2_basic_interactive.ipynb > 2_basic_interactive.ipynb.tmp && mv 2_basic_interactive.ipynb.tmp 2_basic_interactive.ipynb
+          # Rewrite cluster_uri() to local_client_url() to retrieve client URL available out of cluster, as the test is executed outside of cluster
+          sed -i "s/cluster_uri()/local_client_url()/" 2_basic_interactive.ipynb
+          # Run notebook
+          poetry run papermill 2_basic_interactive.ipynb 2_basic_interactive_out.ipynb --log-output --execution-timeout 1200
         env:
           GRPC_DNS_RESOLVER: "native"
+        working-directory: demo-notebooks/guided-demos
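
Only this notebook's papermill run is active at this point; the calls in the two previous steps are still commented out. The CLI invocation could also be driven from Python; a rough sketch, assuming papermill's `execute_notebook` forwards `execution_timeout` to its execution engine the same way the `--execution-timeout` flag does:

    # Rough sketch (not part of the workflow): the same run via papermill's Python API.
    import papermill as pm

    pm.execute_notebook(
        "2_basic_interactive.ipynb",
        "2_basic_interactive_out.ipynb",
        log_output=True,          # mirrors --log-output
        execution_timeout=1200,   # assumed to mirror --execution-timeout 1200
    )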

       - name: Switch to kind-cluster context to print logs
         if: always() && steps.deploy.outcome == 'success'
         run: kubectl config use-context kind-cluster
 
-      - name: Print Pytest output log
+      - name: Print debug info
         if: always() && steps.deploy.outcome == 'success'
         run: |
-          echo "Printing Pytest output logs"
-          cat ${CODEFLARE_TEST_OUTPUT_DIR}/pytest_output.log
+          echo "Printing debug info"
+          kubectl describe pods -n default
 
       - name: Print CodeFlare operator logs
         if: always() && steps.deploy.outcome == 'success'
         run: |
           echo "Printing CodeFlare operator logs"
-          kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${CODEFLARE_TEST_OUTPUT_DIR}/codeflare-operator.log
+          kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${TEMP_DIR}/codeflare-operator.log
+
+      - name: Print Kueue operator logs
+        if: always() && steps.deploy.outcome == 'success'
+        run: |
+          echo "Printing Kueue operator logs"
+          KUEUE_CONTROLLER_POD=$(kubectl get pods -n kueue-system | grep kueue-controller | awk '{print $1}')
+          kubectl logs -n kueue-system --tail -1 ${KUEUE_CONTROLLER_POD} | tee ${TEMP_DIR}/kueue.log
 
       - name: Print KubeRay operator logs
         if: always() && steps.deploy.outcome == 'success'
         run: |
           echo "Printing KubeRay operator logs"
-          kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${CODEFLARE_TEST_OUTPUT_DIR}/kuberay.log
+          kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${TEMP_DIR}/kuberay.log
 
       - name: Export all KinD pod logs
         uses: ./common/github-actions/kind-export-logs
         if: always() && steps.deploy.outcome == 'success'
         with:
-          output-directory: ${CODEFLARE_TEST_OUTPUT_DIR}
+          output-directory: ${TEMP_DIR}
 
       - name: Upload logs
         uses: actions/upload-artifact@v4
@@ -161,4 +207,4 @@ jobs:
           name: logs
           retention-days: 10
           path: |
-            ${{ env.CODEFLARE_TEST_OUTPUT_DIR }}/**/*.log
+            ${{ env.TEMP_DIR }}/**/*.log

demo-notebooks/guided-demos/0_basic_ray.ipynb (+3 -1)
@@ -62,10 +62,12 @@
     "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
     "cluster = Cluster(ClusterConfiguration(\n",
     "    name='raytest', \n",
+    "    head_cpus='500m',\n",
+    "    head_memory=2,\n",
     "    head_gpus=0, # For GPU enabled workloads set the head_gpus and num_gpus\n",
     "    num_gpus=0,\n",
     "    num_workers=2,\n",
-    "    min_cpus=1,\n",
+    "    min_cpus='250m',\n",
     "    max_cpus=1,\n",
     "    min_memory=4,\n",
     "    max_memory=4,\n",

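Decoded from the escaped JSON above, the updated cell builds the cluster roughly as follows. '500m' and '250m' are Kubernetes CPU quantities (millicores), so the head asks for half a core and each worker for a quarter core, small enough to schedule on the CI KinD node; the trailing parameters of the call are unchanged and omitted here.

    # The 0_basic_ray.ipynb configuration after this change, written out as plain Python.
    # Import path is an assumption based on the guided demos; remaining arguments omitted.
    from codeflare_sdk import Cluster, ClusterConfiguration

    cluster = Cluster(ClusterConfiguration(
        name='raytest',
        head_cpus='500m',   # Kubernetes quantity: 500 millicores = 0.5 CPU
        head_memory=2,
        head_gpus=0,        # For GPU enabled workloads set the head_gpus and num_gpus
        num_gpus=0,
        num_workers=2,
        min_cpus='250m',    # 0.25 CPU requested per worker
        max_cpus=1,
        min_memory=4,
        max_memory=4,
        # ... image, write_to_file, local_queue, etc. as in the notebook
    ))
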
demo-notebooks/guided-demos/1_cluster_job_client.ipynb (+3 -1)
@@ -44,10 +44,12 @@
     "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
     "cluster = Cluster(ClusterConfiguration(\n",
     "    name='jobtest',\n",
+    "    head_cpus=1,\n",
+    "    head_memory=4,\n",
     "    head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
     "    num_gpus=1,\n",
     "    num_workers=2,\n",
-    "    min_cpus=1,\n",
+    "    min_cpus='250m',\n",
     "    max_cpus=1,\n",
     "    min_memory=4,\n",
     "    max_memory=4,\n",

demo-notebooks/guided-demos/2_basic_interactive.ipynb (+17 -5)
@@ -60,13 +60,15 @@
     "cluster_name = \"interactivetest\"\n",
     "cluster = Cluster(ClusterConfiguration(\n",
     "    name=cluster_name,\n",
+    "    head_cpus=1,\n",
+    "    head_memory=4,\n",
     "    head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
     "    num_gpus=1,\n",
     "    num_workers=2,\n",
-    "    min_cpus=2,\n",
-    "    max_cpus=2,\n",
-    "    min_memory=8,\n",
-    "    max_memory=8,\n",
+    "    min_cpus='250m',\n",
+    "    max_cpus=1,\n",
+    "    min_memory=4,\n",
+    "    max_memory=4,\n",
     "    image=\"quay.io/rhoai/ray:2.23.0-py39-cu121\",\n",
     "    write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
     "    # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
@@ -251,7 +253,17 @@
     "\n",
     "    ray_trainer = TorchTrainer(\n",
     "        train_func,\n",
-    "        scaling_config=ScalingConfig(num_workers=3, use_gpu=True),\n",
+    "        scaling_config=ScalingConfig(\n",
+    "            # num_workers = number of worker nodes with the ray head node included\n",
+    "            num_workers=3,\n",
+    "            use_gpu=True,\n",
+    "            resources_per_worker={\n",
+    "                \"CPU\": 1,\n",
+    "            },\n",
+    "            trainer_resources={\n",
+    "                \"CPU\": 0,\n",
+    "            }\n",
+    "        )\n",
     "        # Configure persistent storage that is accessible across \n",
     "        # all worker nodes.\n",
     "        # Uncomment and update the RunConfig below to include your storage details.\n",

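The expanded ScalingConfig (here and in mnist_fashion.py below) makes the training job's resource requests explicit. A quick check of why the numbers fit the cluster configured above (a head and two workers, each capped at 1 CPU and carrying 1 GPU): three Ray Train workers at 1 CPU / 1 GPU each land one per node, head included, while `trainer_resources={"CPU": 0}` keeps the coordinating trainer actor from reserving a CPU the cluster does not have.

    # Resource bookkeeping for the interactive demo cluster (1 head + 2 workers).
    num_train_workers = 3                 # one Ray Train worker per node, head included
    per_worker = {"CPU": 1, "GPU": 1}     # resources_per_worker plus use_gpu=True
    trainer_overhead = {"CPU": 0}         # trainer_resources: coordinator reserves nothing

    total_cpus = num_train_workers * per_worker["CPU"] + trainer_overhead["CPU"]  # = 3
    total_gpus = num_train_workers * per_worker["GPU"]                            # = 3

    # The cluster above provides exactly 3 CPUs (head_cpus=1, max_cpus=1 x 2 workers)
    # and 3 GPUs (head_gpus=1, num_gpus=1 x 2 workers), so the job just fits.
    print(total_cpus, total_gpus)
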
demo-notebooks/guided-demos/mnist_fashion.py (+10 -2)
@@ -78,8 +78,16 @@ def train_func_distributed():
 trainer = TorchTrainer(
     train_func_distributed,
     scaling_config=ScalingConfig(
-        num_workers=3, use_gpu=use_gpu
-    ),  # num_workers = number of worker nodes with the ray head node included
+        # num_workers = number of worker nodes with the ray head node included
+        num_workers=3,
+        use_gpu=use_gpu,
+        resources_per_worker={
+            "CPU": 1,
+        },
+        trainer_resources={
+            "CPU": 0,
+        },
+    ),
 )
 
 results = trainer.fit()
