# Global release test configuration file.
# All your release test configuration should go here. Adding release tests here
# will automatically enable them in the Buildkite release testing schedules
# (except those with frequency: manual).
# Here is an example configuration for reference:
#- name: example_test
#  # Tests with the same group will be grouped in the Buildkite UI
#  group: Example group
#  # Provide the working directory which will be uploaded to the cluster
#  working_dir: example_dir
#
#  # How often to run the tests.
#  # One of [manual, any, multi, nightly, nightly-3x, weekly].
#  # Descriptions of each frequency (where not immediately obvious):
#  #   - manual: Not run on a schedule, but can be manually run through the Buildkite UI.
#  #   - nightly-3x: Run 3 times a week (Monday, Wednesday, Friday).
#  frequency: weekly
#  # Owning team. This field will be persisted to the database
#  team: ml
#
#  # Python version. This optional field determines which Python version to run tests
#  # on. This must be a string!
#  python: "3.7"
#
#  # Cluster information
#  cluster:
#    # Location of cluster compute, relative to working_dir
#    cluster_compute: cluster_compute.yaml
#    # Autosuspend parameter passed to the cluster.
#    # The cluster will automatically terminate if inactive for this
#    # many minutes. Defaults to 10 if not set.
#    autosuspend_mins: 10
#    # Optional cloud_id to use instead of the default cloud
#    cloud_id: cld_12345678
#    # Alternatively, you can specify a cloud name
#    cloud_name: anyscale_default_cloud
#
#  # Run configuration for the test
#  run:
#    # If you want to wait for nodes to be ready, you can specify this here:
#    wait_for_nodes:
#      # Number of nodes
#      num_nodes: 16
#      # Timeout for waiting for nodes. If nodes are not up by then, the
#      # test will fail.
#      timeout: 600
#
#    # Optional prepare script to be run on the cluster before the test script
#    prepare: python prepare.py
#    # The prepare command can have a separate timeout
#    prepare_timeout: 300
#
#    # Main script to run as the test script
#    script: python workloads/train_small.py
#    # Timeout in seconds. After this time the test is considered as failed.
#    timeout: 600
#
#  # You can specify smoke test definitions here. If a smoke test is triggered,
#  # it will deep update the main test configuration with the values provided
#  # here. Smoke tests will automatically run with IS_SMOKE_TEST=1 as an
#  # environment variable and receive the --smoke-test flag as a parameter in the
#  # run script.
#  smoke_test:
#    # Smoke tests can have different frequencies. A smoke test is only triggered
#    # when the regular test is not matched.
#    frequency: nightly
#    # Here we adjust the run timeout down and run on fewer nodes. The test script
#    # remains the same.
#    run:
#      timeout: 300
#      wait_for_nodes:
#        num_nodes: 4
#        timeout: 600
#
#  # After the test finishes, this handler (in alerts/) will process the results.
#  # It can then let the test fail, e.g. if a metric regression is observed.
#  alert: default
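# For illustration only (not a registered test): if the smoke test in the
# example above were triggered, its values would be deep-merged into the base
# configuration, so the effective run section would become:
#
#   run:
#     timeout: 300                             # overridden by smoke_test.run
#     prepare: python prepare.py               # inherited from the base test
#     prepare_timeout: 300                     # inherited from the base test
#     script: python workloads/train_small.py  # inherited from the base test
#     wait_for_nodes:
#       num_nodes: 4                           # overridden by smoke_test.run
#       timeout: 600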
#######################
# Cluster scaling tests
#######################

- name: cluster_tune_scale_up_down
  group: Cluster tests
  working_dir: cluster_tests
  frequency: nightly
  team: ml
  cluster:
    byod: {}
    cluster_compute: cpt_autoscaling_1-3_aws.yaml
  run:
    timeout: 3600
    script: python workloads/tune_scale_up_down.py
    wait_for_nodes:
      num_nodes: 0
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: cpt_autoscaling_1-3_gce.yaml
  alert: default

############################
# Batch Inference Benchmarks
############################

# 10 GB image classification raw images with 1 GPU.
# 1 g4dn.4xlarge
- name: torch_batch_inference_1_gpu_10gb_raw
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_1_cpu_16_aws.yaml
  run:
    timeout: 500
    script: python gpu_batch_inference.py --data-directory=10G-image-data-synthetic-raw --data-format raw
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_1_cpu_16_gce.yaml

# 10 GB image classification parquet with 1 GPU.
# 1 g4dn.4xlarge
- name: torch_batch_inference_1_gpu_10gb_parquet
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_1_cpu_16_aws.yaml
  run:
    timeout: 500
    script: python gpu_batch_inference.py --data-directory=10G-image-data-synthetic-raw-parquet --data-format parquet
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_1_cpu_16_gce.yaml

# 300 GB image classification raw images with 16 GPUs
# 4 g4dn.12xlarge
- name: torch_batch_inference_16_gpu_300gb_raw
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_4x4_aws.yaml
  run:
    timeout: 1000
    script: python gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw --data-format raw
    wait_for_nodes:
      num_nodes: 4
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_4x4_gce.yaml

- name: chaos_torch_batch_inference_16_gpu_300gb_raw
  group: data-tests
  working_dir: nightly_tests
  stable: false
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: dataset/compute_gpu_4x4_aws.yaml
  run:
    timeout: 1000
    prepare: python setup_chaos.py --max-to-kill 2 --kill-delay 30
    script: python dataset/gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw --data-format raw
    wait_for_nodes:
      num_nodes: 4
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: dataset/compute_gpu_4x4_gce.yaml

# 300 GB image classification parquet data with 16 GPUs
# 4 g4dn.12xlarge
- name: torch_batch_inference_16_gpu_300gb_parquet
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_4x4_aws.yaml
  run:
    timeout: 1000
    script: python gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw-parquet --data-format parquet
    wait_for_nodes:
      num_nodes: 4
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_4x4_gce.yaml

# 10 TB image classification parquet data with heterogeneous cluster
# 10 g4dn.12xlarge, 10 m5.16xlarge
- name: torch_batch_inference_hetero_10tb_parquet
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: weekly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_hetero_10x10_aws.yaml
  run:
    timeout: 2000
    script: python gpu_batch_inference.py --data-directory 10T-image-data-synthetic-raw-parquet --data-format parquet
    wait_for_nodes:
      num_nodes: 20
  alert: default

#########################
# AIR release tests
#########################

- name: tune_with_frequent_pausing
  group: AIR tests
  working_dir: air_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      runtime_env:
        - RAY_memory_usage_threshold=0.5
        - automatic_object_spilling_enabled=0
    cluster_compute: frequent_pausing/compute_config_aws.yaml
  run:
    timeout: 600  # 10min
    long_running: true
    script: python frequent_pausing/script.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: frequent_pausing/compute_config_gce.yaml
  alert: default

- name: long_running_horovod_tune_test
  group: AIR tests
  working_dir: air_tests
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_horovod_master_test.sh
    cluster_compute: horovod/compute_tpl_aws.yaml
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: horovod/compute_tpl_gce.yaml
  run:
    timeout: 36000
    script: python horovod/workloads/horovod_tune_test.py
    long_running: true
    wait_for_nodes:
      num_nodes: 2
  smoke_test:
    frequency: manual
    run:
      timeout: 3600
  alert: default

# Ray AIR distributed Torch benchmarks
- name: air_benchmark_torch_mnist_cpu_4x1
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_cpu_4_aws.yaml
  run:
    timeout: 3600
    script: python workloads/torch_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 4 --cpus-per-worker 8
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_cpu_4_gce.yaml
  alert: default

- name: air_benchmark_torch_mnist_gpu_4x4
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_4x4_aws.yaml
  run:
    timeout: 4800
    script: python workloads/torch_benchmark.py run --num-runs 3 --num-epochs 120 --num-workers 16 --cpus-per-worker 4 --batch-size 1024 --use-gpu
    wait_for_nodes:
      num_nodes: 4
  smoke_test:
    frequency: nightly
    cluster:
      cluster_compute: compute_gpu_2x2_aws.yaml
    run:
      timeout: 3600
      script: python workloads/torch_benchmark.py run --num-runs 3 --num-epochs 60 --num-workers 4 --cpus-per-worker 4 --batch-size 512 --use-gpu
      wait_for_nodes:
        num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_4x4_gce.yaml
      smoke_test:
        frequency: manual
  alert: default

- name: air_benchmark_torch_mnist_cpu_1x4
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_cpu_1_aws.yaml
  run:
    timeout: 3600
    script: python workloads/torch_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 4 --cpus-per-worker 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_cpu_1_gce.yaml
  alert: default

- name: air_benchmark_torch_mnist_cpu_4x4
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_cpu_4_aws.yaml
  run:
    timeout: 5400
    script: python workloads/torch_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 16 --cpus-per-worker 2
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_cpu_4_gce.yaml
  alert: default

- name: air_benchmark_tune_torch_mnist
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_cpu_8_aws.yaml
  run:
    timeout: 3600
    script: python workloads/tune_torch_benchmark.py --num-runs 3 --num-trials 8 --num-workers 4
    wait_for_nodes:
      num_nodes: 8
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_cpu_8_gce.yaml
  alert: default

- name: air_benchmark_tune_torch_mnist_gpu
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_4x4_aws.yaml
  run:
    timeout: 3600
    script: python workloads/tune_torch_benchmark.py --num-runs 2 --num-trials 4 --num-workers 4 --use-gpu
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_4x4_gce.yaml
  alert: default

# Ray AIR distributed Tensorflow benchmarks
- name: air_benchmark_tensorflow_mnist_cpu_4x1
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_cpu_4_aws.yaml
  run:
    timeout: 5400
    script: python workloads/tensorflow_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 4 --cpus-per-worker 8
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_cpu_4_gce.yaml
  alert: default

- name: air_benchmark_tensorflow_mnist_cpu_1x4
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_cpu_1_aws.yaml
  run:
    timeout: 5400
    script: python workloads/tensorflow_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 4 --cpus-per-worker 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_cpu_1_gce.yaml
  alert: default

- name: air_benchmark_tensorflow_mnist_cpu_4x4
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  stable: false
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_cpu_4_aws.yaml
  run:
    timeout: 5400
    script: python workloads/tensorflow_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 16 --cpus-per-worker 2
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_cpu_4_gce.yaml
  alert: default

- name: air_benchmark_tensorflow_mnist_gpu_4x4
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: weekly
  team: ml
  stable: false
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_4x4_aws.yaml
  run:
    timeout: 5400
    script: python workloads/tensorflow_benchmark.py run --num-runs 3 --num-epochs 200 --num-workers 16 --cpus-per-worker 4 --batch-size 1024 --use-gpu
    wait_for_nodes:
      num_nodes: 4
  smoke_test:
    frequency: nightly
    cluster:
      cluster_compute: compute_gpu_2x2_aws.yaml
    run:
      script: python workloads/tensorflow_benchmark.py run --num-runs 3 --num-epochs 60 --num-workers 4 --cpus-per-worker 4 --batch-size 512 --use-gpu
      wait_for_nodes:
        num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_4x4_gce.yaml
      smoke_test:
        frequency: manual
  alert: default

- name: air_benchmark_pytorch_training_e2e_gpu_1x1_20gb
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_1_aws.yaml
  run:
    timeout: 3600
    script: python workloads/pytorch_training_e2e.py --data-size-gb 20
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_1_gce.yaml

- name: air_benchmark_pytorch_training_e2e_gpu_4x4_100gb
  group: AIR tests
  working_dir: air_tests/air_benchmarks
  frequency: nightly
  team: ml
  stable: false
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_gpu_4x4_aws.yaml
  run:
    timeout: 10800
    script: python workloads/pytorch_training_e2e.py --data-size-gb=100 --num-workers=16
    wait_for_nodes:
      num_nodes: 4
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_4x4_gce.yaml

# Test tiny and medium input files to check that performance stays about
# constant.
- name: ray-data-resnet50-ingest-file-size-benchmark
  group: AIR tests
  working_dir: air_tests/air_benchmarks/mlperf-train
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      runtime_env:
        - RAY_task_oom_retries=50
        - RAY_min_memory_free_bytes=1000000000
    cluster_compute: compute_cpu_16.yaml
  run:
    timeout: 3600
    script: bash file_size_benchmark.sh
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gce_cpu_16.yaml

# Test huge files to check that we do not OOM.
- name: ray-data-resnet50-ingest-out-of-memory-benchmark
  group: AIR tests
  working_dir: air_tests/air_benchmarks/mlperf-train
  stable: false
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      runtime_env:
        - RAY_task_oom_retries=50
        - RAY_min_memory_free_bytes=1000000000
    cluster_compute: compute_cpu_16.yaml
  run:
    timeout: 3600
    script: bash oom_benchmark.sh
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gce_cpu_16.yaml

#######################
# AIR examples
#######################

# Test additional CPU nodes for preprocessing.
- name: air_example_dreambooth_finetuning
  group: AIR examples
  working_dir: air_examples/dreambooth
  stable: false
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: dreambooth_compute_aws.yaml
  run:
    timeout: 1800
    script: pip install -Ur dreambooth/requirements.txt && bash dreambooth_run.sh
    artifact_path: /tmp/artifacts/example_out.jpg
  # variations: A10G not available on GCE, yet.
- name: air_example_dreambooth_finetuning_lora
  group: AIR examples
  working_dir: air_examples/dreambooth
  stable: false
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: dreambooth_compute_aws.yaml
  run:
    timeout: 1800
    script: pip install -Ur dreambooth/requirements.txt && bash dreambooth_run.sh --lora
    artifact_path: /tmp/artifacts/example_out.jpg

- name: air_example_gptj_deepspeed_fine_tuning
  group: AIR examples
  working_dir: air_examples/gptj_deepspeed_finetuning
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_gptj_test.sh
    cluster_compute: gptj_deepspeed_compute_aws.yaml
  run:
    timeout: 4500
    script: python test_myst_doc.py --path gptj_deepspeed_fine_tuning.ipynb

- name: air_example_dolly_v2_lightning_fsdp_finetuning
  group: AIR examples
  working_dir: air_examples/dolly_v2_lightning_fsdp_finetuning
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_dolly_test.sh
    cluster_compute: dolly_v2_fsdp_compute_aws.yaml
  run:
    timeout: 4700
    script: python test_myst_doc.py --path lightning-llm-finetuning-7b.ipynb
  # variations: TODO(jungong): add GCP variation.

- name: air_example_vicuna_13b_lightning_deepspeed_finetuning
  group: AIR examples
  working_dir: air_examples/vicuna_13b_lightning_deepspeed_finetuning
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_vicuna_test.sh
    cluster_compute: vicuna_13b_deepspeed_compute_aws.yaml
  run:
    timeout: 4700
    script: python test_myst_doc.py --path vicuna_13b_lightning_deepspeed_finetune.ipynb

#####################################
# Workspace templates release tests #
#####################################

- name: workspace_template_batch_inference
  group: Workspace templates
  working_dir: workspace_templates/01_batch_inference
  frequency: nightly-3x
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: ../testing/compute_configs/gpu/aws.yaml
  run:
    timeout: 600
    script: jupyter nbconvert --to script --output _test start.ipynb && ipython _test.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../testing/compute_configs/gpu/gce.yaml

- name: workspace_template_many_model_training
  group: Workspace templates
  working_dir: workspace_templates/02_many_model_training
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: ../testing/compute_configs/cpu/aws.yaml
  run:
    timeout: 600
    script: jupyter nbconvert --to script --output _test start.ipynb && ipython _test.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../testing/compute_configs/cpu/gce.yaml

- name: workspace_template_serving_stable_diffusion
  group: Workspace templates
  working_dir: workspace_templates/03_serving_stable_diffusion
  frequency: nightly-3x
  team: serve
  cluster:
    byod:
      type: gpu
      post_build_script: byod_stable_diffusion.sh
    cluster_compute: ../testing/compute_configs/gpu/aws.yaml
  run:
    timeout: 600
    script: jupyter nbconvert --to script --output _test start.ipynb && ipython _test.py && serve run app:entrypoint --non-blocking && python query.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../testing/compute_configs/gpu/gce.yaml

- name: workspace_template_finetuning_llms_with_deepspeed_llama_2_7b
  group: Workspace templates
  working_dir: workspace_templates/04_finetuning_llms_with_deepspeed
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: cu123  # This needs to be in sync with requirements under go/llm-forge.
      post_build_script: byod_finetune_llvms.sh
    cluster_compute: ../testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_7b.yaml
  run:
    timeout: 1000
    script: chmod +x ./run_llama_ft.sh && ./run_llama_ft.sh --size=7b --as-test
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../testing/compute_configs/04_finetuning_llms_with_deepspeed/gce_7b.yaml

- name: workspace_template_finetuning_llms_with_deepspeed_llama_2_7b_lora
  group: Workspace templates
  working_dir: workspace_templates/04_finetuning_llms_with_deepspeed
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: cu123  # This needs to be in sync with requirements under go/llm-forge.
      post_build_script: byod_finetune_llvms.sh
    cluster_compute: ../testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_7b.yaml
  run:
    timeout: 1000
    script: chmod +x ./run_llama_ft.sh && ./run_llama_ft.sh --size=7b --lora --as-test
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../testing/compute_configs/04_finetuning_llms_with_deepspeed/gce_7b.yaml

#######################
# ML user tests
#######################

- name: ml_user_horovod_user_test_latest
  group: ML user tests
  working_dir: ml_user_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_horovod_test.sh
    cluster_compute: horovod/compute_tpl_aws.yaml
  run:
    timeout: 1200
    script: python horovod/horovod_user_test.py
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: horovod/compute_tpl_gce.yaml
  alert: default

- name: ml_user_horovod_user_test_master
  group: ML user tests
  working_dir: ml_user_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_horovod_master_test.sh
    cluster_compute: horovod/compute_tpl_aws.yaml
  run:
    timeout: 1200
    script: python horovod/horovod_user_test.py
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: horovod/compute_tpl_gce.yaml
  alert: default

- name: ml_user_train_tensorflow_mnist_test
  group: ML user tests
  working_dir: ml_user_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      runtime_env:
        - TRAIN_PLACEMENT_GROUP_TIMEOUT_S=2000
      type: cu123
    cluster_compute: train/compute_tpl_aws.yaml
  run:
    timeout: 36000
    script: python train/train_tensorflow_mnist_test.py
    wait_for_nodes:
      num_nodes: 3
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: train/compute_tpl_gce.yaml
  alert: default

- name: ml_user_train_torch_linear_test
  group: ML user tests
  working_dir: ml_user_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      runtime_env:
        - TRAIN_PLACEMENT_GROUP_TIMEOUT_S=2000
      type: gpu
    cluster_compute: train/compute_tpl_aws.yaml
  run:
    timeout: 36000
    script: python train/train_torch_linear_test.py
    wait_for_nodes:
      num_nodes: 3
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: train/compute_tpl_gce.yaml
  alert: default

- name: ml_user_tune_rllib_connect_test
  group: ML user tests
  working_dir: ml_user_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: tune_rllib/compute_tpl_aws.yaml
  run:
    timeout: 2000
    script: python tune_rllib/run_connect_tests.py
    wait_for_nodes:
      num_nodes: 9
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: tune_rllib/compute_tpl_gce.yaml
  alert: default

#######################
# Tune cloud tests
#######################

- name: tune_cloud_long_running_cloud_storage
  group: Tune cloud tests
  working_dir: tune_tests/cloud_tests
  frequency: weekly
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_aws_1x4.yaml
  run:
    # 14 hours
    timeout: 50400
    long_running: true
    script: python workloads/long_running_cloud_storage.py s3://tune-cloud-tests/long_running_cloud_storage
  # NOTE: This smoke test is not useful to run because the point of the test
  # is to be long running. This is just for debugging updates to the test quickly.
  smoke_test:
    frequency: manual
    run:
      timeout: 600
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: tpl_gce_1x4.yaml
      run:
        # 14 hours
        timeout: 50400
        long_running: true
        script: python workloads/long_running_cloud_storage.py gs://tune-cloud-tests/long_running_cloud_storage
        wait_for_nodes:
          num_nodes: 1
  alert: long_running_tests

########################
# Tune scalability tests
########################

- name: tune_scalability_bookkeeping_overhead
  group: Tune scalability tests
  working_dir: tune_tests/scalability_tests
  frequency: nightly
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_1x16.yaml
  run:
    timeout: 1200
    script: python workloads/test_bookkeeping_overhead.py
  alert: tune_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: tpl_gce_1x16.yaml

- name: tune_scalability_durable_trainable
  group: Tune scalability tests
  working_dir: tune_tests/scalability_tests
  frequency: nightly
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_16x2.yaml
  run:
    timeout: 900
    script: python workloads/test_durable_trainable.py --bucket s3://tune-cloud-tests/scalability_durable_trainable
    wait_for_nodes:
      num_nodes: 16
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      run:
        timeout: 900
        script: python workloads/test_durable_trainable.py --bucket gs://tune-cloud-tests/scalability_durable_trainable
        wait_for_nodes:
          num_nodes: 16
      cluster:
        cluster_compute: tpl_gce_16x2.yaml
  alert: tune_tests

- name: tune_scalability_durable_multifile_checkpoints
  group: Tune scalability tests
  working_dir: tune_tests/scalability_tests
  frequency: nightly
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_16x2.yaml
  run:
    timeout: 900
    script: python workloads/test_durable_multifile_checkpoints.py --bucket s3://tune-cloud-tests/scalability_durable_multifile_checkpoints
    wait_for_nodes:
      num_nodes: 16
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      run:
        timeout: 900
        script: python workloads/test_durable_multifile_checkpoints.py --bucket gs://tune-cloud-tests/scalability_durable_multifile_checkpoints
        wait_for_nodes:
          num_nodes: 16
      cluster:
        cluster_compute: tpl_gce_16x2.yaml
  alert: tune_tests

- name: tune_scalability_long_running_large_checkpoints
  group: Tune scalability tests
  working_dir: tune_tests/scalability_tests
  frequency: weekly
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_1x32_hd.yaml
  run:
    timeout: 86400
    script: python workloads/test_long_running_large_checkpoints.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: tune_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
      cluster:
        cluster_compute: tpl_gce_1x32_hd.yaml

- name: tune_scalability_network_overhead
  group: Tune scalability tests
  working_dir: tune_tests/scalability_tests
  frequency: weekly
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_100x2.yaml
  run:
    timeout: 750
    prepare_timeout: 1200
    script: python workloads/test_network_overhead.py
    wait_for_nodes:
      num_nodes: 100
  alert: tune_tests
  variations:
    - __suffix__: aws
    - __suffix__: smoke-test
      frequency: nightly
      cluster:
        cluster_compute: tpl_20x2.yaml
      run:
        timeout: 750
        prepare_timeout: 600
        script: python workloads/test_network_overhead.py --smoke-test
        wait_for_nodes:
          num_nodes: 20
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: tpl_gce_100x2.yaml

- name: tune_scalability_result_throughput_cluster
  group: Tune scalability tests
  working_dir: tune_tests/scalability_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_16x64.yaml
  run:
    timeout: 600
    script: python workloads/test_result_throughput_cluster.py
    wait_for_nodes:
      num_nodes: 16
  alert: tune_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: tpl_gce_16x64.yaml

- name: tune_scalability_result_throughput_single_node
  group: Tune scalability tests
  working_dir: tune_tests/scalability_tests
  frequency: nightly
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_1x96.yaml
  run:
    timeout: 600
    script: python workloads/test_result_throughput_single_node.py
  alert: tune_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: tpl_gce_1x96.yaml

############################
# Tune fault tolerance tests
############################

- name: tune_worker_fault_tolerance
  group: Tune fault tolerance tests
  working_dir: tune_tests/fault_tolerance_tests
  stable: true
  frequency: nightly-3x
  team: ml
  cluster:
    byod: {}
    cluster_compute: tpl_aws_16x1.yaml
  run:
    timeout: 5400
    script: python workloads/test_tune_worker_fault_tolerance.py --bucket s3://tune-cloud-tests/worker_fault_tolerance
    wait_for_nodes:
      num_nodes: 16
  # Disabled until we can kill nodes in GCE
  # variations:
  #   - __suffix__: aws
  #   - __suffix__: gce
  #     env: gce
  #     frequency: manual
  #     run:
  #       timeout: 5400
  #       script: python workloads/test_tune_worker_fault_tolerance.py --bucket gs://tune-cloud-tests/worker_fault_tolerance
  #       wait_for_nodes:
  #         num_nodes: 16
  #     cluster:
  #       cluster_compute: tpl_gce_16x1.yaml

########################
# Golden Notebook tests
########################

- name: golden_notebook_torch_tune_serve_test
  group: Golden Notebook tests
  working_dir: golden_notebook_tests
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: gpu_tpl_aws.yaml
  run:
    timeout: 600
    script: python workloads/torch_tune_serve_test.py
    wait_for_nodes:
      num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: gpu_tpl_gce.yaml
  alert: default

#######################
# Long running tests
#######################

- name: long_running_actor_deaths
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: core
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: tpl_cpu_1.yaml
  run:
    timeout: 86400
    script: python workloads/actor_deaths.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
      cluster:
        cluster_compute: tpl_cpu_1_gce.yaml
- name: long_running_apex
  group: Long running tests
  working_dir: long_running_tests
  stable: false
  frequency: weekly
  team: rllib
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: tpl_cpu_3.yaml
  run:
    timeout: 86400
    script: python workloads/apex.py
    long_running: true
    wait_for_nodes:
      num_nodes: 3
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_3_gce.yaml

- name: long_running_impala
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: rllib
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: tpl_cpu_1_large.yaml
  run:
    timeout: 86400
    script: python workloads/impala.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_1_large_gce.yaml

- name: long_running_many_actor_tasks
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: core
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_1.yaml
  run:
    timeout: 86400
    script: python workloads/many_actor_tasks.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_1_gce.yaml

- name: long_running_many_drivers
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: core
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_4.yaml
  run:
    timeout: 86400
    script: python workloads/many_drivers.py --iteration-num=4000
    long_running: true
    wait_for_nodes:
      num_nodes: 4
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_4_gce.yaml

- name: long_running_many_ppo
  group: Long running tests
  working_dir: long_running_tests
  stable: false
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: many_ppo.yaml
  run:
    timeout: 86400
    script: python workloads/many_ppo.py
    long_running: true
    wait_for_nodes:
      num_nodes: 1
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: many_ppo_gce.yaml

- name: long_running_many_tasks
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: core
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_1.yaml
  run:
    timeout: 86400
    script: python workloads/many_tasks.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_1_gce.yaml

- name: long_running_many_tasks_serialized_ids
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: core
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_1.yaml
  run:
    timeout: 86400
    script: python workloads/many_tasks_serialized_ids.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_1_gce.yaml

- name: long_running_node_failures
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: core
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_1.yaml
  run:
    timeout: 86400
    script: python workloads/node_failures.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_1_gce.yaml

- name: long_running_serve
  group: Long running tests
  working_dir: long_running_tests
  frequency: weekly
  team: serve
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_1_large.yaml
  run:
    timeout: 86400
    script: python workloads/serve.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_1_gce.yaml

- name: long_running_serve_failure
  group: Long running tests
  working_dir: long_running_tests
  stable: true
  frequency: weekly
  team: serve
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_1_c5.yaml
  run:
    timeout: 86400
    script: python workloads/serve_failure.py
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 86400
      cluster:
        cluster_compute: tpl_cpu_1_c5_gce.yaml

- name: long_running_many_jobs
  group: Long running tests
  working_dir: long_running_tests
  stable: true
  frequency: weekly
  team: serve
  cluster:
    byod:
      pip:
        # TODO: https://github.com/Farama-Foundation/AutoROM/issues/48
        - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
    cluster_compute: tpl_cpu_1.yaml
  run:
    timeout: 86400
    script: python workloads/long_running_many_jobs.py --num-clients=1
    long_running: true
  smoke_test:
    frequency: nightly
    run:
      timeout: 1800
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: tpl_cpu_1_gce.yaml

- name: long_running_distributed_pytorch_pbt_failure
  group: Long running tests
  working_dir: long_running_distributed_tests
  frequency: weekly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl.yaml
  run:
    timeout: 86400
    script: python workloads/pytorch_pbt_failure.py
    long_running: true
  smoke_test:
    frequency: manual
    run:
      timeout: 3600
  alert: long_running_tests
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      smoke_test:
        frequency: manual
        run:
          timeout: 3600
      cluster:
        cluster_compute: compute_tpl_gce.yaml

########################
# Jobs tests
########################

- name: jobs_basic_local_working_dir
  group: Jobs tests
  working_dir: jobs_tests
  frequency: nightly
  team: serve
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl_4_xlarge.yaml
  run:
    timeout: 600
    script: python workloads/jobs_basic.py --working-dir "workloads"
    wait_for_nodes:
      num_nodes: 4
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_gce_4_xlarge.yaml

- name: jobs_basic_remote_working_dir
  group: Jobs tests
  working_dir: jobs_tests
  frequency: nightly
  team: serve
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl_4_xlarge.yaml
  run:
    timeout: 600
    script: python workloads/jobs_basic.py --working-dir "https://github.com/anyscale/job-services-cuj-examples/archive/refs/heads/main.zip"
    wait_for_nodes:
      num_nodes: 4
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_gce_4_xlarge.yaml

- name: jobs_remote_multi_node
  group: Jobs tests
  team: serve
  frequency: nightly
  working_dir: jobs_tests
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl_4_xlarge.yaml
  run:
    timeout: 600
    script: python workloads/jobs_remote_multi_node.py
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_gce_4_xlarge.yaml

- name: jobs_check_cuda_available
  group: Jobs tests
  team: serve
  frequency: nightly
  working_dir: jobs_tests
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl_gpu_node.yaml
  run:
    timeout: 600
    script: python workloads/jobs_check_cuda_available.py
    wait_for_nodes:
      num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_gce_gpu_node.yaml

- name: jobs_specify_num_gpus
  group: Jobs tests
  team: serve
  frequency: nightly
  working_dir: jobs_tests
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl_gpu_worker.yaml
  run:
    timeout: 600
    script: python workloads/jobs_specify_num_gpus.py --working-dir "workloads"
    wait_for_nodes:
      num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_gce_gpu_worker.yaml

########################
# Runtime env tests
########################

- name: runtime_env_rte_many_tasks_actors
  group: Runtime env tests
  working_dir: runtime_env_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: rte_small.yaml
  run:
    timeout: 600
    script: python workloads/rte_many_tasks_actors.py
    wait_for_nodes:
      num_nodes: 4
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: rte_gce_small.yaml

- name: runtime_env_wheel_urls
  group: Runtime env tests
  working_dir: runtime_env_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: rte_minimal.yaml
  run:
    timeout: 9000
    script: python workloads/wheel_urls.py
    wait_for_nodes:
      num_nodes: 1
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: rte_gce_minimal.yaml

# It seems like the consensus is that this should be tested in CI, and not in a nightly test.
# - name: runtime_env_rte_ray_client
#   group: Runtime env tests
#   working_dir: runtime_env_tests
#   frequency: nightly
#   team: core
#   cluster:
#     cluster_compute: rte_minimal.yaml
#   run:
#     timeout: 600
#     script: python workloads/rte_ray_client.py
#     wait_for_nodes:
#       num_nodes: 1
#   alert: default

########################
# Serve tests
########################

- name: serve_scale_replicas
  group: Serve tests
  working_dir: serve_tests
  frequency: nightly
  team: serve
  cluster:
    byod: {}
    cluster_compute: compute_tpl_single_node_32_cpu.yaml
    cloud_id: cld_wy5a6nhazplvu32526ams61d98
  run:
    timeout: 7200
    long_running: false
    script: python workloads/replica_scalability.py
  alert: default
  variations:
    - __suffix__: aws

- name: serve_multi_deployment_1k_noop_replica
  group: Serve tests
  working_dir: serve_tests
  frequency: nightly
  team: serve
  cluster:
    byod: {}
    cluster_compute: compute_tpl_32_cpu.yaml
    cloud_id: cld_wy5a6nhazplvu32526ams61d98
  run:
    timeout: 7200
    long_running: false
    script: python workloads/multi_deployment_1k_noop_replica.py
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: aws.py312
      python: "3.12"
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_32_cpu_gce.yaml

- name: serve_autoscaling_load_test
  group: Serve tests
  working_dir: serve_tests
  frequency: nightly
  team: serve
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl_single_node_32_cpu.yaml
    cloud_id: cld_wy5a6nhazplvu32526ams61d98
  run:
    timeout: 7200
    long_running: false
    script: python workloads/autoscaling_load_test.py
  alert: default
  variations:
    - __suffix__: aws

- name: serve_microbenchmarks
  group: Serve tests
  working_dir: serve_tests
  frequency: nightly
  team: serve
  cluster:
    byod: {}
    cluster_compute: compute_tpl_single_node_32_cpu.yaml
    cloud_id: cld_wy5a6nhazplvu32526ams61d98
  run:
    timeout: 7200
    long_running: false
    script: python workloads/microbenchmarks.py --run-all
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_single_node_gce.yaml

- name: serve_resnet_benchmark
  group: Serve tests
  working_dir: serve_tests
  frequency: nightly
  team: serve
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_tpl_gpu_node.yaml
    cloud_id: cld_wy5a6nhazplvu32526ams61d98
  run:
    timeout: 7200
    long_running: false
    script: python workloads/serve_resnet_benchmark.py --gpu-env
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_gpu_node_gce.yaml

########################
# Train tests
########################

- name: train_horovod_multi_node_test
  group: Train tests
  working_dir: train_tests/horovod
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
      post_build_script: byod_horovod_test.sh
    cluster_compute: compute_tpl_aws.yaml
  run:
    timeout: 3000
    script: python train_horovod_multi_node_test.py
    wait_for_nodes:
      num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_tpl_gce.yaml
  alert: default

- name: train_multinode_persistence
  group: Train tests
  working_dir: train_tests/multinode_persistence
  frequency: nightly
  team: ml
  cluster:
    byod:
      post_build_script: byod_train_persistence_test.sh
    cluster_compute: compute_aws.yaml
  run:
    timeout: 3000
    script: pytest -v test_persistence.py -s
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gce.yaml
  alert: default

- name: train_colocate_trainer
  group: Train tests
  working_dir: train_tests/colocate_trainer
  frequency: nightly
  team: ml
  cluster:
    byod: {}
    cluster_compute: compute_aws.yaml
  run:
    timeout: 3000
    script: pytest -v test_colocate_trainer.py -s
    wait_for_nodes:
      num_nodes: 4
  alert: default

- name: xgboost_train_batch_inference_benchmark_10G
  group: Train tests
  working_dir: train_tests/xgboost_lightgbm
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_aws_1worker.yaml
  run:
    timeout: 36000
    script: python train_batch_inference_benchmark.py "xgboost" --size=10G
    wait_for_nodes:
      num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gce_1worker.yaml
  smoke_test:
    frequency: manual
    run:
      timeout: 1800
  alert: default

- name: xgboost_train_batch_inference_benchmark_100G
  group: Train tests
  working_dir: train_tests/xgboost_lightgbm
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_aws_10workers.yaml
  run:
    timeout: 36000
    script: python train_batch_inference_benchmark.py "xgboost" --size=100G
    wait_for_nodes:
      num_nodes: 11
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gce_10workers.yaml
  smoke_test:
    frequency: manual
    run:
      timeout: 1800
  alert: default

- name: lightgbm_train_batch_inference_benchmark_10G
  group: Train tests
  working_dir: train_tests/xgboost_lightgbm
  frequency: nightly
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_aws_1worker.yaml
  run:
    timeout: 36000
    script: python train_batch_inference_benchmark.py "lightgbm" --size=10G
    wait_for_nodes:
      num_nodes: 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gce_1worker.yaml
  smoke_test:
    frequency: manual
    run:
      timeout: 1800
  alert: default

- name: lightgbm_train_batch_inference_benchmark_100G
  group: Train tests
  working_dir: train_tests/xgboost_lightgbm
  frequency: nightly-3x
  team: ml
  cluster:
    byod:
      type: gpu
    cluster_compute: compute_aws_10workers.yaml
  run:
    timeout: 36000
    script: python train_batch_inference_benchmark.py "lightgbm" --size=100G
    wait_for_nodes:
      num_nodes: 11
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gce_10workers.yaml
  smoke_test:
    frequency: manual
    run:
      timeout: 1800
  alert: default

########################
# RLlib tests
########################

# ----------------------------------------------------------
# Checkpointing with RLModule and Learner APIs
# ----------------------------------------------------------
- name: rllib_learner_group_checkpointing_multinode
  group: RLlib tests
  working_dir: rllib_tests
  frequency: nightly
  team: rllib
  stable: false
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: multi_node_checkpointing_compute_config.yaml
  run:
    timeout: 3600
    script: pytest checkpointing_tests/test_learner_group_checkpointing.py
    wait_for_nodes:
      num_nodes: 2
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: multi_node_checkpointing_compute_config_gce.yaml

- name: rllib_learner_e2e_module_loading
  group: RLlib tests
  working_dir: rllib_tests
  stable: false
  frequency: nightly
  team: rllib
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: multi_node_checkpointing_compute_config.yaml
  run:
    timeout: 3600
    script: pytest checkpointing_tests/test_e2e_rl_module_restore.py
    wait_for_nodes:
      num_nodes: 2
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: multi_node_checkpointing_compute_config_gce.yaml

# ----------------------------------------------------------
# Learning and benchmarking tests
# ----------------------------------------------------------

# --------------------------
# DreamerV3
# --------------------------
# TODO (sven): Move algo and this test to pytorch
- name: rllib_learning_tests_pong_dreamerv3_tf2
  group: RLlib tests
  working_dir: rllib_tests
  stable: false
  frequency: weekly
  team: rllib
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_dreamerv3_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: 1gpu_4cpus.yaml
  run:
    timeout: 43200  # 12h
    script: python learning_tests/tuned_examples/dreamerv3/atari_100k.py --framework=tf2 --env=ALE/Pong-v5 --num-gpus=1 --stop-reward=15.0 --as-release-test
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: 1gpu_4cpus_gce.yaml

# --------------------------
# PPO
# --------------------------
- name: rllib_learning_tests_pong_ppo_torch
  group: RLlib tests
  working_dir: rllib_tests
  stable: true
  frequency: nightly
  team: rllib
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: 8gpus_96cpus.yaml
  run:
    timeout: 1200
    script: python learning_tests/tuned_examples/ppo/atari_ppo.py --enable-new-api-stack --env=ALE/Pong-v5 --num-gpus=4 --num-env-runners=95 --stop-reward=20.0 --as-release-test
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: 8gpus_96cpus_gce.yaml

# --------------------------
# SAC
# --------------------------
- name: rllib_learning_tests_halfcheetah_sac_torch
  group: RLlib tests
  working_dir: rllib_tests
  stable: true
  frequency: nightly
  team: rllib
  cluster:
    byod:
      type: gpu
      post_build_script: byod_rllib_test.sh
      runtime_env:
        - RLLIB_TEST_NO_JAX_IMPORT=1
        - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin
    cluster_compute: 4gpus_64cpus.yaml
  run:
    timeout: 7200
    script: python learning_tests/tuned_examples/sac/halfcheetah_sac.py --enable-new-api-stack --num-gpus=4 --num-env-runners=8 --stop-reward=1000.0 --as-release-test
  alert: default
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: 4gpus_64cpus_gce.yaml

########################
# Core Nightly Tests
########################

- name: shuffle_100gb
  group: core-multi-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
    cluster_compute: shuffle/shuffle_compute_multi.yaml
  run:
    timeout: 3000
    script: python shuffle/shuffle_test.py --num-partitions=200 --partition-size=500e6
    wait_for_nodes:
      num_nodes: 4
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/shuffle_compute_multi_gce.yaml

- name: stress_test_placement_group
  group: core-multi-test
  working_dir: nightly_tests
  env: aws_perf
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: stress_tests/placement_group_tests_compute.yaml
  run:
    timeout: 7200
    script: python stress_tests/test_placement_group.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stress_tests/placement_group_tests_compute_gce.yaml

- name: decision_tree_autoscaling_20_runs
  group: core-multi-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: decision_tree/autoscaling_compute.yaml
  run:
    timeout: 9600
    script: python decision_tree/cart_with_tree.py --concurrency=20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: decision_tree/autoscaling_compute_gce.yaml

- name: autoscaling_shuffle_1tb_1000_partitions
  group: core-multi-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
    cluster_compute: shuffle/shuffle_compute_autoscaling.yaml
  run:
    timeout: 4000
    script: python shuffle/shuffle_test.py --num-partitions=1000 --partition-size=1e9 --no-streaming
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/shuffle_compute_autoscaling_gce.yaml

- name: microbenchmark
  group: core-daily-test
  team: core
  frequency: nightly
  env: aws_perf
  working_dir: microbenchmark
  cluster:
    byod: {}
    cluster_compute: tpl_64.yaml
  run:
    timeout: 1800
    script: OMP_NUM_THREADS=64 RAY_ADDRESS=local python run_microbenchmark.py
  variations:
    - __suffix__: aws
      repeated_run: 5
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: tpl_64_gce.yaml
    - __suffix__: aws.py312
      frequency: weekly
      python: "3.12"

- name: microbenchmark_unstable
  group: core-daily-test
  team: core
  frequency: nightly
  working_dir: microbenchmark
  stable: false
  cluster:
    byod: {}
    cluster_compute: tpl_64.yaml
  run:
    timeout: 1800
    script: OMP_NUM_THREADS=64 RAY_ADDRESS=local python run_microbenchmark.py --experimental

- name: microbenchmark_gpu_unstable
  group: core-daily-test
  team: core
  frequency: nightly
  working_dir: microbenchmark
  stable: false
  cluster:
    byod:
      type: gpu
    cluster_compute: experimental/compute_gpu_2_aws.yaml
  run:
    timeout: 1800
    script: python experimental/accelerated_dag_gpu_microbenchmark.py

- name: microbenchmark_gpu_multinode_unstable
  group: core-daily-test
  team: core
  frequency: nightly
  working_dir: microbenchmark
  stable: false
  cluster:
    byod:
      type: gpu
    cluster_compute: experimental/compute_gpu_2x1_aws.yaml
  run:
    timeout: 1800
    script: python experimental/accelerated_dag_gpu_microbenchmark.py --distributed

- name: benchmark_worker_startup
  group: core-daily-test
  team: core
  frequency: nightly
  working_dir: benchmark-worker-startup
  stable: false
  cluster:
    byod:
      type: gpu
    cluster_compute: only_head_node_1gpu_64cpu.yaml
  run:
    timeout: 7200
    script: python benchmark_worker_startup.py --num_cpus_in_cluster 64 --num_gpus_in_cluster 64 --num_tasks_or_actors_per_run 64 --num_measurements_per_configuration 5
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: only_head_node_1gpu_64cpu_gce.yaml

- name: dask_on_ray_100gb_sort
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  # https://github.com/ray-project/ray/issues/39165
  stable: false
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
    cluster_compute: dask_on_ray/dask_on_ray_sort_compute_template.yaml
  run:
    timeout: 7200
    script: python dask_on_ray/dask_on_ray_sort.py --nbytes 100_000_000_000 --npartitions 200 --num-nodes 1 --ray --data-dir /tmp/ray --file-path /tmp/ray
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: dask_on_ray/dask_on_ray_sort_compute_template_gce.yaml

- name: dask_on_ray_large_scale_test_spilling
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
    cluster_compute: dask_on_ray/dask_on_ray_stress_compute.yaml
  run:
    timeout: 7200
    script: python dask_on_ray/large_scale_test.py --num_workers 150 --worker_obj_store_size_in_gb 70 --error_rate 0 --data_save_path /tmp/ray
    wait_for_nodes:
      num_nodes: 21
  smoke_test:
    frequency: nightly
    cluster:
      cluster_compute: dask_on_ray/large_scale_dask_on_ray_compute_template.yaml
    run:
      timeout: 7200
      script: python dask_on_ray/large_scale_test.py --num_workers 32 --worker_obj_store_size_in_gb 70 --error_rate 0 --data_save_path /tmp/ray
      wait_for_nodes:
        num_nodes: 5

- name: stress_test_state_api_scale
  group: core-daily-test
  working_dir: nightly_tests
  stable: false
  frequency: nightly
  team: core
  cluster:
    byod:
      runtime_env:
        - RAY_MAX_LIMIT_FROM_API_SERVER=1000000000
        - RAY_MAX_LIMIT_FROM_DATA_SOURCE=1000000000
    cluster_compute: stress_tests/stress_tests_compute_large.yaml
  run:
    timeout: 4200
    script: python stress_tests/test_state_api_scale.py
    wait_for_nodes:
      num_nodes: 7
  smoke_test:
    frequency: nightly
    cluster:
      app_config: stress_tests/state_api_app_config.yaml
      cluster_compute: stress_tests/smoke_test_compute.yaml
    run:
      timeout: 3600
      wait_for_nodes:
        num_nodes: 5
      script: python stress_tests/test_state_api_scale.py --smoke-test
  variations:
    - __suffix__: aws
    - __suffix__: aws.py312
      frequency: manual
      python: "3.12"
      smoke_test:
        frequency: nightly-3x
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stress_tests/stress_tests_compute_large_gce.yaml
      smoke_test:
        frequency: manual

- name: shuffle_20gb_with_state_api
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod:
      runtime_env:
        - RAY_MAX_LIMIT_FROM_API_SERVER=1000000000
        - RAY_MAX_LIMIT_FROM_DATA_SOURCE=1000000000
    cluster_compute: shuffle/shuffle_compute_single.yaml
  run:
    timeout: 1000
    script: python stress_tests/test_state_api_with_other_tests.py nightly_tests/shuffle/shuffle_test.py --test-args="--num-partitions=100 --partition-size=200e6"
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/shuffle_compute_single_gce.yaml

- name: stress_test_many_tasks
  group: core-daily-test
  working_dir: nightly_tests
  env: aws_perf
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: stress_tests/stress_tests_compute.yaml
  run:
    timeout: 14400
    wait_for_nodes:
      num_nodes: 101
    script: python stress_tests/test_many_tasks.py
  smoke_test:
    frequency: nightly
    cluster:
      app_config: stress_tests/stress_tests_app_config.yaml
      cluster_compute: stress_tests/smoke_test_compute.yaml
    run:
      timeout: 3600
      wait_for_nodes:
        num_nodes: 5
      script: python stress_tests/test_many_tasks.py --num-nodes=4 --smoke-test
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stress_tests/stress_tests_compute_gce.yaml
      smoke_test:
        frequency: manual

- name: stress_test_dead_actors
  group: core-daily-test
  working_dir: nightly_tests
  env: aws_perf
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: stress_tests/stress_tests_compute.yaml
  run:
    timeout: 7200
    wait_for_nodes:
      num_nodes: 101
    script: python stress_tests/test_dead_actors.py
  smoke_test:
    frequency: nightly
    cluster:
      app_config: stress_tests/stress_tests_app_config.yaml
      cluster_compute: stress_tests/smoke_test_compute.yaml
    run:
      timeout: 3600
      wait_for_nodes:
        num_nodes: 5
      script: python stress_tests/test_dead_actors.py --num-nodes=4 --num-parents=3 --num-children=3
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stress_tests/stress_tests_compute_gce.yaml
      smoke_test:
        frequency: manual

# The full test is not stable, so run the smoke test only.
# See https://github.com/ray-project/ray/issues/23244.
- name: threaded_actors_stress_test
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: stress_tests/smoke_test_compute.yaml
  run:
    timeout: 3600
    script: python stress_tests/test_threaded_actors.py --test-runtime 1800 --kill-interval_s 30
    wait_for_nodes:
      num_nodes: 5
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stress_tests/smoke_test_compute_gce.yaml

# - name: threaded_actors_stress_test
#   group: core-daily-test
#   working_dir: nightly_tests
#
#   frequency: nightly
#   team: core
#   cluster:
#     cluster_compute: stress_tests/stress_test_threaded_actor_compute.yaml
#
#   run:
#     timeout: 7200
#     script: python stress_tests/test_threaded_actors.py --test-runtime 3600 --kill-interval_s 60
#
#     wait_for_nodes:
#       num_nodes: 201
#       timeout: 600
#
#   smoke_test:
#     frequency: nightly
#     cluster:
#       app_config: stress_tests/stress_tests_app_config.yaml
#       cluster_compute: stress_tests/smoke_test_compute.yaml
#
#     run:
#       timeout: 3600
#       script: python stress_tests/test_threaded_actors.py --test-runtime 1800 --kill-interval_s 30
#
#       wait_for_nodes:
#         num_nodes: 5
#         timeout: 600

- name: stress_test_many_runtime_envs
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: stress_tests/smoke_test_compute.yaml
  run:
    timeout: 14400
    wait_for_nodes:
      num_nodes: 5
    script: python stress_tests/test_many_runtime_envs.py --num_runtime_envs=100 --num_tasks=10000
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stress_tests/smoke_test_compute_gce.yaml
      smoke_test:
        frequency: manual

- name: single_node_oom
  group: core-daily-test
  working_dir: nightly_tests
  # TODO: https://github.com/ray-project/ray/issues/47596
  stable: false
  frequency: nightly
  team: core
  env: aws_perf
  cluster:
    byod: {}
    cluster_compute: stress_tests/stress_tests_single_node_oom_compute.yaml
  run:
    timeout: 1000
    script: python stress_tests/test_parallel_tasks_memory_pressure.py --num-tasks 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stress_tests/stress_tests_single_node_oom_compute_gce.yaml
- name: tune_air_oom
  group: core-daily-test
  working_dir: air_tests
  stable: false
  frequency: nightly
  team: core
  cluster:
    byod:
      runtime_env:
        - RAY_memory_usage_threshold=0.7
        - RAY_task_oom_retries=-1
    cluster_compute: oom/stress_tests_tune_air_oom_compute.yaml
  run:
    timeout: 3600
    script: bash oom/tune_air_oom.sh

- name: dask_on_ray_1tb_sort
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly-3x
  team: core
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
    cluster_compute: dask_on_ray/1tb_sort_compute.yaml
  run:
    timeout: 7200
    script: python dask_on_ray/dask_on_ray_sort.py --nbytes 1_000_000_000_000 --npartitions 1000 --num-nodes 31 --ray --data-dir /tmp/ray --s3-bucket core-nightly-test
    wait_for_nodes:
      num_nodes: 32

- name: many_nodes_actor_test_on_v2
  group: core-daily-test
  working_dir: benchmarks
  frequency: nightly-3x
  team: core
  cluster:
    byod: {}
    cluster_compute: distributed/many_nodes_tests/compute_config.yaml
  run:
    timeout: 3600
    # 2 cpus per node x 1000 nodes / 0.2 cpus per actor = 10k
    # 2 cpus per node x 2000 nodes / 0.2 cpus per actor = 20k
    script: python distributed/many_nodes_tests/actor_test.py --no-wait --cpus-per-actor=0.2 --total-actors 10000 20000
    wait_for_nodes:
      num_nodes: 500
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: distributed/many_nodes_tests/compute_config_gce.yaml

#- name: many_nodes_multi_master_test
#  group: core-daily-test
#  working_dir: nightly_tests
#
#  frequency: nightly-3x
#  team: core
#  cluster:
#    cluster_compute: many_nodes_tests/compute_config.yaml
#
#  run:
#    timeout: 7200
#    script: python many_nodes_tests/multi_master_test.py
#    wait_for_nodes:
#      num_nodes: 251
#

- name: pg_autoscaling_regression_test
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: placement_group_tests/compute.yaml
  run:
    timeout: 1200
    script: python placement_group_tests/pg_run.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: placement_group_tests/compute_gce.yaml

- name: placement_group_performance_test
  group: core-daily-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: placement_group_tests/pg_perf_test_compute.yaml
  run:
    timeout: 1200
    script: python placement_group_tests/placement_group_performance_test.py
    wait_for_nodes:
      num_nodes: 5
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: placement_group_tests/pg_perf_test_compute_gce.yaml

#########################
# Core Scalability Tests
#########################

- name: single_node
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly
  team: core
  env: aws_perf
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: single_node.yaml
  run:
    timeout: 12000
    prepare: sleep 0
    script: python single_node/test_single_node.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_gce.yaml

- name: object_store
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly
  team: core
  env: aws_perf
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: object_store.yaml
  run:
    timeout: 3600
    script: python object_store/test_object_store.py
    wait_for_nodes:
      num_nodes: 50
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: object_store_gce.yaml

- name: many_actors
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly-3x
  team: core
  env: aws_perf
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: distributed.yaml
  run:
    timeout: 3600
    script: python distributed/test_many_actors.py
    wait_for_nodes:
      num_nodes: 65
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: distributed_gce.yaml

- name: many_actors_smoke_test
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly
  team: core
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: distributed_smoke_test.yaml
  run:
    timeout: 3600
    script: SMOKE_TEST=1 python distributed/test_many_actors.py
    wait_for_nodes:
      num_nodes: 2

- name: many_tasks
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly
  team: core
  env: aws_perf
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: distributed.yaml
  run:
    timeout: 3600
    script: python distributed/test_many_tasks.py --num-tasks=10000
    wait_for_nodes:
      num_nodes: 65
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: distributed_gce.yaml

- name: many_pgs
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly-3x
  team: core
  env: aws_perf
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: distributed.yaml
  run:
    timeout: 3600
    script: python distributed/test_many_pgs.py
    wait_for_nodes:
      num_nodes: 65
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: distributed_gce.yaml

- name: many_pgs_smoke_test
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly
  team: core
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: distributed_smoke_test.yaml
  run:
    timeout: 3600
    script: SMOKE_TEST=1 python distributed/test_many_pgs.py
    wait_for_nodes:
      num_nodes: 2

- name: many_nodes
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly-3x
  team: core
  env: aws_perf
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: many_nodes.yaml
  run:
    timeout: 3600
    script: python distributed/test_many_tasks.py --num-tasks=1000
    wait_for_nodes:
      num_nodes: 250
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: many_nodes_gce.yaml

- name: scheduling_test_many_0s_tasks_many_nodes
  group: core-scalability-test
  working_dir: benchmarks
  frequency: nightly
  team: core
  cluster:
    byod:
      type: gpu
      runtime_env:
        - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
    cluster_compute: scheduling.yaml
  run:
    timeout: 3600
    script: python distributed/test_scheduling.py --total-num-task=1984000 --num-cpu-per-task=1 --task-duration-s=0 --total-num-actors=32 --num-actors-per-nodes=1
    wait_for_nodes:
      num_nodes: 32
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: scheduling_gce.yaml

# - name: scheduling_test_many_5s_tasks_single_node
#   group: core-scalability-test
#   working_dir: benchmarks
#   frequency: nightly
#   team: core
#   cluster:
#     cluster_compute: scheduling.yaml
#   run:
#     timeout: 3600
#     script: python distributed/test_scheduling.py --total-num-task=1984000 --num-cpu-per-task=1 --task-duration-s=5 --total-num-actors=1
#       --num-actors-per-nodes=1
#     wait_for_nodes:
#       num_nodes: 32
#       timeout: 600
#   stable: false

# - name: scheduling_test_many_5s_tasks_many_nodes
#   group: core-scalability-test
#   working_dir: benchmarks
#   frequency: nightly
#   team: core
#   cluster:
#     cluster_compute: scheduling.yaml
#   run:
#     timeout: 3600
#     script: python distributed/test_scheduling.py --total-num-task=1984000 --num-cpu-per-task=1 --task-duration-s=5 --total-num-actors=32 --num-actors-per-nodes=1
#     wait_for_nodes:
#       num_nodes: 32
#       timeout: 600
#   stable: false

###############
# Dataset tests
###############

- name: parquet_metadata_resolution
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    # Expect the test to finish around 40 seconds.
    timeout: 100
    script: python parquet_metadata_resolution.py --num-files 915 --cloud aws
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml
      run:
        script: python parquet_metadata_resolution.py --num-files 915 --cloud gcp

- name: dataset_random_access
  group: data-tests
  working_dir: nightly_tests/dataset
  stable: false
  frequency: manual
  team: data
  cluster:
    byod:
      type: gpu
      pip:
        - git+https://github.com/ray-project/ray_shuffling_data_loader.git@add-embedding-model
    cluster_compute: pipelined_training_compute.yaml
  run:
    timeout: 1200
    script: python dataset_random_access.py
    wait_for_nodes:
      num_nodes: 15
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: pipelined_training_compute_gce.yaml

- name: stable_diffusion_benchmark
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_stable_diffusion.sh
    cluster_compute: stable_diffusion_benchmark_compute.yaml
  run:
    timeout: 1800
    script: python stable_diffusion_benchmark.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: stable_diffusion_benchmark_compute_gce.yaml

- name: streaming_data_ingest_benchmark_1tb
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: data_ingest_benchmark_compute.yaml
  run:
    timeout: 300
    script: python data_ingest_benchmark.py --dataset-size-gb=1000 --num-workers=20 --streaming
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: data_ingest_benchmark_compute_gce.yaml

- name: streaming_data_ingest_benchmark_100gb_gpu
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: data_ingest_benchmark_compute_gpu.yaml
  run:
    timeout: 300
    script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --streaming --use-gpu
    wait_for_nodes:
      num_nodes: 3
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml

# This test case early-stops the data ingestion iteration on the GPU actors.
# This is a common usage in PyTorch Lightning
# (https://lightning.ai/docs/pytorch/stable/common/trainer.html#limit-train-batches).
# There was a bug in Ray Data that caused a GPU memory leak (see #3.919).
# We add this test case to cover this scenario.
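#
# For illustration only (not executed by this test), the Lightning pattern in
# question looks roughly like the sketch below; `limit_train_batches` is the
# Trainer argument from the docs linked above, while the model and dataloader
# names are placeholders:
#
#   import pytorch_lightning as pl
#   # Consume only 10 training batches per epoch, which ends iteration over
#   # the streaming data iterator early, mid-epoch.
#   trainer = pl.Trainer(limit_train_batches=10, max_epochs=1)
#   trainer.fit(model, train_dataloaders=train_loader)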
- name: streaming_data_ingest_benchmark_100gb_gpu_early_stop
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: data_ingest_benchmark_compute_gpu.yaml
  run:
    timeout: 300
    script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --streaming --use-gpu --early-stop
    wait_for_nodes:
      num_nodes: 3
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml

- name: aggregate_benchmark
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    timeout: 1800
    script: python aggregate_benchmark.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

- name: read_parquet_benchmark_single_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    # Expect the benchmark to finish in 400 seconds.
    timeout: 400
    script: python read_parquet_benchmark.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

- name: read_images_benchmark_single_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    timeout: 1800
    script: python read_images_benchmark.py --single-node
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

# TODO: Re-enable this test once we fix https://github.com/ray-project/ray/issues/40686.
# - name: read_images_benchmark_multi_node
#   group: data-tests
#   working_dir: nightly_tests/dataset
#   frequency: nightly-3x
#   team: data
#   cluster:
#     byod:
#       type: gpu
#     cluster_compute: multi_node_read_images_benchmark_compute.yaml
#   run:
#     timeout: 28800
#     script: python read_images_benchmark.py --multi-node
#   variations:
#     - __suffix__: aws
#     - __suffix__: gce
#       env: gce
#       frequency: manual
#       cluster:
#         cluster_compute: multi_node_read_images_benchmark_compute_gce.yaml

- name: read_images_comparison_microbenchmark_single_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: single_worker_node_0_head_node_benchmark_compute.yaml
  run:
    timeout: 1800
    script: bash run_image_loader_microbenchmark.sh
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

- name: read_images_train_4_gpu
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: multi_node_train_4_workers.yaml
  run:
    timeout: 18000
    script: python multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml

- name: read_images_train_4_gpu_worker_chaos
  group: data-tests
  working_dir: nightly_tests
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: dataset/multi_node_train_4_workers.yaml
  run:
    timeout: 18000
    prepare: python setup_chaos.py --kill-workers --kill-interval 100 --max-to-kill 3 --task-names "ReadImage->Map(wnid_to_index)->Map(crop_and_flip_image)"
    script: python dataset/multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 1
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml

- name: read_images_train_4_gpu_node_chaos
  group: data-tests
  working_dir: nightly_tests
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: dataset/multi_node_train_4_workers.yaml
  run:
    timeout: 18000
    prepare: python setup_chaos.py --kill-interval 200 --max-to-kill 1 --task-names "_RayTrainWorker__execute.get_next"
    script: python dataset/multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 1
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml

- name: read_images_train_16_gpu
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: multi_node_train_16_workers.yaml
  run:
    timeout: 18000
    script: python multi_node_train_benchmark.py --num-workers 16 --file-type image --use-gpu --num-epochs 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml

- name: read_images_train_16_gpu_preserve_order
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: multi_node_train_16_workers.yaml
  run:
    timeout: 18000
    script: python multi_node_train_benchmark.py --num-workers 16 --file-type image --preserve-order --use-gpu --num-epochs 2
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml

- name: read_parquet_train_4_gpu
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: multi_node_train_4_workers.yaml
  run:
    timeout: 3600
    script: python multi_node_train_benchmark.py --num-workers 4 --file-type parquet --target-worker-gb 50 --use-gpu
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml

- name: read_parquet_train_16_gpu
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: multi_node_train_16_workers.yaml
  run:
    timeout: 3600
    script: python multi_node_train_benchmark.py --num-workers 16 --file-type parquet --target-worker-gb 50 --use-gpu
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml

- name: read_images_train_1_gpu_5_cpu
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: multi_node_train_1g5c.yaml
  run:
    timeout: 2400
    script: python multi_node_train_benchmark.py --num-workers 1 --file-type image --use-gpu --num-epochs 2 --skip-train-model --prefetch-batches 16 --batch-size -1 --disable-locality-with-output
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: compute_gpu_1g5c_gce.yaml

- name: read_tfrecords_benchmark_single_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
      post_build_script: byod_install_mosaicml.sh
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    # Expect the benchmark to finish around 22 minutes.
    timeout: 1800
    script: python read_tfrecords_benchmark.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

- name: map_batches_benchmark_single_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    # Expect the benchmark to finish around 30 minutes.
    timeout: 2400
    script: python map_batches_benchmark.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

- name: iter_tensor_batches_benchmark_single_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    # Expect the benchmark to finish around 30 minutes.
    timeout: 2400
    script: python iter_tensor_batches_benchmark.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

- name: iter_tensor_batches_benchmark_multi_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: multi_node_benchmark_compute.yaml
  run:
    # Expect the benchmark to finish within 90 minutes.
    timeout: 5400
    script: python iter_tensor_batches_benchmark.py --data-size-gb=10
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: multi_node_benchmark_compute_gce.yaml

- name: iter_batches_benchmark_single_node
  group: data-tests
  working_dir: nightly_tests/dataset
  frequency: nightly
  team: data
  cluster:
    byod:
      type: gpu
    cluster_compute: single_node_benchmark_compute.yaml
  run:
    # Expect the benchmark to finish around 12 minutes.
    timeout: 1080
    script: python iter_batches_benchmark.py
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: single_node_benchmark_compute_gce.yaml

- name: dataset_shuffle_random_shuffle_1tb
  group: data-tests
  working_dir: nightly_tests
  frequency: nightly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
      pip:
        - ray[default]
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml

- name: dataset_shuffle_sort_1tb
  group: data-tests
  working_dir: nightly_tests
  frequency: nightly
  team: data
  stable: false
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
      pip:
        - ray[default]
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml

- name: dataset_shuffle_push_based_random_shuffle_1tb
  group: data-tests
  working_dir: nightly_tests
  stable: false
  frequency: nightly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
      pip:
        - ray[default]
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml

- name: dataset_shuffle_push_based_sort_1tb
  group: data-tests
  working_dir: nightly_tests
  frequency: nightly
  team: data
  stable: false
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
      pip:
        - ray[default]
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml
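
# The RAY_object_spilling_config value in the next test is a single-line JSON
# string; expanded purely for readability, it is equivalent to:
#   {"type": "filesystem",
#    "params": {"directory_path": ["/tmp/data0", "/tmp/data1"]}}
# i.e. spilled objects are written to two local directories on each node.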
- name: dataset_shuffle_push_based_random_shuffle_100tb
  group: data-tests
  working_dir: nightly_tests
  stable: false
  frequency: weekly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_object_spilling_config={"type":"filesystem","params":{"directory_path":["/tmp/data0","/tmp/data1"]}}
      post_build_script: byod_dataset_shuffle.sh
    cluster_compute: shuffle/100tb_shuffle_compute.yaml
  run:
    timeout: 28800
    script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=100000 --partition-size=1e9 --shuffle
    wait_for_nodes:
      num_nodes: 100
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/100tb_shuffle_compute_gce.yaml
      run:
        timeout: 28800
        script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=40000 --partition-size=1e9 --shuffle
        wait_for_nodes:
          num_nodes: 100

##################
# Core Chaos tests
##################

- name: chaos_many_tasks_kill_raylet
  group: core-nightly-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: chaos_test/compute_template.yaml
  run:
    timeout: 3600
    wait_for_nodes:
      num_nodes: 10
    prepare: python setup_chaos.py --no-start
    script: python chaos_test/test_chaos_basic.py --workload=tasks
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: chaos_test/compute_template_gce.yaml

- name: chaos_many_tasks_terminate_instance
  group: core-nightly-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: chaos_test/compute_template.yaml
  run:
    timeout: 3600
    wait_for_nodes:
      num_nodes: 10
    prepare: python setup_chaos.py --no-start --chaos TerminateEC2Instance
    script: python chaos_test/test_chaos_basic.py --workload=tasks
  variations:
    - __suffix__: aws

- name: chaos_many_actors_kill_raylet
  group: core-nightly-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: chaos_test/compute_template.yaml
  run:
    timeout: 4200
    wait_for_nodes:
      num_nodes: 10
    prepare: python setup_chaos.py --no-start
    script: python chaos_test/test_chaos_basic.py --workload=actors
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: chaos_test/compute_template_gce.yaml

- name: chaos_many_actors_terminate_instance
  group: core-nightly-test
  working_dir: nightly_tests
  frequency: nightly
  team: core
  cluster:
    byod: {}
    cluster_compute: chaos_test/compute_template.yaml
  run:
    timeout: 4200
    wait_for_nodes:
      num_nodes: 10
    prepare: python setup_chaos.py --no-start --chaos TerminateEC2Instance
    script: python chaos_test/test_chaos_basic.py --workload=actors
  variations:
    - __suffix__: aws

- name: chaos_dask_on_ray_large_scale_test_no_spilling
  group: data-tests
  working_dir: nightly_tests
  frequency: nightly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_lineage_pinning_enabled=1
    cluster_compute: dask_on_ray/chaos_dask_on_ray_stress_compute.yaml
  run:
    timeout: 7200
    wait_for_nodes:
      num_nodes: 21
    prepare: python setup_chaos.py --kill-interval 100
    script: python dask_on_ray/large_scale_test.py --num_workers 20 --worker_obj_store_size_in_gb 20 --error_rate 0 --data_save_path /tmp/ray
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: dask_on_ray/dask_on_ray_stress_compute_gce.yaml

- name: chaos_dask_on_ray_large_scale_test_spilling
  group: data-tests
  working_dir: nightly_tests
  frequency: nightly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_lineage_pinning_enabled=1
    cluster_compute: dask_on_ray/dask_on_ray_stress_compute.yaml
  run:
    timeout: 7200
    wait_for_nodes:
      num_nodes: 21
    prepare: python setup_chaos.py --kill-interval 100
    script: python dask_on_ray/large_scale_test.py --num_workers 150 --worker_obj_store_size_in_gb 70 --error_rate 0 --data_save_path /tmp/ray
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: dask_on_ray/dask_on_ray_stress_compute_gce.yaml

- name: chaos_dataset_shuffle_push_based_sort_1tb
  group: data-tests
  working_dir: nightly_tests
  stable: false
  frequency: nightly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_worker_killing_policy=retriable_lifo
      pip:
        - ray[default]
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    prepare: python setup_chaos.py --kill-interval 1200 --max-to-kill 3
    script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml

- name: chaos_dataset_shuffle_sort_1tb
  group: data-tests
  working_dir: nightly_tests
  stable: false
  frequency: nightly
  team: data
  cluster:
    byod:
      runtime_env:
        - RAY_memory_monitor_refresh_ms=0
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    prepare: python setup_chaos.py --kill-interval 900 --max-to-kill 3
    script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml

- name: chaos_dataset_shuffle_random_shuffle_1tb
  group: data-tests
  working_dir: nightly_tests
  stable: false
  frequency: nightly
  team: data
  cluster:
    # Leave the OOM monitor disabled while the test is marked unstable.
    byod:
      runtime_env:
        - RAY_memory_monitor_refresh_ms=0
      pip:
        - ray[default]
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    prepare: python setup_chaos.py --kill-interval 600 --max-to-kill 2
    script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml

- name: chaos_dataset_shuffle_push_based_random_shuffle_1tb
  group: data-tests
  working_dir: nightly_tests
  stable: false
  frequency: nightly
  team: data
  cluster:
    # Leave the OOM monitor disabled while the test is marked unstable.
    byod:
      runtime_env:
        - RAY_memory_monitor_refresh_ms=0
    cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml
  run:
    timeout: 7200
    prepare: python setup_chaos.py --kill-interval 600 --max-to-kill 2
    script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle
    wait_for_nodes:
      num_nodes: 20
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml

#####################
# Observability tests
#####################

- name: agent_stress_test
  group: core-observability-test
  working_dir: dashboard
  frequency: nightly
  team: core
  cluster:
    byod:
      type: gpu
      runtime_env:
        - RAY_INTERNAL_MEM_PROFILE_COMPONENTS=dashboard_agent
      post_build_script: byod_agent_stress_test.sh
    cluster_compute: agent_stress_compute.yaml
  run:
    timeout: 14400
    script: python mem_check.py --working-dir .
  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_compute: agent_stress_compute_gce.yaml

- name: k8s_serve_ha_test
  group: k8s-test
  working_dir: k8s_tests
  stable: false
  frequency: nightly
  team: serve
  cluster:
    byod: {}
    cluster_compute: compute_tpl.yaml
  run:
    timeout: 28800  # 8h
    prepare: bash prepare.sh
    script: python run_gcs_ft_on_k8s.py

- name: aws_cluster_launcher
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: aws/tests/aws_compute.yaml
  run:
    timeout: 2400
    script: python launch_and_verify_cluster.py aws/tests/aws_cluster.yaml --num-expected-nodes 2 --retries 10

- name: aws_cluster_launcher_nightly_image
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: aws/tests/aws_compute.yaml
  run:
    timeout: 2400
    script: python launch_and_verify_cluster.py aws/tests/aws_cluster.yaml --num-expected-nodes 2 --retries 10 --docker-override nightly

- name: aws_cluster_launcher_latest_image
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: aws/tests/aws_compute.yaml
  run:
    timeout: 2400
    script: python launch_and_verify_cluster.py aws/tests/aws_cluster.yaml --num-expected-nodes 2 --retries 10 --docker-override latest

- name: aws_cluster_launcher_release_image
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  frequency: manual
  team: clusters
  cluster:
    byod: {}
    cluster_compute: aws/tests/aws_compute.yaml
  run:
    timeout: 2400
    script: python launch_and_verify_cluster.py aws/tests/aws_cluster.yaml --num-expected-nodes 2 --retries 10 --docker-override commit

- name: aws_cluster_launcher_minimal
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: aws/tests/aws_compute.yaml
  run:
    timeout: 1200
    script: python launch_and_verify_cluster.py aws/example-minimal.yaml

- name: aws_cluster_launcher_full
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: aws/tests/aws_compute.yaml
  run:
    timeout: 3000
    script: python launch_and_verify_cluster.py aws/example-full.yaml --num-expected-nodes 2 --retries 20 --docker-override latest

- name: gcp_cluster_launcher_minimal
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  stable: true
  env: gce
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml
  run:
    timeout: 1200
    script: python launch_and_verify_cluster.py gcp/example-minimal.yaml

- name: gcp_cluster_launcher_full
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  stable: true
  env: gce
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml
  run:
    timeout: 4800
    script: python launch_and_verify_cluster.py gcp/example-full.yaml --num-expected-nodes 2 --retries 30

- name: gcp_cluster_launcher_latest_image
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  stable: true
  env: gce
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml
  run:
    timeout: 3600
    script: python launch_and_verify_cluster.py gcp/example-full.yaml --num-expected-nodes 2 --retries 20 --docker-override latest

- name: gcp_cluster_launcher_nightly_image
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  stable: true
  env: gce
  frequency: nightly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml
  run:
    timeout: 3600
    script: python launch_and_verify_cluster.py gcp/example-full.yaml --num-expected-nodes 2 --retries 20 --docker-override nightly

- name: gcp_cluster_launcher_release_image
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  stable: true
  env: gce
  frequency: manual
  team: clusters
  cluster:
    byod: {}
    cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml
  run:
    timeout: 3600
    script: python launch_and_verify_cluster.py gcp/example-full.yaml --num-expected-nodes 2 --retries 20 --docker-override commit

- name: gcp_cluster_launcher_gpu_docker
  group: cluster-launcher-test
  working_dir: ../python/ray/autoscaler/
  stable: true
  env: gce
  frequency: weekly
  team: clusters
  cluster:
    byod: {}
    cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml
  run:
    timeout: 1200
    script: python launch_and_verify_cluster.py gcp/example-gpu-docker.yaml

- name: autoscaler_aws
  group: autoscaler-test
  working_dir: autoscaling_tests
  stable: false
  frequency: nightly
  team: core
  cluster:
    # Leave the OOM monitor disabled while the test is marked unstable.
    byod:
      runtime_env:
        - RAY_memory_monitor_refresh_ms=0
      pip:
        - ray[default]
    cluster_compute: aws.yaml
  run:
    timeout: 1800
    script: python run.py
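
# Note on the `variations` mechanism used throughout this file (a reading of
# the entries above, not an authoritative spec): each `__suffix__` entry
# produces a test named `<name>.<suffix>`, and any other keys in that entry
# override the corresponding fields of the base test. For example, given a
# hypothetical base test:
#
#   - name: my_test
#     frequency: nightly
#     cluster:
#       cluster_compute: compute_aws.yaml
#     variations:
#       - __suffix__: aws
#       - __suffix__: gce
#         env: gce
#         frequency: manual
#         cluster:
#           cluster_compute: compute_gce.yaml
#
# `my_test.aws` keeps the base configuration unchanged, while `my_test.gce`
# runs manually on GCE with the GCE compute config.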