---
# Update anyscale/backend/workspace-template.yaml
# <unique-template-id>:
#   emoji: 📊
#   title: Batch Inference
#   description: Description
#   path: Relative path to the template directory, from the Ray root directory
#   labels:
#     - ...
#   cluster_env:
#     ## Some sample `build_id`'s to choose from:
#     ## - anyscaleray-ml240-py39-gpu -> anyscale/ray-ml:2.4.0-py39-gpu
#     ## - anyscale240-py39 -> anyscale/ray:2.4.0-py39
#     build_id: anyscaleray-ml250-py39-gpu
#     ## OR, use a publicly hosted image
#     # byod:
#     #   docker_image: url of docker image
#     #   ray_version: 2.4.0
#   ## Make sure these compute configs don't contain region/cloud ID
#   compute_config:
#     GCP: doc/source/templates/configs/compute/gpu/gce.yaml
#     AWS: doc/source/templates/configs/compute/gpu/aws.yaml
  22. batch-inference-ray-data:
  23. emoji: 📊
  24. title: Batch Inference
  25. description: Parallelize batch inference of a dataset on a distributed Ray cluster with the Ray Data library. This template runs GPU batch inference on an image dataset using a PyTorch model.
  26. path: doc/source/templates/01_batch_inference
  27. labels:
  28. - Ray Data
  29. cluster_env:
  30. build_id: anyscaleray-ml250-py39-gpu
  31. compute_config:
  32. GCP: doc/source/templates/configs/compute/gpu/gce.yaml
  33. AWS: doc/source/templates/configs/compute/gpu/aws.yaml
  34. many-model-training-ray-tune:
  35. emoji: ⚡
  36. title: Many Model Training
  37. description: Train thousands of models in parallel on a distributed Ray cluster using the Ray Tune library. This template trains multiple forecasting models for different partitions of a time-series dataset and selects the best-performing model for each partition.
  38. path: doc/source/templates/02_many_model_training
  39. labels:
  40. - Ray Tune
  41. cluster_env:
  42. build_id: anyscaleray-ml250-py39-gpu
  43. compute_config:
  44. GCP: doc/source/templates/configs/compute/cpu/gce.yaml
  45. AWS: doc/source/templates/configs/compute/cpu/aws.yaml
  46. serve-stable-diffusion-model-ray-serve:
  47. emoji: 📡
  48. title: Serving a Stable Diffusion Model
  49. description: Deploy a stable diffusion model using the Ray Serve library and showcase its capabilities by generating images from text prompts! This template loads a pre-trained stable diffusion model from HuggingFace and serves it to a local endpoint.
  50. path: doc/source/templates/03_serving_stable_diffusion
  51. labels:
  52. - Ray Serve
  53. cluster_env:
  54. byod:
  55. docker_image: us-docker.pkg.dev/anyscale-workspace-templates/workspace-templates/serve-stable-diffusion-model-ray-serve:2.5.0
  56. ray_version: 2.5.0
  57. compute_config:
  58. GCP: doc/source/templates/configs/compute/gpu/gce.yaml
  59. AWS: doc/source/templates/configs/compute/gpu/aws.yaml