run_microbenchmark.py

import json
import os
import argparse


def to_dict_key(key: str):
    # Normalize a human-readable benchmark name into a JSON-friendly key:
    # spaces, colons, and dashes become underscores; parentheses are dropped.
    for r in [" ", ":", "-"]:
        key = key.replace(r, "_")
    for r in ["(", ")"]:
        key = key.replace(r, "")
    return key


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--experimental",
        action="store_true",
        default=False,
        help="If passed, run ray.experimental microbenchmarks.",
    )
    args = parser.parse_args()

    # Pick the benchmark suite to run based on the flag.
    if args.experimental:
        from ray._private.ray_experimental_perf import main
    else:
        from ray._private.ray_perf import main

    # Each non-None result is indexed as (name, value, extra).
    results = main() or []
    result_dict = {
        to_dict_key(v[0]): (v[1], v[2]) for v in results if v is not None
    }

    perf_metrics = [
        {
            "perf_metric_name": to_dict_key(v[0]),
            "perf_metric_value": v[1],
            "perf_metric_type": "THROUGHPUT",
        }
        for v in results
        if v is not None
    ]
    result_dict["perf_metrics"] = perf_metrics

    # Write results to TEST_OUTPUT_JSON if set, else /tmp/microbenchmark.json.
    test_output_json = os.environ.get("TEST_OUTPUT_JSON", "/tmp/microbenchmark.json")
    with open(test_output_json, "wt") as f:
        json.dump(result_dict, f)
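
A minimal sketch of the data flow above, assuming each non-None entry returned by main() is a (name, value, extra) tuple; that tuple layout is inferred from the indexing in this script, and the benchmark name and numbers are hypothetical:

    # Hypothetical results list, not produced by this script itself.
    results = [("single client get calls (Plasma Store)", 11204.0, 312.0), None]

    # to_dict_key flattens the name into a JSON-friendly key:
    assert (
        to_dict_key("single client get calls (Plasma Store)")
        == "single_client_get_calls_Plasma_Store"
    )

    # The JSON written to TEST_OUTPUT_JSON would then look like
    # (json.dump serializes the tuple as a list):
    # {
    #   "single_client_get_calls_Plasma_Store": [11204.0, 312.0],
    #   "perf_metrics": [
    #     {
    #       "perf_metric_name": "single_client_get_calls_Plasma_Store",
    #       "perf_metric_value": 11204.0,
    #       "perf_metric_type": "THROUGHPUT"
    #     }
    #   ]
    # }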