cleanup_experiment.py

  1. """
  2. This script automates cleaning up a benchmark/experiment run of some algo
  3. against some config (with possibly more than one tune trial,
  4. e.g. torch=grid_search([True, False])).
  5. Run `python cleanup_experiment.py --help` for more information.
  6. Use on an input directory with trial contents e.g.:
  7. ..
  8. IMPALA_BreakoutNoFrameskip-v4_0_use_pytorch=False_2020-05-11_10-17-54topr3h9k
  9. IMPALA_BreakoutNoFrameskip-v4_0_use_pytorch=False_2020-05-11_13-59-35dqaetxnf
  10. IMPALA_BreakoutNoFrameskip-v4_0_use_pytorch=False_2020-05-11_17-21-28tbhedw72
  11. IMPALA_BreakoutNoFrameskip-v4_2_use_pytorch=True_2020-05-11_10-17-54lv20cgn_
  12. IMPALA_BreakoutNoFrameskip-v4_2_use_pytorch=True_2020-05-11_13-59-35kwzhax_y
  13. IMPALA_BreakoutNoFrameskip-v4_2_use_pytorch=True_2020-05-11_17-21-28a5j0s7za
  14. Then run:
  15. >> python cleanup_experiment.py --experiment-dir [parent dir w/ trial sub-dirs]
  16. >> --output-dir [your out dir] --results-filter dumb_col_2,superfluous_col3
  17. >> --results-max-size [max results file size in kb before(!) zipping]
  18. The script will create one output sub-dir for each trial and only copy
  19. the configuration and the csv results (filtered and every nth row removed
  20. based on the given args).
  21. """

import argparse
import json
import os
import re
import shutil

import yaml
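
# Note: besides these imports, the script shells out to the external `awk`
# and `zip` command line tools via os.system(), so both must be on the PATH.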

parser = argparse.ArgumentParser()
parser.add_argument(
    "--experiment-dir",
    type=str,
    help="Experiment dir in which all sub-runs (seeds) are "
    "located (as sub-dirs). Each sub-run dir must contain the files: "
    "params.json and progress.csv.",
)
parser.add_argument(
    "--output-dir",
    type=str,
    help="The output dir, in which the cleaned up output will be placed.",
)
parser.add_argument(
    "--results-filter",
    type=str,
    help="comma-separated list of csv fields to exclude.",
    default="experiment_id,pid,hostname,node_ip,trial_id,hist_stats/episode_"
    "reward,hist_stats/episode_lengths,experiment_tag",
)
parser.add_argument(
    "--results-max-size",
    type=int,
    help="the max. size of the final results.csv file (in kb). Will erase "
    "every nth line in the original input to reach that goal. "
    "Use 0 for no limit (default=100).",
    default=100,
)


def process_single_run(in_dir, out_dir):
    exp_dir = os.listdir(in_dir)

    # Make sure the trial dir is ok.
    assert (
        "params.json" in exp_dir and "progress.csv" in exp_dir
    ), "params.json or progress.csv not found in {}!".format(in_dir)

    os.makedirs(out_dir, exist_ok=True)

    for file in exp_dir:
        absfile = os.path.join(in_dir, file)
        # Config file -> Convert to yaml and move to output dir.
        if file == "params.json":
            assert os.path.isfile(absfile), "{} not a file!".format(file)
            with open(absfile) as fp:
                contents = json.load(fp)
            with open(os.path.join(out_dir, "config.yaml"), "w") as fp:
                yaml.dump(contents, fp)
        # Progress csv file -> Filter out some columns, cut, and write to
        # output_dir.
        elif file == "progress.csv":
            assert os.path.isfile(absfile), "{} not a file!".format(file)
            col_idx_to_filter = []
            with open(absfile) as fp:
                # Get column names.
                col_names_orig = fp.readline().strip().split(",")
                # Split by comma (respecting quotes), filter out
                # unwanted columns, then write to disk.
                cols_to_filter = args.results_filter.split(",")
                for i, c in enumerate(col_names_orig):
                    if c in cols_to_filter:
                        col_idx_to_filter.insert(0, i)
                col_names = col_names_orig.copy()
                for idx in col_idx_to_filter:
                    col_names.pop(idx)
                absfile_out = os.path.join(out_dir, "progress.csv")
                with open(absfile_out, "w") as out_fp:
                    print(",".join(col_names), file=out_fp)
                    while True:
                        line = fp.readline().strip()
                        if not line:
                            break
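                        # Consecutive commas mean empty csv fields; fill each
                        # with "None" so the quote-aware split below yields
                        # exactly one token per column.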
                        line = re.sub(
                            "(,{2,})",
                            lambda m: ",None" * (len(m.group()) - 1) + ",",
                            line,
                        )
                        cols = re.findall('".+?"|[^,]+', line)
                        if len(cols) != len(col_names_orig):
                            continue
                        for idx in col_idx_to_filter:
                            cols.pop(idx)
                        print(",".join(cols), file=out_fp)
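
            # Size-reduction arithmetic below: `NR==1` always preserves the
            # csv header row. Keeping only every nth line (`NR%n==0`) shrinks
            # the file roughly n-fold, e.g. a 1024kb file with a 100kb limit
            # gives nth=10 and lands at ~102kb. Dropping every nth line
            # (`NR%n!=0`) removes ~1/n of the rows, for the milder
            # 1.0 < ratio <= 2.0 case.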
            # Reduce the size of the output file if necessary.
            out_size = os.path.getsize(absfile_out)
            max_size = args.results_max_size * 1024
            if 0 < max_size < out_size:
                # Figure out roughly every which line we have to drop.
                ratio = out_size / max_size
                # If ratio > 2.0, we'll have to keep only every nth line.
                if ratio > 2.0:
                    nth = out_size // max_size
                    os.system(
                        "awk 'NR==1||NR%{}==0' {} > {}.new".format(
                            nth, absfile_out, absfile_out
                        )
                    )
                # If ratio < 2.0 (>1.0), we'll have to drop every nth line.
                else:
                    nth = out_size // (out_size - max_size)
                    os.system(
                        "awk 'NR==1||NR%{}!=0' {} > {}.new".format(
                            nth, absfile_out, absfile_out
                        )
                    )
                os.remove(absfile_out)
                os.rename(absfile_out + ".new", absfile_out)
            # Zip progress.csv into results.zip.
            zip_file = os.path.join(out_dir, "results.zip")
            try:
                os.remove(zip_file)
            except FileNotFoundError:
                pass
            os.system(
                "zip -j {} {}".format(zip_file, os.path.join(out_dir, "progress.csv"))
            )
            os.remove(os.path.join(out_dir, "progress.csv"))
        # TBX events file or params.pkl -> Copy as-is.
        elif re.search("^(events\\.out\\.|params\\.pkl)", file):
            assert os.path.isfile(absfile), "{} not a file!".format(file)
            shutil.copyfile(absfile, os.path.join(out_dir, file))


if __name__ == "__main__":
    args = parser.parse_args()
    exp_dir = os.listdir(args.experiment_dir)
    # Loop through all sub-directories.
    for i, sub_run in enumerate(sorted(exp_dir)):
        abspath = os.path.join(args.experiment_dir, sub_run)
        # This is a seed run.
        if os.path.isdir(abspath) and re.search(
            "^(\\w+?)_(\\w+?-v\\d+)(_\\d+)", sub_run
        ):
            # Create meaningful output dir name:
            # [algo]_[env]_[trial #]_[trial-config]_[date YYYY-MM-DD].
            cleaned_up_out = re.sub(
                "^(\\w+?)_(\\w+?-v\\d+)(_\\d+)(_.+)?(_\\d{4}-\\d{2}-\\d{2})"
                "_\\d{2}-\\d{2}-\\w+",
                "{:02}_\\1_\\2\\4\\5".format(i),
                sub_run,
            )
            # Remove superfluous `env=` specifier (env always included in name).
            cleaned_up_out = re.sub(
                "^(.+)env=\\w+?-v\\d+,?(.+)", "\\1\\2", cleaned_up_out
            )
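            # E.g. for i=3, the docstring's example trial dir
            # "IMPALA_BreakoutNoFrameskip-v4_2_use_pytorch=True_2020-05-11_10-17-54lv20cgn_"
            # becomes "03_IMPALA_BreakoutNoFrameskip-v4_use_pytorch=True_2020-05-11".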
            out_path = os.path.join(args.output_dir, cleaned_up_out)
            process_single_run(abspath, out_path)

    # Done.
    print("done")