sweep.sh

#!/bin/bash
# Sweep over GPU counts, dtypes, and the graphs/kernel flags for every model,
# invoking run_model.sh for each configuration to compare two branches.
set -x

export TRANSFORMERS_CACHE=/tmp/hf-cache

# The two branches to compare are passed as positional arguments.
branch1=$1
branch2=$2

# Hugging Face model IDs, grouped by model family.
gptneo_models="EleutherAI/gpt-neo-2.7B EleutherAI/gpt-neo-1.3B EleutherAI/gpt-neo-125M"
gpt2_models="gpt2 gpt2-large gpt2-xl"
gptj_models="EleutherAI/gpt-j-6B"
opt_models="facebook/opt-125m facebook/opt-1.3b facebook/opt-2.7b facebook/opt-6.7b facebook/opt-13b"
bloom_models="bigscience/bloom-560m bigscience/bloom-1b7 bigscience/bloom-3b bigscience/bloom-7b1"

for gpus in 1 2 4 8; do
    for dtype in fp16 fp32; do
        for graphs in true false; do
            for kernel in true false; do
                params="$dtype $graphs $kernel $gpus"
                # $m and $params are intentionally left unquoted so that the
                # model lists and $params split into separate arguments.
                for m in $gptneo_models; do
                    bash run_model.sh $m $branch1 $branch2 $params
                done
                for m in $gpt2_models; do
                    bash run_model.sh $m $branch1 $branch2 $params
                done
                for m in $gptj_models; do
                    bash run_model.sh $m $branch1 $branch2 $params
                done
                for m in $opt_models; do
                    bash run_model.sh $m $branch1 $branch2 $params
                done
                for m in $bloom_models; do
                    bash run_model.sh $m $branch1 $branch2 $params
                done
            done
        done
    done
done
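
# Example invocation (a sketch, not part of the original script): the branch
# names below are placeholders for whichever two branches you want to compare.
#
#   bash sweep.sh main my-feature-branch
#
# For reference, based on the call site above, run_model.sh is handed its
# arguments in this positional order (the variable names here are illustrative
# assumptions about how that script might read them, not its actual contents):
#
#   model=$1; branch1=$2; branch2=$3; dtype=$4; graphs=$5; kernel=$6; gpus=$7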