llama3_full_predict.yaml

### model
model_name_or_path: saves/llama3-8b/full/sft

### method
stage: sft
do_predict: true
finetuning_type: full

### dataset
eval_dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/full/predict
overwrite_output_dir: true

### eval
per_device_eval_batch_size: 1
predict_with_generate: true
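
Usage note (a minimal sketch, assuming this is a LLaMA-Factory config saved as llama3_full_predict.yaml in the current directory): with do_predict and predict_with_generate enabled, running the config through the LLaMA-Factory CLI performs batch generation over the datasets in eval_dataset and writes the predictions to output_dir.

llamafactory-cli train llama3_full_predict.yaml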