get_llm_responses_retriever.py 9.7 KB

# Copyright 2023 https://github.com/ShishirPatil/gorilla
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import json
import multiprocessing as mp
import os
import time

import openai
import anthropic

from retrievers import *
from retrievers.build_json_index import JSONLReader
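
# Pipeline overview: build a retriever over the API dataset (BM25 or a
# GPT-based index), retrieve the top-k reference docs for each question,
# prompt an LLM (OpenAI GPT-* or Anthropic Claude) with those docs appended,
# and write each answer as one JSON line to the output file.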


def encode_question(question, api_name):
    """Encode the question and format instructions into a chat message list."""
    prompts = []
    if api_name == "torchhub":
        domains = "1. $DOMAIN is inferred from the task description and should include one of {Classification, Semantic Segmentation, Object Detection, Audio Separation, Video Classification, Text-to-Speech}."
    elif api_name == "huggingface":
        domains = "1. $DOMAIN should include one of {Multimodal Feature Extraction, Multimodal Text-to-Image, Multimodal Image-to-Text, Multimodal Text-to-Video, \
Multimodal Visual Question Answering, Multimodal Document Question Answer, Multimodal Graph Machine Learning, Computer Vision Depth Estimation, \
Computer Vision Image Classification, Computer Vision Object Detection, Computer Vision Image Segmentation, Computer Vision Image-to-Image, \
Computer Vision Unconditional Image Generation, Computer Vision Video Classification, Computer Vision Zero-Shot Image Classification, \
Natural Language Processing Text Classification, Natural Language Processing Token Classification, Natural Language Processing Table Question Answering, \
Natural Language Processing Question Answering, Natural Language Processing Zero-Shot Classification, Natural Language Processing Translation, \
Natural Language Processing Summarization, Natural Language Processing Conversational, Natural Language Processing Text Generation, Natural Language Processing Fill-Mask, \
Natural Language Processing Text2Text Generation, Natural Language Processing Sentence Similarity, Audio Text-to-Speech, Audio Automatic Speech Recognition, \
Audio Audio-to-Audio, Audio Audio Classification, Audio Voice Activity Detection, Tabular Tabular Classification, Tabular Tabular Regression, \
Reinforcement Learning Reinforcement Learning, Reinforcement Learning Robotics}"
    elif api_name == "tensorhub":
        domains = "1. $DOMAIN is inferred from the task description and should include one of {text-sequence-alignment, text-embedding, text-language-model, text-preprocessing, text-classification, text-generation, text-question-answering, text-retrieval-question-answering, text-segmentation, text-to-mel, image-classification, image-feature-vector, image-object-detection, image-segmentation, image-generator, image-pose-detection, image-rnn-agent, image-augmentation, image-classifier, image-style-transfer, image-aesthetic-quality, image-depth-estimation, image-super-resolution, image-deblurring, image-extrapolation, image-text-recognition, image-dehazing, image-deraining, image-enhancement, image-classification-logits, image-frame-interpolation, image-text-detection, image-denoising, image-others, video-classification, video-feature-extraction, video-generation, video-audio-text, video-text, audio-embedding, audio-event-classification, audio-command-detection, audio-paralinguists-classification, audio-speech-to-text, audio-speech-synthesis, audio-synthesis, audio-pitch-extraction}"
    else:
        # Unsupported dataset: bail out rather than hit a NameError on `domains` below.
        print("Error: API name is not supported.")
        return None
    prompt = question + "\nWrite a python program in 1 to 2 lines to call API in " + api_name + ".\n\nThe answer should follow the format: <<<domain>>>: $DOMAIN, <<<api_call>>>: $API_CALL, <<<api_provider>>>: $API_PROVIDER, <<<explanation>>>: $EXPLANATION, <<<code>>>: $CODE. Here are the requirements:\n" + domains + "\n2. The $API_CALL should have only 1 line of code that calls the API.\n3. The $API_PROVIDER should be the programming framework used.\n4. $EXPLANATION should be a step-by-step explanation.\n5. The $CODE is the python code.\n6. Do not repeat the format in your answer."
    prompts.append({"role": "system", "content": "You are a helpful API writer who can write APIs based on requirements."})
    prompts.append({"role": "user", "content": prompt})
    return prompts
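
# A minimal sketch of the list encode_question() returns (shape only; the
# real user message also carries the full format requirements built above):
#   [{"role": "system", "content": "You are a helpful API writer ..."},
#    {"role": "user", "content": "<question>\nWrite a python program ..."}]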


def get_response(get_response_input, api_key):
    question, question_id, api_name, model, retrieved_doc = get_response_input
    question = encode_question(question, api_name)
    # Append the retrieved reference docs to the user message.
    question[-1]["content"] = question[-1]["content"] + "\nHere are some reference docs:"
    for i, doc in enumerate(retrieved_doc):
        question[-1]["content"] = question[-1]["content"] + "\nAPI " + str(i) + ": " + str(doc)
    try:
        if "gpt" in model:
            openai.api_key = api_key
            responses = openai.ChatCompletion.create(
                model=model,
                messages=question,
                n=1,
                temperature=0,
            )
            response = responses['choices'][0]['message']['content']
        elif "claude" in model:
            client = anthropic.Anthropic(api_key=api_key)
            responses = client.completions.create(
                prompt=f"{anthropic.HUMAN_PROMPT} {question[0]['content']}{question[1]['content']}{anthropic.AI_PROMPT}",
                stop_sequences=[anthropic.HUMAN_PROMPT],
                model="claude-v1",
                max_tokens_to_sample=2048,
            )
            response = responses.completion.strip()
        else:
            # Unsupported model: return early so `response` is never unbound.
            print("Error: Model is not supported.")
            return None
    except Exception as e:
        print("Error:", e)
        return None

    print("=>", question_id)
    return {'text': response, "question_id": question_id, "answer_id": "None", "model_id": model, "metadata": {}}
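

# process_entry() ties retrieval and generation together for one question:
# fetch the top-k reference docs, then prompt the model with them appended.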
def process_entry(entry, api_key):
    question, question_id, api_name, model, retriever = entry
    retrieved_doc = retriever.get_relevant_documents(question)
    result = get_response((question, question_id, api_name, model, retrieved_doc), api_key)
    return result


def write_result_to_file(result, output_file):
    global file_write_lock
    # Serialize appends from pool callbacks so JSONL lines never interleave.
    with file_write_lock:
        with open(output_file, "a") as outfile:
            json.dump(result, outfile)
            outfile.write("\n")


def callback_with_lock(result, output_file):
    global file_write_lock
    write_result_to_file(result, output_file)
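

# Entry point: parse args, build the chosen retriever, then fan the
# questions out to a worker pool, appending answers as they complete.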
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default=None, help="which model to use for eval; only ['gpt*', 'claude*'] are supported now")
    parser.add_argument("--api_key", type=str, default=None, help="the API key used for the model calls")
    parser.add_argument("--output_file", type=str, default=None, help="the output file this script writes to")
    parser.add_argument("--question_data", type=str, default=None, help="path to the questions data file")
    parser.add_argument("--api_name", type=str, default=None, help="the API dataset name to test; only ['torchhub', 'tensorhub', 'huggingface'] are supported now")
    parser.add_argument("--retriever", type=str, default="bm25", help="which retriever to use")
    parser.add_argument("--num_doc", type=int, default=1, help="top-k docs to retrieve")
    parser.add_argument("--api_dataset", type=str, default=None, help="path to the API data")
    args = parser.parse_args()

    assert args.retriever in ["bm25", "gpt"]
    if args.retriever == "gpt":
        retriever = GPTRetriever(query_kwargs={"similarity_top_k": args.num_doc})
        if os.path.exists(args.retriever + '_dataset_index.json'):
            print('data index already saved')
            os.environ["OPENAI_API_KEY"] = args.api_key
            index = retriever.load_from_disk(args.retriever + '_dataset_index.json')
        else:
            print('data index being created')
            os.environ["OPENAI_API_KEY"] = args.api_key
            documents = JSONLReader().load_data(args.api_dataset)
            index = retriever.from_documents(documents)
            retriever.save_to_disk(index, args.retriever + '_dataset_index.json')
    elif args.retriever == "bm25":
        from rank_bm25 import BM25Okapi
        # Index the whitespace tokens of each JSON-serialized API doc.
        corpus = []
        with open(args.api_dataset, 'r') as f:
            for line in f:
                corpus.append(json.loads(line))
        tokenized_corpus = [str(doc).split(" ") for doc in corpus]
        bm25 = BM25Okapi(tokenized_corpus)
        retriever = BM25Retriever(index=bm25, corpus=corpus, query_kwargs={"similarity_top_k": args.num_doc})
    else:
        assert False
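
    # BM25Retriever comes from the local `retrievers` package (wildcard
    # import above) and is not defined in this file; presumably its
    # get_relevant_documents() tokenizes the query the same way and returns
    # the `similarity_top_k` best-scoring corpus entries (rank_bm25's
    # BM25Okapi exposes get_top_n() for exactly this).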

    start_time = time.time()

    # Read the question file (one JSON object per line).
    questions = []
    question_ids = []
    with open(args.question_data, 'r') as f:
        for line in f:
            record = json.loads(line)
            questions.append(record["text"])
            question_ids.append(record["question_id"])

    file_write_lock = mp.Lock()
    with mp.Pool(1) as pool:
        results = []
        for question, question_id in zip(questions, question_ids):
            result = pool.apply_async(
                process_entry,
                args=((question, question_id, args.api_name, args.model, retriever), args.api_key),
                callback=lambda result: write_result_to_file(result, args.output_file),
            )
            results.append(result)
        pool.close()
        pool.join()

    end_time = time.time()
    print("Total time used: ", end_time - start_time)
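
# A hypothetical invocation (paths and model name are placeholders, not
# fixed by this script):
#   python get_llm_responses_retriever.py \
#       --model gpt-3.5-turbo \
#       --api_key $OPENAI_API_KEY \
#       --output_file responses_huggingface.jsonl \
#       --question_data data/questions_huggingface.jsonl \
#       --api_name huggingface \
#       --api_dataset data/huggingface_api.jsonl \
#       --retriever bm25 --num_doc 3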