inference_hosted.py (1.8 KB)
  1. import openai
  2. import json
# Point the OpenAI client at a self-hosted, OpenAI-compatible Gorilla endpoint.
# NOTE(review): this is the legacy openai<1.0 global-config style (module-level
# `api_key` / `api_base`); the hosted server presumably ignores credentials,
# hence the "EMPTY" placeholder key — confirm against the server setup.
openai.api_key = "EMPTY"
openai.api_base = "http://luigi.millennium.berkeley.edu:8000/v1"
  5. # Example dummy function hard coded to return the same weather
  6. # In production, this could be your backend API or an external API
  7. def get_current_weather(location, unit="fahrenheit"):
  8. """Get the current weather in a given location"""
  9. weather_info = {
  10. "location": location,
  11. "temperature": "72",
  12. "unit": unit,
  13. "forecast": ["sunny", "windy"],
  14. }
  15. return json.dumps(weather_info)
  16. def run_conversation():
  17. # Step 1: send the conversation and available functions to GPT
  18. messages = [{"role": "user", "content": "What's the weather like in the two cities of Boston and San Francisco?"}]
  19. functions = [
  20. {
  21. "name": "get_current_weather",
  22. "description": "Get the current weather in a given location",
  23. "parameters": {
  24. "type": "object",
  25. "properties": {
  26. "location": {
  27. "type": "string",
  28. "description": "The city and state, e.g. San Francisco, CA",
  29. },
  30. "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
  31. },
  32. "required": ["location"],
  33. },
  34. }
  35. ]
  36. completion = openai.ChatCompletion.create(
  37. model='gorilla-openfunctions-v2',
  38. messages=messages,
  39. functions=functions,
  40. function_call="auto", # auto is default, but we'll be explicit
  41. )
  42. print("--------------------")
  43. print(f"Function call strings(s): {completion.choices[0].message.content}")
  44. print("--------------------")
  45. print(f"OpenAI compatible `function_call`: {completion.choices[0].message.function_call}")
  46. run_conversation()