## ===================================================
# docker-compose.yml
## ===================================================
# 1. Choose ONE of the schemes below and delete the others.
# 2. Edit the environment variables in the scheme you chose; see the GitHub wiki or config.py for details.
# 3. Choose a method of exposing the server port and adjust the configuration accordingly:
#    [Method 1: Linux only, very convenient; not supported on Windows] merge with the host network (the default configuration)
#        network_mode: "host"
#    [Method 2: all systems, including Windows and macOS] port mapping, i.e. mapping the container port to a host port
#    (note: delete network_mode: "host" first, then add the following)
#        ports:
#          - "12345:12345"    # Note! 12345 must match the WEB_PORT environment variable
# 4. Finally, run `docker-compose up`.
# 5. If you want to use a GPU, pay attention to the LOCAL_MODEL_DEVICE and NVIDIA runtime options.
## ===================================================
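## ---------------------------------------------------
## A minimal end-to-end sketch (illustration only, not part of the configuration): assuming you
## keep only [Scheme 1] below and switch it to port mapping (method 2), its networking lines
## would become:
##
##     # network_mode: "host"        <- removed
##     ports:
##       - "22303:22303"             # 22303 matches that scheme's WEB_PORT
##
## Then, from the directory containing this file:
##
##     docker-compose up -d          # start in the background
##     docker-compose logs -f        # follow the application logs
## ---------------------------------------------------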
## ===================================================
## [Scheme 0] Deploy ALL of the project's capabilities (this is a large image containing CUDA and LaTeX;
## not recommended if your network is slow, your disk is small, or you have no GPU)
## ===================================================
version: '3'
services:
  gpt_academic_full_capability:
    image: ghcr.io/binary-husky/gpt_academic_with_all_capacity:master
    environment:
      # See `config.py` or the GitHub wiki for all configuration options
      API_KEY: ' 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 '
      # USE_PROXY: ' True '
      # proxies: ' { "http": "http://localhost:10881", "https": "http://localhost:10881", } '
      LLM_MODEL: ' gpt-3.5-turbo '
      AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4", "qianfan", "sparkv2", "spark", "chatglm"] '
      BAIDU_CLOUD_API_KEY: ' bTUtwEAveBrQipEowUvDwYWq '
      BAIDU_CLOUD_SECRET_KEY: ' jqXtLvXiVw6UNdjliATTS61rllG8Iuni '
      XFYUN_APPID: ' 53a8d816 '
      XFYUN_API_SECRET: ' MjMxNDQ4NDE4MzM0OSNlNjQ2NTlhMTkx '
      XFYUN_API_KEY: ' 95ccdec285364869d17b33e75ee96447 '
      ENABLE_AUDIO: ' False '
      DEFAULT_WORKER_NUM: ' 20 '
      WEB_PORT: ' 12345 '
      ADD_WAIFU: ' False '
      ALIYUN_APPKEY: ' RxPlZrM88DnAFkZK '
      THEME: ' Chuanhu-Small-and-Beautiful '
      ALIYUN_ACCESSKEY: ' LTAI5t6BrFUzxRXVGUWnekh1 '
      ALIYUN_SECRET: ' eHmI20SVWIwQZxCiTD2bGQVspP9i68 '
      # LOCAL_MODEL_DEVICE: ' cuda '

    # Load the NVIDIA GPU runtime
    # runtime: nvidia
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
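    # A quick host-side sanity check before enabling the options above (a sketch with assumptions:
    # the NVIDIA Container Toolkit must be installed on the host, and the CUDA image tag is only
    # an example that may need adjusting):
    #     docker run --rm --gpus all nvidia/cuda:12.1.0-base-ubuntu22.04 nvidia-smi
    # If nvidia-smi lists your GPU, the commented runtime/deploy block above should work too.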
    # [WEB_PORT exposure method 1: Linux only] merge with the host network
    network_mode: "host"
    # [WEB_PORT exposure method 2: all systems] port mapping
    # ports:
    #   - "12345:12345"    # 12345 must match WEB_PORT

    # After the container starts, run the main program main.py
    command: >
      bash -c "python3 -u main.py"
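    # Usage note: with network_mode: "host" and WEB_PORT set to 12345 above, the web UI is served
    # directly on the host port, so it can be checked with (assuming otherwise-default settings):
    #     curl -I http://localhost:12345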
## ===================================================
## [Scheme 1] If you do NOT need to run local models (only online LLM services such as chatgpt, azure, Spark, Qianfan, claude, etc.)
## ===================================================
version: '3'
services:
  gpt_academic_nolocalllms:
    image: ghcr.io/binary-husky/gpt_academic_nolocal:master  # (Auto Built by Dockerfile: docs/GithubAction+NoLocal)
    environment:
      # See `config.py` for all configuration options
      API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
      USE_PROXY: ' True '
      proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
      LLM_MODEL: ' gpt-3.5-turbo '
      AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "sparkv2", "qianfan"] '
      WEB_PORT: ' 22303 '
      ADD_WAIFU: ' True '
      # THEME: ' Chuanhu-Small-and-Beautiful '
      # DEFAULT_WORKER_NUM: ' 10 '
      # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '

    # Merge with the host network
    network_mode: "host"
    # Pull the latest code without going through the proxy network
    command: >
      bash -c "python3 -u main.py"
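    # The socks5h:// scheme above routes DNS resolution through the proxy as well (plain socks5://
    # would resolve names locally). To confirm the proxy itself works from the host first (a
    # sketch; the endpoint is just an example):
    #     curl --socks5-hostname localhost:10880 -I https://api.openai.com/v1/models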
### ===================================================
### [Scheme 2] If you need to run local models such as ChatGLM + Qwen + MOSS
### ===================================================
version: '3'
services:
  gpt_academic_with_chatglm:
    image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master  # (Auto Built by Dockerfile: docs/Dockerfile+ChatGLM)
    environment:
      # See `config.py` for all configuration options
      API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
      USE_PROXY: ' True '
      proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
      LLM_MODEL: ' gpt-3.5-turbo '
      AVAIL_LLM_MODELS: ' ["chatglm", "qwen", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"] '
      LOCAL_MODEL_DEVICE: ' cuda '
      DEFAULT_WORKER_NUM: ' 10 '
      WEB_PORT: ' 12303 '
      ADD_WAIFU: ' True '
      # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '

    # GPU usage; nvidia0 refers to GPU 0
    runtime: nvidia
    devices:
      - /dev/nvidia0:/dev/nvidia0
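    # The mapping above exposes only /dev/nvidia0 (GPU 0). To hand the container another GPU, add
    # its device node too (an assumption: first check that the node exists on the host with
    # ls /dev/nvidia*):
    #     - /dev/nvidia1:/dev/nvidia1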
    # Merge with the host network
    network_mode: "host"
    command: >
      bash -c "python3 -u main.py"
    # P.S. By tweaking the command slightly, extra dependencies can be installed on startup, e.g.:
    # command: >
    #   bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py"
### ===================================================
### [Scheme 3] If you need to run ChatGPT + local LLAMA + Pangu + RWKV models
### ===================================================
version: '3'
services:
  gpt_academic_with_rwkv:
    image: ghcr.io/binary-husky/gpt_academic_jittorllms:master
    environment:
      # See `config.py` for all configuration options
      API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
      USE_PROXY: ' True '
      proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
      LLM_MODEL: ' gpt-3.5-turbo '
      AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] '
      LOCAL_MODEL_DEVICE: ' cuda '
      DEFAULT_WORKER_NUM: ' 10 '
      WEB_PORT: ' 12305 '
      ADD_WAIFU: ' True '
      # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '

    # GPU usage; nvidia0 refers to GPU 0
    runtime: nvidia
    devices:
      - /dev/nvidia0:/dev/nvidia0

    # Merge with the host network
    network_mode: "host"
    # Pull the latest code without going through the proxy network
    command: >
      python3 -u main.py
## ===================================================
## [Scheme 4] ChatGPT + LaTeX
## ===================================================
version: '3'
services:
  gpt_academic_with_latex:
    image: ghcr.io/binary-husky/gpt_academic_with_latex:master  # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex)
    environment:
      # See `config.py` for all configuration options
      API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
      USE_PROXY: ' True '
      proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
      LLM_MODEL: ' gpt-3.5-turbo '
      AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4"] '
      LOCAL_MODEL_DEVICE: ' cuda '
      DEFAULT_WORKER_NUM: ' 10 '
      WEB_PORT: ' 12303 '

    # Merge with the host network
    network_mode: "host"
    # Pull the latest code without going through the proxy network
    command: >
      bash -c "python3 -u main.py"
## ===================================================
## [Scheme 5] ChatGPT + voice assistant (please read docs/use_audio.md first)
## ===================================================
version: '3'
services:
  gpt_academic_with_audio:
    image: ghcr.io/binary-husky/gpt_academic_audio_assistant:master
    environment:
      # See `config.py` for all configuration options
      API_KEY: ' fk195831-IdP0Pb3W6DCMUIbQwVX6MsSiyxwqybyS '
      USE_PROXY: ' False '
      proxies: ' None '
      LLM_MODEL: ' gpt-3.5-turbo '
      AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4"] '
      ENABLE_AUDIO: ' True '
      LOCAL_MODEL_DEVICE: ' cuda '
      DEFAULT_WORKER_NUM: ' 20 '
      WEB_PORT: ' 12343 '
      ADD_WAIFU: ' True '
      THEME: ' Chuanhu-Small-and-Beautiful '
      ALIYUN_APPKEY: ' RoP1ZrM84DnAFkZK '
      ALIYUN_TOKEN: ' f37f30e0f9934c34a992f6f64f7eba4f '
      # (not required) ALIYUN_ACCESSKEY: ' LTAI5q6BrFUzoRXVGUWnekh1 '
      # (not required) ALIYUN_SECRET: ' eHmI20AVWIaQZ0CiTD2bGQVsaP9i68 '

    # Merge with the host network
    network_mode: "host"
    # Pull the latest code without going through the proxy network
    command: >
      bash -c "python3 -u main.py"