  1. """
  2. 该文件中主要包含2个函数,是所有LLM的通用接口,它们会继续向下调用更底层的LLM模型,处理多模型并行等细节
  3. 不具备多线程能力的函数:正常对话时使用,具备完备的交互功能,不可多线程
  4. 1. predict(...)
  5. 具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁
  6. 2. predict_no_ui_long_connection(...)
  7. """
import tiktoken, copy
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
from toolbox import get_conf, trimmed_format_exc
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
from .bridge_chatgpt_vision import predict_no_ui_long_connection as chatgpt_vision_noui
from .bridge_chatgpt_vision import predict as chatgpt_vision_ui
from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
from .bridge_chatglm import predict as chatglm_ui
from .bridge_chatglm3 import predict_no_ui_long_connection as chatglm3_noui
from .bridge_chatglm3 import predict as chatglm3_ui
from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
from .bridge_qianfan import predict as qianfan_ui
from .bridge_google_gemini import predict as genai_ui
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui

colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
class LazyloadTiktoken(object):
    def __init__(self, model):
        self.model = model

    @staticmethod
    @lru_cache(maxsize=128)
    def get_encoder(model):
        print('Loading tokenizer; the first run may take a moment to download its parameters')
        tmp = tiktoken.encoding_for_model(model)
        print('Tokenizer loaded')
        return tmp

    def encode(self, *args, **kwargs):
        encoder = self.get_encoder(self.model)
        return encoder.encode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        encoder = self.get_encoder(self.model)
        return encoder.decode(*args, **kwargs)
# Endpoint redirection
API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "AZURE_ENDPOINT", "AZURE_ENGINE")
openai_endpoint = "https://api.openai.com/v1/chat/completions"
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
# Backward compatibility with the legacy config
try:
    API_URL = get_conf("API_URL")
    if API_URL != "https://api.openai.com/v1/chat/completions":
        openai_endpoint = API_URL
        print("Warning! The API_URL config option will be deprecated; please switch to API_URL_REDIRECT")
except Exception:
    pass
# New-style config
if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
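# Example redirect entry in the config file, pointing the official OpenAI endpoint at a
# self-hosted reverse proxy (a sketch; the proxy URL is hypothetical):
#
#   API_URL_REDIRECT = {
#       "https://api.openai.com/v1/chat/completions":
#           "https://my-proxy.example.com/v1/chat/completions",
#   }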
# Tokenizer getters
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
tokenizer_gpt4 = LazyloadTiktoken("gpt-4")
get_token_num_gpt35 = lambda txt: len(tokenizer_gpt35.encode(txt, disallowed_special=()))
get_token_num_gpt4 = lambda txt: len(tokenizer_gpt4.encode(txt, disallowed_special=()))
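# Example: counting tokens without eagerly downloading the tokenizer at import time.
# The encoder is only fetched (and lru_cache'd) on the first call:
#
#   n = get_token_num_gpt35("How many tokens is this?")  # triggers the lazy load
#   n = get_token_num_gpt35("A second call reuses the cached encoder")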
# Begin model initialization
AVAIL_LLM_MODELS, LLM_MODEL = get_conf("AVAIL_LLM_MODELS", "LLM_MODEL")
AVAIL_LLM_MODELS = AVAIL_LLM_MODELS + [LLM_MODEL]
# -=-=-=-=-=-=- The models below were added earliest and are the most stable -=-=-=-=-=-=-
model_info = {
    # openai
    "gpt-3.5-turbo": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "gpt-3.5-turbo-16k": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 16385,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "gpt-3.5-turbo-0613": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "gpt-3.5-turbo-16k-0613": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 16385,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "gpt-3.5-turbo-1106": {  # 16k context
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 16385,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "gpt-4": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 8192,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },
    "gpt-4-32k": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 32768,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },
    "gpt-4-1106-preview": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 128000,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },
    "gpt-3.5-random": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },
    "gpt-4-vision-preview": {
        "fn_with_ui": chatgpt_vision_ui,
        "fn_without_ui": chatgpt_vision_noui,
        "endpoint": openai_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },
    # azure openai
    "azure-gpt-3.5": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": azure_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "azure-gpt-4": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": azure_endpoint,
        "max_token": 8192,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },
    # api_2d (no need to add further api2d entries here; the code below adds them automatically)
    "api2d-gpt-3.5-turbo": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": api2d_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "api2d-gpt-4": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": api2d_endpoint,
        "max_token": 8192,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },
    # map chatglm directly onto chatglm2
    "chatglm": {
        "fn_with_ui": chatglm_ui,
        "fn_without_ui": chatglm_noui,
        "endpoint": None,
        "max_token": 1024,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "chatglm2": {
        "fn_with_ui": chatglm_ui,
        "fn_without_ui": chatglm_noui,
        "endpoint": None,
        "max_token": 1024,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "chatglm3": {
        "fn_with_ui": chatglm3_ui,
        "fn_without_ui": chatglm3_noui,
        "endpoint": None,
        "max_token": 8192,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "qianfan": {
        "fn_with_ui": qianfan_ui,
        "fn_without_ui": qianfan_noui,
        "endpoint": None,
        "max_token": 2000,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "gemini-pro": {
        "fn_with_ui": genai_ui,
        "fn_without_ui": genai_noui,
        "endpoint": None,
        "max_token": 1024 * 32,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "gemini-pro-vision": {
        "fn_with_ui": genai_ui,
        "fn_without_ui": genai_noui,
        "endpoint": None,
        "max_token": 1024 * 32,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
}
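# Every entry above follows the same schema, so callers can stay model-agnostic.
# A sketch of the typical lookup pattern used later in this file:
#
#   info = model_info[llm_kwargs['llm_model']]
#   if info['token_cnt'](inputs) > info['max_token']:
#       ...  # trim the input before dispatching to info['fn_without_ui']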
# -=-=-=-=-=-=- api2d alignment support -=-=-=-=-=-=-
for model in AVAIL_LLM_MODELS:
    if model.startswith('api2d-') and (model.replace('api2d-', '') in model_info.keys()):
        mi = copy.deepcopy(model_info[model.replace('api2d-', '')])
        mi.update({"endpoint": api2d_endpoint})
        model_info.update({model: mi})
# -=-=-=-=-=-=- azure alignment support -=-=-=-=-=-=-
for model in AVAIL_LLM_MODELS:
    if model.startswith('azure-') and (model.replace('azure-', '') in model_info.keys()):
        mi = copy.deepcopy(model_info[model.replace('azure-', '')])
        mi.update({"endpoint": azure_endpoint})
        model_info.update({model: mi})
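# Example: putting "api2d-gpt-4-32k" into AVAIL_LLM_MODELS is enough; the loop above
# deep-copies the existing "gpt-4-32k" entry and swaps in the api2d endpoint (a sketch,
# with a hypothetical config value):
#
#   AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-4-32k"]
#   # after the loop: model_info["api2d-gpt-4-32k"]["endpoint"] == api2d_endpoint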
# -=-=-=-=-=-=- The models below were added later and may carry extra dependencies -=-=-=-=-=-=-
if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
    from .bridge_claude import predict_no_ui_long_connection as claude_noui
    from .bridge_claude import predict as claude_ui
    model_info.update({
        "claude-1-100k": {
            "fn_with_ui": claude_ui,
            "fn_without_ui": claude_noui,
            "endpoint": None,
            "max_token": 8192,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        },
    })
    model_info.update({
        "claude-2": {
            "fn_with_ui": claude_ui,
            "fn_without_ui": claude_noui,
            "endpoint": None,
            "max_token": 8192,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        },
    })
  271. if "jittorllms_rwkv" in AVAIL_LLM_MODELS:
  272. from .bridge_jittorllms_rwkv import predict_no_ui_long_connection as rwkv_noui
  273. from .bridge_jittorllms_rwkv import predict as rwkv_ui
  274. model_info.update({
  275. "jittorllms_rwkv": {
  276. "fn_with_ui": rwkv_ui,
  277. "fn_without_ui": rwkv_noui,
  278. "endpoint": None,
  279. "max_token": 1024,
  280. "tokenizer": tokenizer_gpt35,
  281. "token_cnt": get_token_num_gpt35,
  282. },
  283. })
  284. if "jittorllms_llama" in AVAIL_LLM_MODELS:
  285. from .bridge_jittorllms_llama import predict_no_ui_long_connection as llama_noui
  286. from .bridge_jittorllms_llama import predict as llama_ui
  287. model_info.update({
  288. "jittorllms_llama": {
  289. "fn_with_ui": llama_ui,
  290. "fn_without_ui": llama_noui,
  291. "endpoint": None,
  292. "max_token": 1024,
  293. "tokenizer": tokenizer_gpt35,
  294. "token_cnt": get_token_num_gpt35,
  295. },
  296. })
  297. if "jittorllms_pangualpha" in AVAIL_LLM_MODELS:
  298. from .bridge_jittorllms_pangualpha import predict_no_ui_long_connection as pangualpha_noui
  299. from .bridge_jittorllms_pangualpha import predict as pangualpha_ui
  300. model_info.update({
  301. "jittorllms_pangualpha": {
  302. "fn_with_ui": pangualpha_ui,
  303. "fn_without_ui": pangualpha_noui,
  304. "endpoint": None,
  305. "max_token": 1024,
  306. "tokenizer": tokenizer_gpt35,
  307. "token_cnt": get_token_num_gpt35,
  308. },
  309. })
  310. if "moss" in AVAIL_LLM_MODELS:
  311. from .bridge_moss import predict_no_ui_long_connection as moss_noui
  312. from .bridge_moss import predict as moss_ui
  313. model_info.update({
  314. "moss": {
  315. "fn_with_ui": moss_ui,
  316. "fn_without_ui": moss_noui,
  317. "endpoint": None,
  318. "max_token": 1024,
  319. "tokenizer": tokenizer_gpt35,
  320. "token_cnt": get_token_num_gpt35,
  321. },
  322. })
  323. if "stack-claude" in AVAIL_LLM_MODELS:
  324. from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui
  325. from .bridge_stackclaude import predict as claude_ui
  326. model_info.update({
  327. "stack-claude": {
  328. "fn_with_ui": claude_ui,
  329. "fn_without_ui": claude_noui,
  330. "endpoint": None,
  331. "max_token": 8192,
  332. "tokenizer": tokenizer_gpt35,
  333. "token_cnt": get_token_num_gpt35,
  334. }
  335. })
  336. if "newbing-free" in AVAIL_LLM_MODELS:
  337. try:
  338. from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
  339. from .bridge_newbingfree import predict as newbingfree_ui
  340. model_info.update({
  341. "newbing-free": {
  342. "fn_with_ui": newbingfree_ui,
  343. "fn_without_ui": newbingfree_noui,
  344. "endpoint": newbing_endpoint,
  345. "max_token": 4096,
  346. "tokenizer": tokenizer_gpt35,
  347. "token_cnt": get_token_num_gpt35,
  348. }
  349. })
  350. except:
  351. print(trimmed_format_exc())
  352. if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
  353. try:
  354. from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
  355. from .bridge_newbingfree import predict as newbingfree_ui
  356. model_info.update({
  357. "newbing": {
  358. "fn_with_ui": newbingfree_ui,
  359. "fn_without_ui": newbingfree_noui,
  360. "endpoint": newbing_endpoint,
  361. "max_token": 4096,
  362. "tokenizer": tokenizer_gpt35,
  363. "token_cnt": get_token_num_gpt35,
  364. }
  365. })
  366. except:
  367. print(trimmed_format_exc())
  368. if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
  369. try:
  370. from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui
  371. from .bridge_chatglmft import predict as chatglmft_ui
  372. model_info.update({
  373. "chatglmft": {
  374. "fn_with_ui": chatglmft_ui,
  375. "fn_without_ui": chatglmft_noui,
  376. "endpoint": None,
  377. "max_token": 4096,
  378. "tokenizer": tokenizer_gpt35,
  379. "token_cnt": get_token_num_gpt35,
  380. }
  381. })
  382. except:
  383. print(trimmed_format_exc())
  384. if "internlm" in AVAIL_LLM_MODELS:
  385. try:
  386. from .bridge_internlm import predict_no_ui_long_connection as internlm_noui
  387. from .bridge_internlm import predict as internlm_ui
  388. model_info.update({
  389. "internlm": {
  390. "fn_with_ui": internlm_ui,
  391. "fn_without_ui": internlm_noui,
  392. "endpoint": None,
  393. "max_token": 4096,
  394. "tokenizer": tokenizer_gpt35,
  395. "token_cnt": get_token_num_gpt35,
  396. }
  397. })
  398. except:
  399. print(trimmed_format_exc())
  400. if "chatglm_onnx" in AVAIL_LLM_MODELS:
  401. try:
  402. from .bridge_chatglmonnx import predict_no_ui_long_connection as chatglm_onnx_noui
  403. from .bridge_chatglmonnx import predict as chatglm_onnx_ui
  404. model_info.update({
  405. "chatglm_onnx": {
  406. "fn_with_ui": chatglm_onnx_ui,
  407. "fn_without_ui": chatglm_onnx_noui,
  408. "endpoint": None,
  409. "max_token": 4096,
  410. "tokenizer": tokenizer_gpt35,
  411. "token_cnt": get_token_num_gpt35,
  412. }
  413. })
  414. except:
  415. print(trimmed_format_exc())
  416. if "qwen-local" in AVAIL_LLM_MODELS:
  417. try:
  418. from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
  419. from .bridge_qwen_local import predict as qwen_local_ui
  420. model_info.update({
  421. "qwen-local": {
  422. "fn_with_ui": qwen_local_ui,
  423. "fn_without_ui": qwen_local_noui,
  424. "endpoint": None,
  425. "max_token": 4096,
  426. "tokenizer": tokenizer_gpt35,
  427. "token_cnt": get_token_num_gpt35,
  428. }
  429. })
  430. except:
  431. print(trimmed_format_exc())
  432. if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
  433. try:
  434. from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
  435. from .bridge_qwen import predict as qwen_ui
  436. model_info.update({
  437. "qwen-turbo": {
  438. "fn_with_ui": qwen_ui,
  439. "fn_without_ui": qwen_noui,
  440. "endpoint": None,
  441. "max_token": 6144,
  442. "tokenizer": tokenizer_gpt35,
  443. "token_cnt": get_token_num_gpt35,
  444. },
  445. "qwen-plus": {
  446. "fn_with_ui": qwen_ui,
  447. "fn_without_ui": qwen_noui,
  448. "endpoint": None,
  449. "max_token": 30720,
  450. "tokenizer": tokenizer_gpt35,
  451. "token_cnt": get_token_num_gpt35,
  452. },
  453. "qwen-max": {
  454. "fn_with_ui": qwen_ui,
  455. "fn_without_ui": qwen_noui,
  456. "endpoint": None,
  457. "max_token": 28672,
  458. "tokenizer": tokenizer_gpt35,
  459. "token_cnt": get_token_num_gpt35,
  460. }
  461. })
  462. except:
  463. print(trimmed_format_exc())
  464. if "spark" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
  465. try:
  466. from .bridge_spark import predict_no_ui_long_connection as spark_noui
  467. from .bridge_spark import predict as spark_ui
  468. model_info.update({
  469. "spark": {
  470. "fn_with_ui": spark_ui,
  471. "fn_without_ui": spark_noui,
  472. "endpoint": None,
  473. "max_token": 4096,
  474. "tokenizer": tokenizer_gpt35,
  475. "token_cnt": get_token_num_gpt35,
  476. }
  477. })
  478. except:
  479. print(trimmed_format_exc())
  480. if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
  481. try:
  482. from .bridge_spark import predict_no_ui_long_connection as spark_noui
  483. from .bridge_spark import predict as spark_ui
  484. model_info.update({
  485. "sparkv2": {
  486. "fn_with_ui": spark_ui,
  487. "fn_without_ui": spark_noui,
  488. "endpoint": None,
  489. "max_token": 4096,
  490. "tokenizer": tokenizer_gpt35,
  491. "token_cnt": get_token_num_gpt35,
  492. }
  493. })
  494. except:
  495. print(trimmed_format_exc())
  496. if "sparkv3" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
  497. try:
  498. from .bridge_spark import predict_no_ui_long_connection as spark_noui
  499. from .bridge_spark import predict as spark_ui
  500. model_info.update({
  501. "sparkv3": {
  502. "fn_with_ui": spark_ui,
  503. "fn_without_ui": spark_noui,
  504. "endpoint": None,
  505. "max_token": 4096,
  506. "tokenizer": tokenizer_gpt35,
  507. "token_cnt": get_token_num_gpt35,
  508. }
  509. })
  510. except:
  511. print(trimmed_format_exc())
  512. if "llama2" in AVAIL_LLM_MODELS: # llama2
  513. try:
  514. from .bridge_llama2 import predict_no_ui_long_connection as llama2_noui
  515. from .bridge_llama2 import predict as llama2_ui
  516. model_info.update({
  517. "llama2": {
  518. "fn_with_ui": llama2_ui,
  519. "fn_without_ui": llama2_noui,
  520. "endpoint": None,
  521. "max_token": 4096,
  522. "tokenizer": tokenizer_gpt35,
  523. "token_cnt": get_token_num_gpt35,
  524. }
  525. })
  526. except:
  527. print(trimmed_format_exc())
  528. if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai
  529. try:
  530. from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
  531. from .bridge_zhipu import predict as zhipu_ui
  532. model_info.update({
  533. "zhipuai": {
  534. "fn_with_ui": zhipu_ui,
  535. "fn_without_ui": zhipu_noui,
  536. "endpoint": None,
  537. "max_token": 4096,
  538. "tokenizer": tokenizer_gpt35,
  539. "token_cnt": get_token_num_gpt35,
  540. }
  541. })
  542. except:
  543. print(trimmed_format_exc())
  544. if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
  545. try:
  546. from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
  547. from .bridge_deepseekcoder import predict as deepseekcoder_ui
  548. model_info.update({
  549. "deepseekcoder": {
  550. "fn_with_ui": deepseekcoder_ui,
  551. "fn_without_ui": deepseekcoder_noui,
  552. "endpoint": None,
  553. "max_token": 2048,
  554. "tokenizer": tokenizer_gpt35,
  555. "token_cnt": get_token_num_gpt35,
  556. }
  557. })
  558. except:
  559. print(trimmed_format_exc())
# <-- Used to define and switch among multiple Azure models -->
AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
if len(AZURE_CFG_ARRAY) > 0:
    for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
        # This may overwrite earlier config entries, which is intentional
        if not azure_model_name.startswith('azure'):
            raise ValueError("Models configured in AZURE_CFG_ARRAY must have names starting with 'azure'")
        endpoint_ = azure_cfg_dict["AZURE_ENDPOINT"] + \
            f'openai/deployments/{azure_cfg_dict["AZURE_ENGINE"]}/chat/completions?api-version=2023-05-15'
        model_info.update({
            azure_model_name: {
                "fn_with_ui": chatgpt_ui,
                "fn_without_ui": chatgpt_noui,
                "endpoint": endpoint_,
                "azure_api_key": azure_cfg_dict["AZURE_API_KEY"],
                "max_token": azure_cfg_dict["AZURE_MODEL_MAX_TOKEN"],
                "tokenizer": tokenizer_gpt35,  # the tokenizer is only used for a rough token-count estimate
                "token_cnt": get_token_num_gpt35,
            }
        })
        if azure_model_name not in AVAIL_LLM_MODELS:
            AVAIL_LLM_MODELS += [azure_model_name]
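# Example shape of AZURE_CFG_ARRAY in the config (a sketch; the names and values are
# hypothetical, but the four keys are exactly the ones read above):
#
#   AZURE_CFG_ARRAY = {
#       "azure-gpt-4-my-deployment": {
#           "AZURE_ENDPOINT": "https://my-resource.openai.azure.com/",
#           "AZURE_ENGINE": "my-gpt4-deployment",
#           "AZURE_API_KEY": "<secret>",
#           "AZURE_MODEL_MAX_TOKEN": 8192,
#       },
#   }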
def LLM_CATCH_EXCEPTION(f):
    """
    Decorator: catch any exception from the wrapped call, write the traceback into the
    observation window, and return it as the result instead of raising.
    """
    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
        try:
            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
        except Exception:
            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
            observe_window[0] = tb_str
            return tb_str
    return decorated
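# Sketch of how the decorator is used below: the wrapped call never raises into the
# thread pool; the traceback is written to the worker's observation window instead.
#
#   wrapped = LLM_CATCH_EXCEPTION(model_info['gpt-3.5-turbo']['fn_without_ui'])
#   window = ["", time.time(), ""]
#   result = wrapped(inputs, llm_kwargs, history, sys_prompt, window, False)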
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
    """
    Send a query to the LLM and wait for the full reply in one piece, without showing
    intermediate output. Internally a streaming request is still used, to keep the
    connection from being dropped halfway.
    inputs:
        the input for this query
    sys_prompt:
        the silent system prompt
    llm_kwargs:
        the LLM's internal tuning parameters
    history:
        the list of earlier dialogue turns
    observe_window = None:
        passes the partial output across threads; most of the time it only serves a fancy
        visual effect and can be left empty. observe_window[0]: observation window.
        observe_window[1]: watchdog
    """
    import threading, time, copy
    model = llm_kwargs['llm_model']
    n_model = 1
    if '&' not in model:
        # Querying a single LLM:
        assert not model.startswith("tgui"), "TGUI does not support the function-plugin implementation"
        method = model_info[model]["fn_without_ui"]
        return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
    else:
        # Querying several LLMs at once: a bit more verbose, but the idea is the same;
        # you do not need to read this else-branch.
        executor = ThreadPoolExecutor(max_workers=4)
        models = model.split('&')
        n_model = len(models)
        window_len = len(observe_window)
        assert window_len == 3
        window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
        futures = []
        for i in range(n_model):
            model = models[i]
            method = model_info[model]["fn_without_ui"]
            llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
            llm_kwargs_feedin['llm_model'] = model
            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
            futures.append(future)

        def mutex_manager(window_mutex, observe_window):
            while True:
                time.sleep(0.25)
                if not window_mutex[-1]: break
                # watchdog: propagate the caller's keep-alive timestamp to every worker
                for i in range(n_model):
                    window_mutex[i][1] = observe_window[1]
                # observation window: merge every worker's partial output
                chat_string = []
                for i in range(n_model):
                    chat_string.append(f"[{str(models[i])} says]: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>")
                res = '<br/><br/>\n\n---\n\n'.join(chat_string)
                observe_window[0] = res

        t_model = threading.Thread(target=mutex_manager, args=(window_mutex, observe_window), daemon=True)
        t_model.start()

        return_string_collect = []
        while True:
            worker_done = [h.done() for h in futures]
            if all(worker_done):
                executor.shutdown()
                break
            time.sleep(1)

        for i, future in enumerate(futures):  # wait and collect the results
            return_string_collect.append(f"[{str(models[i])} says]: <font color=\"{colors[i]}\"> {future.result()} </font>")

        window_mutex[-1] = False  # stop the mutex-manager thread
        res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
        return res
def predict(inputs, llm_kwargs, *args, **kwargs):
    """
    Send a query to the LLM and fetch the output as a stream. Used for the basic chat feature.
    inputs is the input for this query
    top_p, temperature are the LLM's internal tuning parameters
    history is the list of earlier dialogue turns (note: if either inputs or history is too
        long, a token-overflow error will be triggered)
    chatbot is the dialogue list shown in the WebUI; modify it, then yield, to update the
        chat interface directly
    additional_fn indicates which button was clicked; the buttons are defined in functional.py
    """
    method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]  # if this line raises, check the AVAIL_LLM_MODELS option in your config
    yield from method(inputs, llm_kwargs, *args, **kwargs)
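# `predict` is a generator: the UI layer drives it and re-renders the chat after every
# yield. A sketch of how a caller consumes it (the argument names beyond inputs/llm_kwargs
# are assumptions; they are simply forwarded untouched via *args/**kwargs):
#
#   for _ in predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
#       pass  # after each iteration, `chatbot` holds the latest partial reply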