# bridge_chatglm3.py
model_name = "ChatGLM3"
cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"

from toolbox import get_conf, ProxyNetworkActivate
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns

# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetGLM3Handle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
        from transformers import AutoModel, AutoTokenizer
        import os, glob
        import platform

        # Pick the checkpoint according to the quantization level configured by the user
        LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE')
        if LOCAL_MODEL_QUANT == "INT4":    # INT4
            _model_name_ = "THUDM/chatglm3-6b-int4"
        elif LOCAL_MODEL_QUANT == "INT8":  # INT8
            _model_name_ = "THUDM/chatglm3-6b-int8"
        else:
            _model_name_ = "THUDM/chatglm3-6b"  # FP16

        # Download/load the tokenizer and model, routing traffic through the configured proxy if any
        with ProxyNetworkActivate('Download_LLM'):
            chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
            if device == 'cpu':
                chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cpu').float()
            else:
                chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cuda')
            chatglm_model = chatglm_model.eval()

        self._model = chatglm_model
        self._tokenizer = chatglm_tokenizer
        return self._model, self._tokenizer

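    # A minimal sketch of the user config this method reads (the option names come from the
    # get_conf call above; the concrete values shown are illustrative assumptions, not defaults):
    #
    #   LOCAL_MODEL_QUANT  = "FP16"   # "FP16", "INT4", or "INT8"; anything else falls back to FP16
    #   LOCAL_MODEL_DEVICE = "cuda"   # or "cpu"
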
    def llm_stream_generator(self, **kwargs):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
        def adaptor(kwargs):
            # Unpack the generation parameters forwarded from the main process
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)

        # stream_chat yields the cumulative response so far, so each yield supersedes the previous one
        for response, history in self._model.stream_chat(self._tokenizer,
                                                          query,
                                                          history,
                                                          max_length=max_length,
                                                          top_p=top_p,
                                                          temperature=temperature,
                                                          ):
            yield response

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise an error if the user has not installed requirement_*.txt
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
        import importlib
        # importlib.import_module('modelscope')

# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetGLM3Handle, model_name, history_format='chatglm3')
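
# A minimal usage sketch (illustration only). The exact signature of
# predict_no_ui_long_connection is defined by get_local_llm_predict_fns in
# local_llm_class; the argument names and llm_kwargs keys below are assumptions
# based on the keys read in llm_stream_generator above. This module uses a
# relative import, so it is not meant to be executed directly as a script.
#
#     llm_kwargs = {'max_length': 4096, 'top_p': 0.7, 'temperature': 0.3}
#     reply = predict_no_ui_long_connection(
#         inputs="Briefly introduce the ChatGLM3 model.",
#         llm_kwargs=llm_kwargs,
#         history=[],
#         sys_prompt="You are a helpful assistant.",
#     )
#     print(reply)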