批量Markdown翻译.py

import glob, time, os, re, logging
from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
from toolbox import CatchException, report_exception, get_log_folder
from toolbox import write_history_to_file, promote_file_to_downloadzone

fast_debug = False
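
# Container that tracks the source Markdown files, their token-limited fragments,
# and the translated results as they move through the pipeline.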
class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # Token counter: use the gpt-3.5-turbo tokenizer to measure fragment sizes
        from request_llms.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num
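
    # Split each file into fragments no larger than max_token_limit; every fragment
    # remembers its source file via sp_file_index so translations can be merged back per file.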
    def run_file_split(self, max_token_limit=1900):
        """
        Split overly long texts into smaller pieces.
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")

        logging.info('Segmentation: done')
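
    # Concatenate the translated fragments back into one result string per source file.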
    def merge_result(self):
        self.file_result = ["" for _ in range(len(self.file_paths))]
        for r, k in zip(self.sp_file_result, self.sp_file_index):
            self.file_result[k] += r
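
    # Write each merged result to a timestamped .md file in the log folder and return the
    # list of output paths (the language argument is currently unused).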
    def write_result(self, language):
        manifest = []
        for path, res in zip(self.file_paths, self.file_result):
            dst_file = os.path.join(get_log_folder(), f'{gen_time_str()}.md')
            with open(dst_file, 'w', encoding='utf8') as f:
                manifest.append(dst_file)
                f.write(res)
        return manifest
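

# Translate every file in file_manifest: read each file, split it into token-limited fragments,
# send the fragments to the LLM across multiple threads, then merge the replies and export a
# report to the download zone.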
def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    # <-------- Read the Markdown files ---------->
    pfg = PaperFileGroup()
    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
            # Record the file path and its raw content
            pfg.file_paths.append(fp)
            pfg.file_contents.append(file_content)

    # <-------- Split Markdown files that are too long ---------->
    pfg.run_file_split(max_token_limit=1500)
    n_split = len(pfg.sp_file_contents)

    # <-------- Start multi-threaded translation ---------->
    if language == 'en->zh':
        inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
    elif language == 'zh->en':
        inputs_array = ["This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
    else:
        inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # the maximum parallelism allowed by OpenAI
        scroller_max_len=80
    )
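
    # gpt_response_collection alternates [prompt, reply, prompt, reply, ...];
    # keep only the replies (odd positions) as the per-fragment translation results.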
    try:
        pfg.sp_file_result = []
        for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]):
            pfg.sp_file_result.append(gpt_say)
        pfg.merge_result()
        pfg.write_result(language)
    except Exception:
        logging.error(trimmed_format_exc())

    # <-------- Collect the results and finish ---------->
    create_report_file_name = gen_time_str() + "-chatgpt.md"
    res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name)
    promote_file_to_downloadzone(res, chatbot=chatbot)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
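

# Resolve the user's input into (success, file_manifest, project_folder). The input may be an
# http(s) URL (a GitHub repository whose README is fetched when preference='Github'), a single
# .md file path, or a local directory that is searched recursively for .md files.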
def get_files_from_everything(txt, preference=''):
    if txt == "": return False, None, None
    success = True
    if txt.startswith('http'):
        import requests
        from toolbox import get_conf
        proxies = get_conf('proxies')
        # Remote file on the network
        if preference == 'Github':
            logging.info('正在从github下载资源 ...')
            if not txt.endswith('.md'):
                # Ask the GitHub API for the repository README's download URL
                url = txt.replace("https://github.com/", "https://api.github.com/repos/") + '/readme'
                response = requests.get(url, proxies=proxies)
                txt = response.json()['download_url']
            else:
                txt = txt.replace("https://github.com/", "https://raw.githubusercontent.com/")
                txt = txt.replace("/blob/", "/")
        r = requests.get(txt, proxies=proxies)
        download_local = f'{get_log_folder(plugin_name="批量Markdown翻译")}/raw-readme-{gen_time_str()}.md'
        project_folder = f'{get_log_folder(plugin_name="批量Markdown翻译")}'
        with open(download_local, 'wb+') as f: f.write(r.content)
        file_manifest = [download_local]
    elif txt.endswith('.md'):
        # A single file was given directly
        file_manifest = [txt]
        project_folder = os.path.dirname(txt)
    elif os.path.exists(txt):
        # Local path: search it recursively
        project_folder = txt
        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
    else:
        project_folder = None
        file_manifest = []
        success = False
    return success, file_manifest, project_folder
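

# The three UI entry points below follow the same pattern: check the tiktoken dependency,
# resolve the input into a file manifest, then delegate to 多文件翻译 with the target language.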
@CatchException
def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic information: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    disable_auto_promotion(chatbot)

    # Try to import the dependency; if it is missing, suggest how to install it
    try:
        import tiktoken
    except ImportError:
        report_exception(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    history = []  # clear the history to avoid input overflow
    success, file_manifest, project_folder = get_files_from_everything(txt, preference="Github")
    if not success:
        # Nothing was found
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.md文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')


@CatchException
def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic information: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    disable_auto_promotion(chatbot)

    # Try to import the dependency; if it is missing, suggest how to install it
    try:
        import tiktoken
    except ImportError:
        report_exception(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    history = []  # clear the history to avoid input overflow
    success, file_manifest, project_folder = get_files_from_everything(txt)
    if not success:
        # Nothing was found
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.md文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')


@CatchException
def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic information: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    disable_auto_promotion(chatbot)

    # Try to import the dependency; if it is missing, suggest how to install it
    try:
        import tiktoken
    except ImportError:
        report_exception(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    history = []  # clear the history to avoid input overflow
    success, file_manifest, project_folder = get_files_from_everything(txt)
    if not success:
        # Nothing was found
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.md文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
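
    # The target language comes from the plugin's advanced-args field; fall back to Chinese
    # when it is missing or empty.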
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    language = plugin_kwargs.get("advanced_arg", 'Chinese')
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language=language)