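"""
批量翻译PDF文档 (batch-translate PDF documents): a gpt_academic plugin.
It prefers a GROBID-based parsing pipeline and falls back to a deprecated
plain text-extraction pipeline when no GROBID service is reachable.
"""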
from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
from toolbox import write_history_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import read_and_clean_pdf_text
from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf
from colorful import *
import os


@CatchException
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
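    """
    Batch-translate PDF documents (plugin entry point).

    txt             text from the input field, e.g. a path to a folder containing the PDFs to process
    llm_kwargs      LLM parameters such as temperature and top_p, normally passed through unchanged
    plugin_kwargs   plugin-specific parameters (unused here)
    chatbot         handle to the chat display, used to show progress to the user
    history         chat history (context)
    system_prompt   silent system prompt for the LLM
    web_port        port the web UI is currently running on
    """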
    disable_auto_promotion(chatbot)
    # Basic information: what the plugin does and who contributed it
    chatbot.append([
        "函数插件功能?",
        "批量翻译PDF文档。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
    # Try to import the required packages; if any are missing, suggest how to install them
    try:
        check_packages(["fitz", "tiktoken", "scipdf"])
    except Exception:
        report_exception(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    # Clear the history to avoid overflowing the model's input
    history = []
    from .crazy_utils import get_files_from_everything
    success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf')
    # Validate the input; if nothing usable was given, substitute a placeholder for the report below
    if not success:
        if txt == "": txt = '空空如也的输入栏'
    # If no files were found, report and exit
    if len(file_manifest) == 0:
        report_exception(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    # Start the actual task
    grobid_url = get_avail_grobid_url()
    if grobid_url is not None:
        yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
    else:
        # GROBID unavailable: fall back to the older, lower-quality pipeline
        yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
        yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url):
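    """
    Parse each PDF in file_manifest via the GROBID service at grobid_url,
    save the raw GROBID JSON to the log folder, then hand the structured
    article off to translate_pdf for fragment-by-fragment translation.
    """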
    import json
    TOKEN_LIMIT_PER_FRAGMENT = 1024
    generated_conclusion_files = []
    generated_html_files = []
    DST_LANG = "中文"
    for index, fp in enumerate(file_manifest):
        chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"])
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        article_dict = parse_pdf(fp, grobid_url)
        if article_dict is None:
            raise RuntimeError("解析PDF失败,请检查PDF是否损坏。")
        # Save the raw GROBID parse result for inspection
        grobid_json_res = os.path.join(get_log_folder(), gen_time_str() + "grobid.json")
        with open(grobid_json_res, 'w+', encoding='utf8') as f:
            f.write(json.dumps(article_dict, indent=4, ensure_ascii=False))
        promote_file_to_downloadzone(grobid_json_res, chatbot=chatbot)
        yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG)
    chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI


def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    """
    Deprecated: legacy text-extraction pipeline, kept only as a fallback for
    when no GROBID service is available.
    """
    import copy
    TOKEN_LIMIT_PER_FRAGMENT = 1024
    generated_conclusion_files = []
    generated_html_files = []
    from crazy_functions.pdf_fns.report_gen_html import construct_html
    for index, fp in enumerate(file_manifest):
        # Read the PDF file
        file_content, page_one = read_and_clean_pdf_text(fp)
        file_content = file_content.encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
        page_one = str(page_one).encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
        # Recursively split the PDF text into fragments that fit the token limit
        from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
        paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
        page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=page_one, limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
        # For better results, strip everything after the Introduction section (if present)
        paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

        # Single thread: ask the model to extract the paper's meta information
        paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=f"以下是一篇学术论文的基础信息,请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分。请用markdown格式输出,最后用中文翻译摘要部分。请提取:{paper_meta}",
            inputs_show_user=f"请从{fp}中提取出“标题”、“收录会议或期刊”等基本信息。",
            llm_kwargs=llm_kwargs,
            chatbot=chatbot, history=[],
            sys_prompt="Your job is to collect information from materials.",
        )
        # Multiple threads: translate all fragments concurrently
        gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            inputs_array=[
                f"你需要翻译以下内容:\n{frag}" for frag in paper_fragments],
            inputs_show_user_array=[f"\n---\n 原文: \n\n {frag.replace('#', '')} \n---\n 翻译:\n " for frag in paper_fragments],
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history_array=[[paper_meta] for _ in paper_fragments],
            sys_prompt_array=[
                "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in paper_fragments],
            # max_workers=5  # the maximum parallelism OpenAI allows
        )
        gpt_response_collection_md = copy.deepcopy(gpt_response_collection)
        # Format the markdown report: even entries are replaced with annotated
        # original fragments, odd entries already hold the translations
        for i, k in enumerate(gpt_response_collection_md):
            if i % 2 == 0:
                gpt_response_collection_md[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection_md)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection_md)//2}]:\n "
        final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
        final.extend(gpt_response_collection_md)
        create_report_file_name = f"{os.path.basename(fp)}.trans.md"
        res = write_history_to_file(final, create_report_file_name)
        promote_file_to_downloadzone(res, chatbot=chatbot)
        # Update the UI
        generated_conclusion_files.append(f'{get_log_folder()}/{create_report_file_name}')
        chatbot.append((f"{fp}完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        # Write the HTML report (original and translation side by side)
        try:
            ch = construct_html()
            orig = ""
            trans = ""
            gpt_response_collection_html = copy.deepcopy(gpt_response_collection)
            # Even entries become the cleaned original fragments; odd entries
            # keep the translations returned by the model
            for i, k in enumerate(gpt_response_collection_html):
                if i % 2 == 0:
                    gpt_response_collection_html[i] = paper_fragments[i//2].replace('#', '')
            final = ["论文概况", paper_meta_info.replace('# ', '### '), "二、论文翻译", ""]
            final.extend(gpt_response_collection_html)
            # Emit one table row per (original, translation) pair
            for i, k in enumerate(final):
                if i % 2 == 0:
                    orig = k
                if i % 2 == 1:
                    trans = k
                    ch.add_row(a=orig, b=trans)
            create_report_file_name = f"{os.path.basename(fp)}.trans.html"
            generated_html_files.append(ch.save_file(create_report_file_name))
        except Exception:
            from toolbox import trimmed_format_exc
            print('writing html result failed:', trimmed_format_exc())
    # Prepare the generated files for download
    for pdf_path in generated_conclusion_files:
        # Rename the file
        rename_file = f'翻译-{os.path.basename(pdf_path)}'
        promote_file_to_downloadzone(pdf_path, rename_file=rename_file, chatbot=chatbot)
    for html_path in generated_html_files:
        # Rename the file
        rename_file = f'翻译-{os.path.basename(html_path)}'
        promote_file_to_downloadzone(html_path, rename_file=rename_file, chatbot=chatbot)
    chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
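

# A minimal usage sketch (assumption: this module is normally invoked by
# gpt_academic's plugin dispatcher, which supplies real llm_kwargs, chatbot
# and history objects; the names below are illustrative, not part of this
# module):
#
#     plugin = 批量翻译PDF文档
#     for _ in plugin(txt="path/to/pdf_folder", llm_kwargs=llm_kwargs,
#                     plugin_kwargs={}, chatbot=chatbot, history=[],
#                     system_prompt="", web_port=8080):
#         pass  # each yield refreshes the web UI via update_ui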