diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml deleted file mode 100644 index 9c8e5016a783666ee6033569e7fcec5bf6356e34..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ /dev/null @@ -1,75 +0,0 @@ -name: Report Bug | 报告BUG -description: "Report bug" -title: "[Bug]: " -labels: [] -body: - - type: dropdown - id: download - attributes: - label: Installation Method | 安装方法与平台 - options: - - Please choose | 请选择 - - Pip Install (I ignored requirements.txt) - - Pip Install (I used latest requirements.txt) - - Anaconda (I ignored requirements.txt) - - Anaconda (I used latest requirements.txt) - - Docker(Windows/Mac) - - Docker(Linux) - - Docker-Compose(Windows/Mac) - - Docker-Compose(Linux) - - Huggingface - - Others (Please Describe) - validations: - required: true - - - type: dropdown - id: version - attributes: - label: Version | 版本 - options: - - Please choose | 请选择 - - Latest | 最新版 - - Others | 非最新版 - validations: - required: true - - - type: dropdown - id: os - attributes: - label: OS | 操作系统 - options: - - Please choose | 请选择 - - Windows - - Mac - - Linux - - Docker - validations: - required: true - - - type: textarea - id: describe - attributes: - label: Describe the bug | 简述 - description: Describe the bug | 简述 - validations: - required: true - - - type: textarea - id: screenshot - attributes: - label: Screen Shot | 有帮助的截图 - description: Screen Shot | 有帮助的截图 - validations: - required: true - - - type: textarea - id: traceback - attributes: - label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有) - description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有) - - - - - - diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml deleted file mode 100644 index 80ac7e311c9d191f43f778f3fcbdf9d2585c2db3..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Feature Request | 功能请求 -description: "Feature Request" -title: "[Feature]: " -labels: [] -body: - - type: dropdown - id: download - attributes: - label: Class | 类型 - options: - - Please choose | 请选择 - - 其他 - - 函数插件 - - 大语言模型 - - 程序主体 - validations: - required: false - - - type: textarea - id: traceback - attributes: - label: Feature Request | 功能请求 - description: Feature Request | 功能请求 - - - - - diff --git a/.github/workflows/build-with-chatglm.yml b/.github/workflows/build-with-chatglm.yml deleted file mode 100644 index f968bb962a026ebb367121607885f8496addfe0e..0000000000000000000000000000000000000000 --- a/.github/workflows/build-with-chatglm.yml +++ /dev/null @@ -1,44 +0,0 @@ -# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages -name: Create and publish a Docker image for ChatGLM support - -on: - push: - branches: - - 'master' - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }}_chatglm_moss - -jobs: - build-and-push-image: - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Log in to the Container registry - uses: docker/login-action@v2 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v4 - with: - images: ${{ env.REGISTRY 
}}/${{ env.IMAGE_NAME }} - - - name: Build and push Docker image - uses: docker/build-push-action@v4 - with: - context: . - push: true - file: docs/GithubAction+ChatGLM+Moss - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/build-with-jittorllms.yml b/.github/workflows/build-with-jittorllms.yml deleted file mode 100644 index c0ce126a9dafa07a176dd5f12f7260f81e20e437..0000000000000000000000000000000000000000 --- a/.github/workflows/build-with-jittorllms.yml +++ /dev/null @@ -1,44 +0,0 @@ -# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages -name: Create and publish a Docker image for ChatGLM support - -on: - push: - branches: - - 'master' - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }}_jittorllms - -jobs: - build-and-push-image: - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Log in to the Container registry - uses: docker/login-action@v2 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v4 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - - - name: Build and push Docker image - uses: docker/build-push-action@v4 - with: - context: . - push: true - file: docs/GithubAction+JittorLLMs - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/build-with-latex.yml b/.github/workflows/build-with-latex.yml deleted file mode 100644 index fb16d2c11fdc7e572bb78a3513b6c91744429a4b..0000000000000000000000000000000000000000 --- a/.github/workflows/build-with-latex.yml +++ /dev/null @@ -1,44 +0,0 @@ -# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages -name: Create and publish a Docker image for Latex support - -on: - push: - branches: - - 'master' - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }}_with_latex - -jobs: - build-and-push-image: - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Log in to the Container registry - uses: docker/login-action@v2 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v4 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - - - name: Build and push Docker image - uses: docker/build-push-action@v4 - with: - context: . 
- push: true - file: docs/GithubAction+NoLocal+Latex - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/build-without-local-llms.yml b/.github/workflows/build-without-local-llms.yml deleted file mode 100644 index b0aed7f6b595bf89bf22d25f7e1fbe966f4f37eb..0000000000000000000000000000000000000000 --- a/.github/workflows/build-without-local-llms.yml +++ /dev/null @@ -1,44 +0,0 @@ -# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages -name: Create and publish a Docker image - -on: - push: - branches: - - 'master' - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }}_nolocal - -jobs: - build-and-push-image: - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Log in to the Container registry - uses: docker/login-action@v2 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v4 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - - - name: Build and push Docker image - uses: docker/build-push-action@v4 - with: - context: . - push: true - file: docs/GithubAction+NoLocal - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} diff --git a/.gitignore b/.gitignore index 18d3fb84e33168f062969608f9c84d91377b0f4d..f7b86e006917519e5174a06d5c0b558d62ab6ca1 100644 --- a/.gitignore +++ b/.gitignore @@ -131,22 +131,9 @@ dmypy.json # Pyre type checker .pyre/ -.vscode -.idea - history ssr_conf config_private.py gpt_log private.md -private_upload -other_llms -cradle* -debug* -private* -crazy_functions/test_project/pdf_and_word -crazy_functions/test_samples -request_llm/jittorllms -multi-language -request_llm/moss -media +private_upload \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 545fb400e12008a93b854ea0264c253578a9ba86..0000000000000000000000000000000000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,32 +0,0 @@ -default_language_version: - python: python3 -exclude: 'dotnet' -ci: - autofix_prs: true - autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' - autoupdate_schedule: 'quarterly' - -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: check-ast - # - id: check-yaml - - id: check-toml - - id: check-json - - id: check-byte-order-marker - exclude: .gitignore - - id: check-merge-conflict - - id: detect-private-key - - id: trailing-whitespace - - id: end-of-file-fixer - - id: no-commit-to-branch - - repo: https://github.com/psf/black - rev: 23.3.0 - hooks: - - id: black - # - repo: https://github.com/charliermarsh/ruff-pre-commit - # rev: v0.0.261 - # hooks: - # - id: ruff - # args: ["--fix"] diff --git a/Dockerfile b/Dockerfile index 97dba6f114fd0db32bc14d123e699e341ca9c02c..564392c933342f77731be47faa417bb8906067bc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,34 +1,13 @@ -# 此Dockerfile适用于“无本地模型”的迷你运行环境构建 -# 如果需要使用chatglm等本地模型或者latex运行依赖,请参考 docker-compose.yml -# - 如何构建: 先修改 `config.py`, 然后 `docker build -t gpt-academic . 
` -# - 如何运行(Linux下): `docker run --rm -it --net=host gpt-academic ` -# - 如何运行(其他操作系统,选择任意一个固定端口50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic ` FROM python:3.11 - -# 非必要步骤,更换pip源 (以下三行,可以删除) RUN echo '[global]' > /etc/pip.conf && \ echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \ echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf +RUN pip3 install gradio requests[socks] mdtex2html -# 进入工作路径(必要) +COPY . /gpt WORKDIR /gpt -# 安装大部分依赖,利用Docker缓存加速以后的构建 (以下三行,可以删除) -COPY requirements.txt ./ -RUN pip3 install -r requirements.txt - - -# 装载项目文件,安装剩余依赖(必要) -COPY . . -RUN pip3 install -r requirements.txt - - -# 非必要步骤,用于预热模块(可以删除) -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - - -# 启动(必要) -CMD ["python3", "-u", "main.py"] +CMD ["python3", "main.py"] \ No newline at end of file diff --git a/README.md b/README.md index 49fa0683dd3d1ac2826be4c2d43a879a5aa705f1..8e018501783df2b3e0947605555ccc84aef9fd60 100644 --- a/README.md +++ b/README.md @@ -1,456 +1,217 @@ + --- -title: GPT-Academic +title: ChatImprovement emoji: 😻 colorFrom: blue colorTo: blue sdk: gradio -sdk_version: 3.32.0 +sdk_version: 3.23.0 app_file: app.py pinned: false --- # ChatGPT 学术优化 -> **Note** -> -> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。 -> -> 2023.12.26: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。 - -
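The note above pins the interpreter (some dependencies are not yet compatible with Python 3.12) and the dependency versions in `requirements.txt`. A minimal sketch of a start-up guard along those lines, illustrative only and not part of the project:

```python
import sys

# Some dependencies do not support Python 3.12 yet; 3.9-3.11 is recommended.
if not ((3, 9) <= sys.version_info[:2] <= (3, 11)):
    print("Warning: Python 3.9-3.11 is recommended; "
          "install the pinned dependencies with `pip install -r requirements.txt`.")
```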
- GPT 学术优化 (GPT Academic)
- -[![Github][Github-image]][Github-url] -[![License][License-image]][License-url] -[![Releases][Releases-image]][Releases-url] -[![Installation][Installation-image]][Installation-url] -[![Wiki][Wiki-image]][Wiki-url] -[![PR][PRs-image]][PRs-url] - -[Github-image]: https://img.shields.io/badge/github-12100E.svg?style=flat-square -[License-image]: https://img.shields.io/github/license/binary-husky/gpt_academic?label=License&style=flat-square&color=orange -[Releases-image]: https://img.shields.io/github/release/binary-husky/gpt_academic?label=Release&style=flat-square&color=blue -[Installation-image]: https://img.shields.io/badge/dynamic/json?color=blue&url=https://raw.githubusercontent.com/binary-husky/gpt_academic/master/version&query=$.version&label=Installation&style=flat-square -[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square -[PRs-image]: https://img.shields.io/badge/PRs-welcome-pink?style=flat-square - -[Github-url]: https://github.com/binary-husky/gpt_academic -[License-url]: https://github.com/binary-husky/gpt_academic/blob/master/LICENSE -[Releases-url]: https://github.com/binary-husky/gpt_academic/releases -[Installation-url]: https://github.com/binary-husky/gpt_academic#installation -[Wiki-url]: https://github.com/binary-husky/gpt_academic/wiki -[PRs-url]: https://github.com/binary-husky/gpt_academic/pulls - - -
- -**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!** - -If you like this project, please give it a Star. -Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). -
-> [!NOTE] -> 1.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 -> [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki)) -> -> 2.本项目兼容并鼓励尝试国内中文大语言基座模型如通义千问,智谱GLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。 - -
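The note above introduces the comma-separated `API_KEY` convention (`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`). A minimal sketch of how such a setting can be split and one matching key picked per request; the helper names here are illustrative, not the project's actual implementation:

```python
import random

def split_keys(api_key_setting: str) -> list:
    """Split 'openai-key1,openai-key2,azure-key3' into individual keys."""
    return [k.strip() for k in api_key_setting.split(",") if k.strip()]

def pick_api_key(api_key_setting: str, prefix: str = "sk-") -> str:
    """Randomly pick one key with the wanted provider prefix (naive load balancing)."""
    candidates = [k for k in split_keys(api_key_setting) if k.startswith(prefix)]
    if not candidates:
        raise ValueError("No API key of the requested type is configured.")
    return random.choice(candidates)

print(pick_api_key("sk-aaa,sk-bbb,azure-ccc"))  # -> 'sk-aaa' or 'sk-bbb'
```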

+**如果喜欢这个项目,请给它一个Star;如果你发明了更好用的学术快捷键,欢迎发issue或者pull requests** +If you like this project, please give it a Star. If you've come up with more useful academic shortcuts, feel free to open an issue or pull request.
-功能(⭐= 近期新增功能) | 描述 +功能 | 描述 --- | --- -⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱GLM4](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/) -⭐支持mermaid图像渲染 | 支持让GPT生成[流程图](https://www.bilibili.com/video/BV18c41147H9/)、状态转移图、甘特图、饼状图、GitGraph等等(3.7版本) -⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具 -⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机 -⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能! -⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件 -润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码 -[自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键 -模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW) -读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要 -Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文 -批量注释生成 | [插件] 一键批量生成函数注释 -Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔 -[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程) -[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF -Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF -[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/) -互联网信息聚合+GPT | [插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时 -公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮 -启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题 -[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧? -更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/) -⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中) -更多新功能展示 (图像生成等) …… | 见本文档结尾处 …… -
+一键润色 | 支持一键润色、一键查找论文语法错误 +一键中英互译 | 一键中英互译 +一键代码解释 | 可以正确显示代码、解释代码 +自定义快捷键 | 支持自定义快捷键 +配置代理服务器 | 支持配置代理服务器 +模块化设计 | 支持自定义高阶的实验性功能 +自我程序剖析 | [实验性功能] 一键读懂本项目的源代码 +程序剖析 | [实验性功能] 一键可以剖析其他Python/C++项目 +读论文 | [实验性功能] 一键解读latex论文全文并生成摘要 +批量注释生成 | [实验性功能] 一键批量生成函数注释 +chat分析报告生成 | [实验性功能] 运行后自动生成总结汇报 +公式显示 | 可以同时显示公式的tex形式和渲染形式 +图片显示 | 可以在markdown中显示图片 +支持GPT输出的markdown表格 | 可以输出支持GPT的markdown表格 + -- 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换) +- 新界面
-- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板 -
-- 润色/纠错 +- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板
-- 如果输出包含公式,会以tex形式和渲染形式同时显示,方便复制和阅读 +- 代码的显示自然也不在话下 https://www.bilibili.com/video/BV1F24y147PD/
-- 懒得看项目代码?直接把整个工程炫ChatGPT嘴里 + +- 支持GPT输出的markdown表格
-- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + GPT4) +- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读
-

- -# Installation - -```mermaid -flowchart TD - A{"安装方法"} --> W1("I. 🔑直接运行 (Windows, Linux or MacOS)") - W1 --> W11["1. Python pip包管理依赖"] - W1 --> W12["2. Anaconda包管理依赖(推荐⭐)"] - - A --> W2["II. 🐳使用Docker (Windows, Linux or MacOS)"] - W2 --> k1["1. 部署项目全部能力的大镜像(推荐⭐)"] - W2 --> k2["2. 仅在线模型(GPT, GLM4等)镜像"] - W2 --> k3["3. 在线模型 + Latex的大镜像"] +- 懒得看项目代码?整个工程直接给chatgpt炫嘴里 +
+ +
- A --> W4["IV. 🚀其他部署方法"] - W4 --> C1["1. Windows/MacOS 一键安装运行脚本(推荐⭐)"] - W4 --> C2["2. Huggingface, Sealos远程部署"] - W4 --> C4["3. ... 其他 ..."] +## 直接运行 (Windows or Linux or MacOS) + +``` sh +# 下载项目 +git clone https://github.com/binary-husky/chatgpt_academic.git +cd chatgpt_academic +# 在config.py中,配置 海外Proxy 和 OpenAI API KEY +- 1.如果你在国内,需要设置海外代理才能够使用 OpenAI API,你可以通过 config.py 文件来进行设置。 +- 2.配置 OpenAI API KEY。你需要在 OpenAI 官网上注册并获取 API KEY。一旦你拿到了 API KEY,在 config.py 文件里配置好即可。 +# 安装依赖 +python -m pip install -r requirements.txt +# 运行 +python main.py + +# 测试实验性功能 +## 测试C++项目头文件分析 +input区域 输入 ./crazy_functions/test_project/cpp/libJPG , 然后点击 "[实验] 解析整个C++项目(input输入项目根路径)" +## 测试给Latex项目写摘要 +input区域 输入 ./crazy_functions/test_project/latex/attention , 然后点击 "[实验] 读tex论文写摘要(input输入项目根路径)" +## 测试Python项目分析 +input区域 输入 ./crazy_functions/test_project/python/dqn , 然后点击 "[实验] 解析整个py项目(input输入项目根路径)" +## 测试自我代码解读 +点击 "[实验] 请解析并解构此项目本身" +## 测试实验功能模板函数(要求gpt回答几个数的平方是什么),您可以根据此函数为模板,实现更复杂的功能 +点击 "[实验] 实验功能函数模板" ``` -### 安装方法I:直接运行 (Windows, Linux or MacOS) - -1. 下载项目 - - ```sh - git clone --depth=1 https://github.com/binary-husky/gpt_academic.git - cd gpt_academic - ``` -2. 配置API_KEY等变量 +## 使用docker (Linux) + +``` sh +# 下载项目 +git clone https://github.com/binary-husky/chatgpt_academic.git +cd chatgpt_academic +# 配置 海外Proxy 和 OpenAI API KEY +config.py +# 安装 +docker build -t gpt-academic . +# 运行 +docker run --rm -it --net=host gpt-academic + +# 测试实验性功能 +## 测试自我代码解读 +点击 "[实验] 请解析并解构此项目本身" +## 测试实验功能模板函数(要求gpt回答几个数的平方是什么),您可以根据此函数为模板,实现更复杂的功能 +点击 "[实验] 实验功能函数模板" +##(请注意在docker中运行时,需要额外注意程序的文件访问权限问题) +## 测试C++项目头文件分析 +input区域 输入 ./crazy_functions/test_project/cpp/libJPG , 然后点击 "[实验] 解析整个C++项目(input输入项目根路径)" +## 测试给Latex项目写摘要 +input区域 输入 ./crazy_functions/test_project/latex/attention , 然后点击 "[实验] 读tex论文写摘要(input输入项目根路径)" +## 测试Python项目分析 +input区域 输入 ./crazy_functions/test_project/python/dqn , 然后点击 "[实验] 解析整个py项目(input输入项目根路径)" - 在`config.py`中,配置API KEY等变量。[特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1)、[Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 - - 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,从而确保自动更新时不会丢失配置 」。 - - 「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。 - - -3. 安装依赖 - ```sh - # (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ - python -m pip install -r requirements.txt - - # (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): - conda create -n gptac_venv python=3.11 # 创建anaconda环境 - conda activate gptac_venv # 激活anaconda环境 - python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤 - ``` - - -
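Step 2 above defines the read priority 环境变量 > `config_private.py` > `config.py` (the project's real implementation lives behind `toolbox.get_conf`). A minimal sketch of that lookup order, using a hypothetical helper name:

```python
import importlib
import os

def get_conf_single(name: str, default=None):
    # 1) an environment variable always wins
    if name in os.environ:
        return os.environ[name]
    # 2) then the private, git-ignored config_private.py
    try:
        return getattr(importlib.import_module("config_private"), name)
    except (ImportError, AttributeError):
        pass
    # 3) finally the tracked config.py
    return getattr(importlib.import_module("config"), name, default)
```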
如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处 -

- -【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强): - -```sh -# 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# 【可选步骤II】支持复旦MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # 注意执行此行代码时,必须处于项目根路径 - -# 【可选步骤III】支持RWKV Runner -参考wiki:https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner - -# 【可选步骤IV】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] - -# 【可选步骤V】支持本地模型INT8,INT4量化(这里所指的模型本身不是量化版本,目前deepseek-coder支持,后面测试后会加入更多模型量化选择) -pip install bitsandbyte -# windows用户安装bitsandbytes需要使用下面bitsandbytes-windows-webui -python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui -pip install -U git+https://github.com/huggingface/transformers.git -pip install -U git+https://github.com/huggingface/accelerate.git -pip install peft ``` -
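Optional step V above turns on INT8/INT4 quantization through bitsandbytes. A sketch of what the quantized load can look like with the `load_in_8bit` flag of transformers; the model id is only an example, and the exact kwargs should be verified against the transformers version you installed:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"  # example id, not prescribed here
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    load_in_8bit=True,    # weights quantized to INT8 via bitsandbytes
    device_map="auto",    # let accelerate spread layers over available devices
    trust_remote_code=True,
)
```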

-
- - - -4. 运行 - ```sh - python main.py - ``` - -### 安装方法II:使用Docker - -0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐该方法部署完整项目) -[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) - - ``` sh - # 修改docker-compose.yml,保留方案0并删除其他方案。然后运行: - docker-compose up - ``` - -1. 仅ChatGPT + GLM4 + 文心一言+spark等在线模型(推荐大多数人选择) -[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) -[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) -[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) - - ``` sh - # 修改docker-compose.yml,保留方案1并删除其他方案。然后运行: - docker-compose up - ``` -P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。 - -2. ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) -[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) - - ``` sh - # 修改docker-compose.yml,保留方案2并删除其他方案。然后运行: - docker-compose up - ``` - - -### 安装方法III:其他部署方法 -1. **Windows一键运行脚本**。 -完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。脚本贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。 - -2. 使用第三方API、Azure等、文心一言、星火等,见[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) - -3. 云服务器远程部署避坑指南。 -请访问[云服务器远程部署wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -4. 在其他平台部署&二级网址部署 - - 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。 - - 使用WSL2(Windows Subsystem for Linux 子系统)。请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - 如何在二级网址(如`http://localhost/subpath`)下运行。请访问[FastAPI运行说明](docs/WithFastapi.md) - -
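For the second-level-URL deployment mentioned last, `docs/WithFastapi.md` is authoritative; the general pattern is mounting the Gradio app under a subpath with FastAPI. A minimal sketch, assuming the standard `gr.mount_gradio_app` helper:

```python
from fastapi import FastAPI
import gradio as gr
import uvicorn

with gr.Blocks() as demo:
    gr.Markdown("GPT Academic served under /subpath")

app = FastAPI()
app = gr.mount_gradio_app(app, demo, path="/subpath")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=50923)  # port is an example
```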

- -# Advanced Usage -### I:自定义新的便捷按钮(学术快捷键) - -任意文本编辑器打开`core_functional.py`,添加如下条目,然后重启程序。(如果按钮已存在,那么可以直接修改(前缀、后缀都已支持热修改),无需重启程序即可生效。) +## 自定义新的便捷按钮(学术快捷键自定义) +打开functional.py,添加条目如下,然后重启程序即可。(如果按钮已经添加成功并可见,那么前缀、后缀都支持热修改,无需重启程序即可生效。) 例如 - -```python +``` "超级英译中": { - # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等 - "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n", + # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等 + "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n", + # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来。 "Suffix": "", + }, ``` -
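As the comments in the entry above state, the button mechanically wraps whatever is in the input box with `Prefix` and `Suffix` before sending it to the model. A minimal sketch of that composition:

```python
entry = {
    "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
    "Suffix": "",
}

def apply_shortcut(entry: dict, user_input: str) -> str:
    """Compose the final prompt: Prefix + input + Suffix."""
    return entry["Prefix"] + user_input + entry["Suffix"]

print(apply_shortcut(entry, "Attention is all you need."))
```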
-### II:自定义函数插件 -编写强大的函数插件来执行任何你想得到的和想不到的任务。 -本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。 -详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。 -

+如果你发明了更好用的学术快捷键,欢迎发issue或者pull requests! -# Updates -### I:动态 +## 配置代理 -1. 对话保存功能。在函数插件区调用 `保存当前的对话` 即可将当前对话保存为可读+可复原的html文件, -另外在函数插件区(下拉菜单)调用 `载入对话历史存档` ,即可还原之前的会话。 -Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史html存档缓存。 -
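The 配置代理 section above requires the port in `config.py` to match the one your proxy client listens on. A hypothetical example of such a `proxies` entry (protocol, host, and port are illustrations, not defaults):

```python
# In config.py (values are illustrative; adjust to your own proxy software):
proxies = {
    "http":  "socks5h://localhost:11284",
    "https": "socks5h://localhost:11284",
}
```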
+在```config.py```中修改端口与代理软件对应 -2. ⭐Latex/Arxiv论文翻译功能⭐
-3. 虚空终端(从自然语言输入中,理解用户意图+自动调用其他插件) +配置完成后,你可以用以下命令测试代理是否工作,如果一切正常,下面的代码将输出你的代理服务器所在地: +``` +python check_proxy.py +``` -- 步骤一:输入 “ 请调用插件翻译PDF论文,地址为https://openreview.net/pdf?id=rJl0r3R9KX ” -- 步骤二:点击“虚空终端” +## 兼容性测试 +### 图片显示:
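`python check_proxy.py`, suggested above, reports where requests exit. A stand-alone sketch that mirrors the `check_proxy()` logic appearing later in this diff (a geo-IP lookup routed through the configured proxy):

```python
import requests

def check_proxy(proxies: dict) -> str:
    try:
        data = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4).json()
        return f"代理配置 {proxies['https']}, 代理所在地:{data['country_name']}"
    except Exception:
        return f"代理配置 {proxies.get('https')}, 代理所在地查询超时,代理可能无效"

proxies = {"http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284"}
print(check_proxy(proxies))
```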
-4. 模块化功能设计,简单的接口却能支持强大的功能 +### 如果一个程序能够读懂并剖析自己: +
-5. 译解其他开源项目
-6. 装饰[live2d](https://github.com/fghrsh/live2d_demo)的小功能(默认关闭,需要修改`config.py`) +### 其他任意Python/Cpp项目剖析:
-7. OpenAI图像生成
-8. 基于mermaid的流图、脑图绘制 +### Latex论文一键阅读理解与摘要生成
-9. Latex全文校对纠错 +### 自动报告生成
-10. 语言、主题切换 +### 模块化功能设计
- -### II:版本: -- version 3.80(TODO): 优化AutoGen插件主题并设计一系列衍生插件 -- version 3.70: 引入Mermaid绘图,实现GPT画脑图等功能 -- version 3.60: 引入AutoGen作为新一代插件的基石 -- version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG -- version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面 -- version 3.55: 重构前端界面,引入悬浮窗口与菜单栏 -- version 3.54: 新增动态代码解释器(Code Interpreter)(待完善) -- version 3.53: 支持动态选择不同界面主题,提高稳定性&解决多用户冲突问题 -- version 3.50: 使用自然语言调用本项目的所有函数插件(虚空终端),支持插件分类,改进UI,设计新主题 -- version 3.49: 支持百度千帆平台和文心一言 -- version 3.48: 支持阿里达摩院通义千问,上海AI-Lab书生,讯飞星火 -- version 3.46: 支持完全脱手操作的实时语音对话 -- version 3.45: 支持自定义ChatGLM2微调模型 -- version 3.44: 正式支持Azure,优化界面易用性 -- version 3.4: +arxiv论文翻译、latex论文批改功能 -- version 3.3: +互联网信息综合功能 -- version 3.2: 函数插件支持更多参数接口 (保存对话功能, 解读任意语言代码+同时询问任意的LLM组合) -- version 3.1: 支持同时问询多个gpt模型!支持api2d,支持多个apikey负载均衡 -- version 3.0: 对chatglm和其他小型llm的支持 -- version 2.6: 重构了插件结构,提高了交互性,加入更多插件 -- version 2.5: 自更新,解决总结大工程源代码时文本过长、token溢出的问题 -- version 2.4: 新增PDF全文翻译功能; 新增输入区切换位置的功能 -- version 2.3: 增强多线程交互性 -- version 2.2: 函数插件支持热重载 -- version 2.1: 可折叠式布局 -- version 2.0: 引入模块化函数插件 -- version 1.0: 基础功能 - -GPT Academic开发者QQ群:`610599535` - -- 已知问题 - - 某些浏览器翻译插件干扰此软件前端的运行 - - 官方Gradio目前有很多兼容性问题,请**务必使用`requirement.txt`安装Gradio** - -```mermaid -timeline LR - title GPT-Academic项目发展历程 - section 2.x - 1.0~2.2: 基础功能: 引入模块化函数插件: 可折叠式布局: 函数插件支持热重载 - 2.3~2.5: 增强多线程交互性: 新增PDF全文翻译功能: 新增输入区切换位置的功能: 自更新 - 2.6: 重构了插件结构: 提高了交互性: 加入更多插件 - section 3.x - 3.0~3.1: 对chatglm支持: 对其他小型llm支持: 支持同时问询多个gpt模型: 支持多个apikey负载均衡 - 3.2~3.3: 函数插件支持更多参数接口: 保存对话功能: 解读任意语言代码: 同时询问任意的LLM组合: 互联网信息综合功能 - 3.4: 加入arxiv论文翻译: 加入latex论文批改功能 - 3.44: 正式支持Azure: 优化界面易用性 - 3.46: 自定义ChatGLM2微调模型: 实时语音对话 - 3.49: 支持阿里达摩院通义千问: 上海AI-Lab书生: 讯飞星火: 支持百度千帆平台 & 文心一言 - 3.50: 虚空终端: 支持插件分类: 改进UI: 设计新主题 - 3.53: 动态选择不同界面主题: 提高稳定性: 解决多用户冲突问题 - 3.55: 动态代码解释器: 重构前端界面: 引入悬浮窗口与菜单栏 - 3.56: 动态追加基础功能按钮: 新汇报PDF汇总页面 - 3.57: GLM3, 星火v3: 支持文心一言v4: 修复本地模型的并发BUG - 3.60: 引入AutoGen - 3.70: 引入Mermaid绘图: 实现GPT画脑图等功能 - 3.80(TODO): 优化AutoGen插件主题: 设计衍生插件 - -``` - - -### III:主题 -可以通过修改`THEME`选项(config.py)变更主题 -1. `Chuanhu-Small-and-Beautiful` [网址](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) - - -### IV:本项目的开发分支 - -1. `master` 分支: 主分支,稳定版 -2. `frontier` 分支: 开发分支,测试版 -3. 如何[接入其他大模型](request_llms/README.md) -4. 访问GPT-Academic的[在线服务并支持我们](https://github.com/binary-husky/gpt_academic/wiki/online) - -### V:参考与学习 - +## 参考项目 ``` -代码中参考了很多其他优秀项目中的设计,顺序不分先后: - -# 清华ChatGLM2-6B: -https://github.com/THUDM/ChatGLM2-6B - -# 清华JittorLLMs: -https://github.com/Jittor/JittorLLMs - -# ChatPaper: -https://github.com/kaixindelele/ChatPaper - -# Edge-GPT: -https://github.com/acheong08/EdgeGPT - -# ChuanhuChatGPT: -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Oobabooga one-click installer: -https://github.com/oobabooga/one-click-installers - -# More: +https://github.com/Python-Markdown/markdown https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo +https://github.com/polarwinkel/mdtex2html +https://github.com/GaiZhenbiao/ChuanhuChatGPT ``` diff --git a/app.py b/app.py index b3b5abb3423a4ff5a50a580481ad0b4dfeb68d09..471355a22946aa85f003c3b18ffc1164ff24605b 100644 --- a/app.py +++ b/app.py @@ -1,412 +1,111 @@ -import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 - -help_menu_description = \ -"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), -感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors). -

常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), -如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues). -

普通对话使用说明: 1. 输入问题; 2. 点击提交 -

基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮 -

函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮 -

虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端 -

如何保存对话: 点击保存当前的对话按钮 -

如何语音对话: 请阅读Wiki -

如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)""" - -def main(): - import subprocess, sys - subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://public.agent-matrix.com/publish/gradio-3.32.8-py3-none-any.whl']) - import gradio as gr - if gr.__version__ not in ['3.32.8']: - raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.") - from request_llms.bridge_all import predict - from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith - # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址 - proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') - CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') - ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU') - DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE') - INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT') - - # 如果WEB_PORT是-1, 则随机选取WEB端口 - PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT - from check_proxy import get_current_version - from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2 - from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init - from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie - title_html = f"

GPT 学术优化 {get_current_version()}

{theme_declaration}" - - # 问询记录, python 版本建议3.9+(越新越好) - import logging, uuid - os.makedirs(PATH_LOGGING, exist_ok=True) - try:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") - except:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") - # Disable logging output from the 'httpx' logger - logging.getLogger("httpx").setLevel(logging.WARNING) - print(f"所有问询记录将自动保存在本地目录./{PATH_LOGGING}/chat_secrets.log, 请注意自我隐私保护哦!") - - # 一些普通功能模块 - from core_functional import get_core_functions - functional = get_core_functions() - - # 高级函数插件 - from crazy_functional import get_crazy_functions - DEFAULT_FN_GROUPS = get_conf('DEFAULT_FN_GROUPS') - plugins = get_crazy_functions() - all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')])) - match_group = lambda tags, groups: any([g in groups for g in tags.split('|')]) - - # 处理markdown文本格式的转变 - gr.Chatbot.postprocess = format_io - - # 做一些外观色彩上的调整 - set_theme = adjust_theme() - - # 代理与自动更新 - from check_proxy import check_proxy, auto_update, warm_up_modules - proxy_info = check_proxy(proxies) - - gr_L1 = lambda: gr.Row().style() - gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400) - if LAYOUT == "TOP-DOWN": - gr_L1 = lambda: DummyWith() - gr_L2 = lambda scale, elem_id: gr.Row() - CHATBOT_HEIGHT /= 2 - - cancel_handles = [] - customize_btns = {} - predefined_btns = {} - with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo: - gr.HTML(title_html) - gr.HTML('''
Duplicate Space请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!使用时,先在输入框填入API-KEY然后回车。
切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!
支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。
''') - secret_css, dark_mode, py_pickle_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False) - cookies = gr.State(load_chat_cookies()) - with gr_L1(): - with gr_L2(scale=2, elem_id="gpt-chat"): - chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot") - if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT) - history = gr.State([]) - with gr_L2(scale=1, elem_id="gpt-panel"): - with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary: - with gr.Row(): - txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持多个OpenAI密钥共存。").style(container=False) - with gr.Row(): - submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary") - with gr.Row(): - resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm") - stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm") - clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm") - if ENABLE_AUDIO: - with gr.Row(): - audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False) - with gr.Row(): - status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel") - - with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn: - with gr.Row(): - for k in range(NUM_CUSTOM_BASIC_BTN): - customize_btn = gr.Button("自定义按钮" + str(k+1), visible=False, variant="secondary", info_str=f'基础功能区: 自定义按钮') - customize_btn.style(size="sm") - customize_btns.update({"自定义按钮" + str(k+1): customize_btn}) - for k in functional: - if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue - variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" - functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}') - functional[k]["Button"].style(size="sm") - predefined_btns.update({k: functional[k]["Button"]}) - with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn: - with gr.Row(): - gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)") - with gr.Row(elem_id="input-plugin-group"): - plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS, - multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False) - with gr.Row(): - for k, plugin in plugins.items(): - if not plugin.get("AsButton", True): continue - visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False - variant = plugins[k]["Color"] if "Color" in plugin else "secondary" - info = plugins[k].get("Info", k) - plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, - visible=visible, info_str=f'函数插件区: {info}').style(size="sm") - with gr.Row(): - with gr.Accordion("更多函数插件", open=True): - dropdown_fn_list = [] - for k, plugin in plugins.items(): - if not match_group(plugin['Group'], DEFAULT_FN_GROUPS): continue - if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件 - elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示 - with gr.Row(): - dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False) - with gr.Row(): - plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, - 
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False) - with gr.Row(): - switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm") - with gr.Row(): - with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up: - file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload") - - with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"): - with gr.Row(): - with gr.Tab("上传文件", elem_id="interact-panel"): - gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。") - file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float") - - with gr.Tab("更换模型", elem_id="interact-panel"): - md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False) - top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",) - max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",) - system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT) - - with gr.Tab("界面外观", elem_id="interact-panel"): - theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False) - checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False) - opt = ["自定义菜单"] - value=[] - if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"] - checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False) - dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm") - dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode) - with gr.Tab("帮助", elem_id="interact-panel"): - gr.Markdown(help_menu_description) - - with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary: - with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"): - with gr.Row() as row: - row.style(equal_height=True) - with gr.Column(scale=10): - txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", - elem_id='user_input_float', lines=8, label="输入区2").style(container=False) - with gr.Column(scale=1, min_width=40): - submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm") - resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm") - stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm") - clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm") - - - with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize: - with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"): - with gr.Row() as row: - with gr.Column(scale=10): - AVAIL_BTN = [btn for btn in customize_btns.keys()] + [k for k in functional] - basic_btn_dropdown = gr.Dropdown(AVAIL_BTN, value="自定义按钮1", label="选择一个需要自定义基础功能区按钮").style(container=False) - basic_fn_title = gr.Textbox(show_label=False, placeholder="输入新按钮名称", lines=1).style(container=False) - basic_fn_prefix = gr.Textbox(show_label=False, placeholder="输入新提示前缀", lines=4).style(container=False) - basic_fn_suffix = 
gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False) - with gr.Column(scale=1, min_width=70): - basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm") - basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm") - def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False): - ret = {} - # 读取之前的自定义按钮 - customize_fn_overwrite_ = cookies_['customize_fn_overwrite'] - # 更新新的自定义按钮 - customize_fn_overwrite_.update({ - basic_btn_dropdown_: - { - "Title":basic_fn_title, - "Prefix":basic_fn_prefix, - "Suffix":basic_fn_suffix, - } - } - ) - if clean_up: - customize_fn_overwrite_ = {} - cookies_.update(customize_fn_overwrite_) # 更新cookie - visible = (not clean_up) and (basic_fn_title != "") - if basic_btn_dropdown_ in customize_btns: - # 是自定义按钮,不是预定义按钮 - ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)}) - else: - # 是预定义按钮 - ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)}) - ret.update({cookies: cookies_}) - try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict - except: persistent_cookie_ = {} - persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value - persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict - ret.update({py_pickle_cookie: persistent_cookie_}) # write persistent cookie - return ret - - # update btn - h = basic_fn_confirm.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix], - [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()]) - h.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""") - # clean up btn - h2 = basic_fn_clean.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)], - [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()]) - h2.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""") - - def persistent_cookie_reload(persistent_cookie_, cookies_): - ret = {} - for k in customize_btns: - ret.update({customize_btns[k]: gr.update(visible=False, value="")}) - - try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict - except: return ret - - customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {}) - cookies_['customize_fn_overwrite'] = customize_fn_overwrite_ - ret.update({cookies: cookies_}) - - for k,v in persistent_cookie_["custom_bnt"].items(): - if v['Title'] == "": continue - if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])}) - else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])}) - return ret - - # 功能区显示开关与功能区的互动 - def fn_area_visibility(a): - ret = {} - ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))}) - ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))}) - ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))}) - if "浮动输入区" in a: ret.update({txt: gr.update(value="")}) - return ret - checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, 
plugin_advanced_arg] ) - checkboxes.select(None, [checkboxes], None, _js=js_code_show_or_hide) - - # 功能区显示开关与功能区的互动 - def fn_area_visibility_2(a): - ret = {} - ret.update({area_customize: gr.update(visible=("自定义菜单" in a))}) - return ret - checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] ) - checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2) - - # 整理反复出现的控件句柄组合 - input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg] - output_combo = [cookies, chatbot, history, status] - predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo) - # 提交按钮、重置按钮 - cancel_handles.append(txt.submit(**predict_args)) - cancel_handles.append(txt2.submit(**predict_args)) - cancel_handles.append(submitBtn.click(**predict_args)) - cancel_handles.append(submitBtn2.click(**predict_args)) - resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status - resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status - resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history - resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history - clearBtn.click(None, None, [txt, txt2], _js=js_code_clear) - clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear) - if AUTO_CLEAR_TXT: - submitBtn.click(None, None, [txt, txt2], _js=js_code_clear) - submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear) - txt.submit(None, None, [txt, txt2], _js=js_code_clear) - txt2.submit(None, None, [txt, txt2], _js=js_code_clear) - # 基础功能区的回调函数注册 - for k in functional: - if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue - click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo) - cancel_handles.append(click_handle) - for btn in customize_btns.values(): - click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo) - cancel_handles.append(click_handle) - # 文件上传区,接收文件后与chatbot的互动 - file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}") - file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}") - # 函数插件-固定按钮区 - for k in plugins: - if not plugins[k].get("AsButton", True): continue - click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo], output_combo) - click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]) - cancel_handles.append(click_handle) - # 函数插件-下拉菜单与随变按钮的互动 - def on_dropdown_changed(k): - variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary" - info = plugins[k].get("Info", k) - ret = {switchy_bt: gr.update(value=k, variant=variant, info_str=f'函数插件区: {info}')} - if plugins[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区 - ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))}) - else: - ret.update({plugin_advanced_arg: 
gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")}) - return ret - dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] ) - - def on_md_dropdown_changed(k): - return {chatbot: gr.update(label="当前模型:"+k)} - md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] ) - - def on_theme_dropdown_changed(theme, secret_css): - adjust_theme, css_part1, _, adjust_dynamic_theme = load_dynamic_theme(theme) - if adjust_dynamic_theme: - css_part2 = adjust_dynamic_theme._get_theme_css() - else: - css_part2 = adjust_theme()._get_theme_css() - return css_part2 + css_part1 - - theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css]) - theme_handle.then( - None, - [secret_css], - None, - _js=js_code_for_css_changing - ) - # 随变按钮的回调函数注册 - def route(request: gr.Request, k, *args, **kwargs): - if k in [r"打开插件列表", r"请先从插件列表中选择"]: return - yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs) - click_handle = switchy_bt.click(route,[switchy_bt, *input_combo], output_combo) - click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]) - cancel_handles.append(click_handle) - # 终止按钮的回调函数注册 - stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - plugins_as_btn = {name:plugin for name, plugin in plugins.items() if plugin.get('Button', None)} - def on_group_change(group_list): - btn_list = [] - fns_list = [] - if not group_list: # 处理特殊情况:没有选择任何插件组 - return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])] - for k, plugin in plugins.items(): - if plugin.get("AsButton", True): - btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮 - if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示 - elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表 - return [*btn_list, gr.Dropdown.update(choices=fns_list)] - plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown]) - if ENABLE_AUDIO: - from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution - rad = RealtimeAudioDistribution() - def deal_audio(audio, cookies): - rad.feed(cookies['uuid'].hex, audio) - audio_mic.stream(deal_audio, inputs=[audio_mic, cookies]) - - - demo.load(init_cookie, inputs=[cookies], outputs=[cookies]) - demo.load(persistent_cookie_reload, inputs = [py_pickle_cookie, cookies], - outputs = [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init) - demo.load(None, inputs=[dark_mode], outputs=None, _js="""(dark_mode)=>{apply_cookie_for_checkbox(dark_mode);}""") # 配置暗色主题或亮色主题 - demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}') - - # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 - def run_delayed_tasks(): - import threading, webbrowser, time - print(f"如果浏览器没有自动打开,请复制并转到以下URL:") - if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") - else: print(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") - - def auto_updates(): time.sleep(0); auto_update() - def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}") - def warm_up_mods(): time.sleep(6); 
warm_up_modules() - - threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新 - threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面 - threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块 - - run_delayed_tasks() - demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"]) - - - # 如果需要在二级路径下运行 - # CUSTOM_PATH = get_conf('CUSTOM_PATH') - # if CUSTOM_PATH != "/": - # from toolbox import run_gradio_in_subpath - # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) - # else: - # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png", - # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) - -if __name__ == "__main__": - main() +import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 +import gradio as gr +from predict import predict +from toolbox import format_io, find_free_port + +# 建议您复制一个config_private.py放自己的秘密,如API和代理网址,避免不小心传github被别人看到 +try: from config_private import proxies, WEB_PORT +except: from config import proxies, WEB_PORT + +# 如果WEB_PORT是-1,则随机选取WEB端口 +PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT + +initial_prompt = "Serve me as a writing and programming assistant." +title_html = """

<h1 align="center">ChatGPT 学术优化</h1>
""" + +# 问询记录,python 版本建议3.9+(越新越好) +import logging +os.makedirs('gpt_log', exist_ok=True) +try:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO, encoding='utf-8') +except:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO) +print('所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log,请注意自我隐私保护哦!') + +# 一些普通功能模块 +from functional import get_functionals +functional = get_functionals() + +# 对一些丧心病狂的实验性功能模块进行测试 +from functional_crazy import get_crazy_functionals, on_file_uploaded, on_report_generated +crazy_functional = get_crazy_functionals() + +# 处理markdown文本格式的转变 +gr.Chatbot.postprocess = format_io + +# 做一些样式上的调整 +try: set_theme = gr.themes.Default( primary_hue=gr.themes.utils.colors.orange, + font=["ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")], + font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")]) +except: + set_theme = None; print('gradio版本较旧,不能自定义字体和颜色') + +with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo: + gr.HTML(title_html) + # To add a Duplicate Space badge + gr.HTML('''
Duplicate Space复制空间以避免排队并使用您的 OpenAI API 密钥安全运行
''') + + with gr.Row(): + with gr.Column(scale=2): + chatbot = gr.Chatbot() + chatbot.style(height=1000) + chatbot.style() + history = gr.State([]) + TRUE = gr.State(True) + FALSE = gr.State(False) + with gr.Column(scale=1): + with gr.Row(): + with gr.Column(scale=12): + api = gr.Textbox(show_label=False, placeholder="Input OpenAI Key.").style(container=False) + with gr.Row(): + with gr.Column(scale=12): + txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False) + with gr.Column(scale=1): + submitBtn = gr.Button("Ask", variant="primary") + with gr.Row(): + for k in functional: + variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" + functional[k]["Button"] = gr.Button(k, variant=variant) + with gr.Row(): + gr.Markdown("以下部分实验性功能需从input框读取路径.") + with gr.Row(): + for k in crazy_functional: + variant = crazy_functional[k]["Color"] if "Color" in crazy_functional[k] else "secondary" + crazy_functional[k]["Button"] = gr.Button(k, variant=variant) + with gr.Row(): + gr.Markdown("上传本地文件供上面的实验性功能调用.") + with gr.Row(): + file_upload = gr.Files(label='任何文件,但推荐上传压缩文件(zip, tar)', file_count="multiple") + + from check_proxy import check_proxy + statusDisplay = gr.Markdown(f"{check_proxy(proxies)}") + systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt).style(container=True) + #inputs, top_p, temperature, top_k, repetition_penalty + with gr.Accordion("arguments", open=False): + top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) + temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.01, interactive=True, label="Temperature",) + + txt.submit(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay]) + submitBtn.click(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True) + for k in functional: + functional[k]["Button"].click(predict, + [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, TRUE, gr.State(k)], [chatbot, history, statusDisplay], show_progress=True) + file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt]) + for k in crazy_functional: + click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"], + [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, gr.State(PORT)], [chatbot, history, statusDisplay] + ) + try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot]) + except: pass + + +# 延迟函数,做一些准备工作,最后尝试打开浏览器 +def auto_opentab_delay(): + import threading, webbrowser, time + print(f"URL http://localhost:{PORT}") + def open(): time.sleep(2) + webbrowser.open_new_tab(f'http://localhost:{PORT}') + t = threading.Thread(target=open) + t.daemon = True; t.start() + +auto_opentab_delay() +demo.title = "ChatGPT 学术优化" +demo.queue().launch() diff --git a/appx.py b/appx.py new file mode 100644 index 0000000000000000000000000000000000000000..a699bc5b3c2e987102ca93e0ee28d601e0a93d02 --- /dev/null +++ b/appx.py @@ -0,0 +1,7 @@ +import gradio as gr + +def greet(name): + return "Hello " + name + "!!" 
+ +iface = gr.Interface(fn=greet, inputs="text", outputs="text") +iface.launch() \ No newline at end of file diff --git a/check_proxy.py b/check_proxy.py index 2df818559b16dde2999143bc4824e0aa1f3e97b8..d6263ad981272b0a798bf278a9e83b99e6928711 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -5,17 +5,9 @@ def check_proxy(proxies): try: response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4) data = response.json() - if 'country_name' in data: - country = data['country_name'] - result = f"代理配置 {proxies_https}, 代理所在地:{country}" - elif 'error' in data: - alternative = _check_with_backup_source(proxies) - if alternative is None: - result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限" - else: - result = f"代理配置 {proxies_https}, 代理所在地:{alternative}" - else: - result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}" + print(f'查询代理的地理位置,返回的结果是{data}') + country = data['country_name'] + result = f"代理配置 {proxies_https}, 代理所在地:{country}" print(result) return result except: @@ -23,154 +15,8 @@ def check_proxy(proxies): print(result) return result -def _check_with_backup_source(proxies): - import random, string, requests - random_string = ''.join(random.choices(string.ascii_letters + string.digits, k=32)) - try: return requests.get(f"http://{random_string}.edns.ip-api.com/json", proxies=proxies, timeout=4).json()['dns']['geo'] - except: return None -def backup_and_download(current_version, remote_version): - """ - 一键更新协议:备份和下载 - """ - from toolbox import get_conf - import shutil - import os - import requests - import zipfile - os.makedirs(f'./history', exist_ok=True) - backup_dir = f'./history/backup-{current_version}/' - new_version_dir = f'./history/new-version-{remote_version}/' - if os.path.exists(new_version_dir): - return new_version_dir - os.makedirs(new_version_dir) - shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history']) - proxies = get_conf('proxies') - try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True) - except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True) - zip_file_path = backup_dir+'/master.zip' - with open(zip_file_path, 'wb+') as f: - f.write(r.content) - dst_path = new_version_dir - with zipfile.ZipFile(zip_file_path, "r") as zip_ref: - for zip_info in zip_ref.infolist(): - dst_file_path = os.path.join(dst_path, zip_info.filename) - if os.path.exists(dst_file_path): - os.remove(dst_file_path) - zip_ref.extract(zip_info, dst_path) - return new_version_dir - - -def patch_and_restart(path): - """ - 一键更新协议:覆盖和重启 - """ - from distutils import dir_util - import shutil - import os - import sys - import time - import glob - from colorful import print亮黄, print亮绿, print亮红 - # if not using config_private, move origin config.py as config_private.py - if not os.path.exists('config_private.py'): - print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,', - '另外您可以随时在history子文件夹下找回旧版的程序。') - shutil.copyfile('config.py', 'config_private.py') - path_new_version = glob.glob(path + '/*-master')[0] - dir_util.copy_tree(path_new_version, './') - print亮绿('代码已经更新,即将更新pip包依赖……') - for i in reversed(range(5)): time.sleep(1); print(i) - try: - import subprocess - subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt']) - except: - print亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。') - print亮绿('更新完成,您可以随时在history子文件夹下找回旧版的程序,5s之后重启') - 
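The `backup_and_download` half of the one-click update protocol above snapshots the working tree into `./history/` before fetching and unpacking the new master archive, so a failed update is always recoverable. A condensed sketch of that flow, assuming the same `./history/` layout (standard library plus `requests` only; URL selection and error handling simplified):

```python
import os
import shutil
import zipfile
import requests

def fetch_new_version(zip_url, current_version, remote_version, proxies=None):
    backup_dir = f'./history/backup-{current_version}/'
    new_version_dir = f'./history/new-version-{remote_version}/'
    if os.path.exists(new_version_dir):
        return new_version_dir  # this version was already downloaded earlier
    os.makedirs(new_version_dir)
    # keep a full copy of the current tree, skipping ./history itself
    shutil.copytree('./', backup_dir, ignore=lambda src, names: ['history'])
    r = requests.get(zip_url, proxies=proxies, stream=True)
    zip_file_path = os.path.join(backup_dir, 'master.zip')
    with open(zip_file_path, 'wb') as f:
        f.write(r.content)
    # extract member by member, overwriting leftovers from failed attempts
    with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
        for zip_info in zip_ref.infolist():
            dst_file_path = os.path.join(new_version_dir, zip_info.filename)
            if os.path.exists(dst_file_path):
                os.remove(dst_file_path)
            zip_ref.extract(zip_info, new_version_dir)
    return new_version_dir
```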
print亮红('假如重启失败,您可能需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。') - print(' ------------------------------ -----------------------------------') - for i in reversed(range(8)): time.sleep(1); print(i) - os.execl(sys.executable, sys.executable, *sys.argv) - - -def get_current_version(): - import json - try: - with open('./version', 'r', encoding='utf8') as f: - current_version = json.loads(f.read())['version'] - except: - current_version = "" - return current_version - - -def auto_update(raise_error=False): - """ - 一键更新协议:查询版本和用户意见 - """ - try: - from toolbox import get_conf - import requests - import json - proxies = get_conf('proxies') - try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5) - except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5) - remote_json_data = json.loads(response.text) - remote_version = remote_json_data['version'] - if remote_json_data["show_feature"]: - new_feature = "新功能:" + remote_json_data["new_feature"] - else: - new_feature = "" - with open('./version', 'r', encoding='utf8') as f: - current_version = f.read() - current_version = json.loads(current_version)['version'] - if (remote_version - current_version) >= 0.01-1e-5: - from colorful import print亮黄 - print亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}') - print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n') - user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?') - if user_instruction in ['Y', 'y']: - path = backup_and_download(current_version, remote_version) - try: - patch_and_restart(path) - except: - msg = '更新失败。' - if raise_error: - from toolbox import trimmed_format_exc - msg += trimmed_format_exc() - print(msg) - else: - print('自动更新程序:已禁用') - return - else: - return - except: - msg = '自动更新程序:已禁用。建议排查:代理网络配置。' - if raise_error: - from toolbox import trimmed_format_exc - msg += trimmed_format_exc() - print(msg) - -def warm_up_modules(): - print('正在执行一些模块的预热 ...') - from toolbox import ProxyNetworkActivate - from request_llms.bridge_all import model_info - with ProxyNetworkActivate("Warmup_Modules"): - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - enc.encode("模块预热", disallowed_special=()) - enc = model_info["gpt-4"]['tokenizer'] - enc.encode("模块预热", disallowed_special=()) - -def warm_up_vectordb(): - print('正在执行一些模块的预热 ...') - from toolbox import ProxyNetworkActivate - with ProxyNetworkActivate("Warmup_Modules"): - import nltk - with ProxyNetworkActivate("Warmup_Modules"): nltk.download("punkt") - - if __name__ == '__main__': - import os - os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 - from toolbox import get_conf - proxies = get_conf('proxies') - check_proxy(proxies) + try: from config_private import proxies # 放自己的秘密如API和代理网址 os.path.exists('config_private.py') + except: from config import proxies + check_proxy(proxies) \ No newline at end of file diff --git a/colorful.py b/colorful.py deleted file mode 100644 index 9749861f7e59151cda40ec7b7cbc4ea814b88d71..0000000000000000000000000000000000000000 --- a/colorful.py +++ /dev/null @@ -1,61 +0,0 @@ -import platform -from sys import stdout - -if platform.system()=="Linux": - pass -else: - from colorama import init - init() - -# Do you like the elegance of Chinese characters? 
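Every helper in colorful.py follows one pattern: emit an ANSI SGR escape sequence, print the payload, then reset with `\033[0m` (the `colorama.init()` call above is what makes these sequences work on Windows consoles). A generic form of the pattern, shown only for illustration:

```python
def ansi_print(sgr_code, *args, **kwargs):
    # \033[<code>m switches the text style; \033[0m resets it afterwards
    print(f"\033[{sgr_code}m", *args, "\033[0m", **kwargs)

ansi_print("0;31", "normal red")     # 0 = normal intensity, 31 = red foreground
ansi_print("1;33", "bright yellow")  # 1 = bold/bright, 33 = yellow foreground
```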
-def print红(*kw,**kargs): - print("\033[0;31m",*kw,"\033[0m",**kargs) -def print绿(*kw,**kargs): - print("\033[0;32m",*kw,"\033[0m",**kargs) -def print黄(*kw,**kargs): - print("\033[0;33m",*kw,"\033[0m",**kargs) -def print蓝(*kw,**kargs): - print("\033[0;34m",*kw,"\033[0m",**kargs) -def print紫(*kw,**kargs): - print("\033[0;35m",*kw,"\033[0m",**kargs) -def print靛(*kw,**kargs): - print("\033[0;36m",*kw,"\033[0m",**kargs) - -def print亮红(*kw,**kargs): - print("\033[1;31m",*kw,"\033[0m",**kargs) -def print亮绿(*kw,**kargs): - print("\033[1;32m",*kw,"\033[0m",**kargs) -def print亮黄(*kw,**kargs): - print("\033[1;33m",*kw,"\033[0m",**kargs) -def print亮蓝(*kw,**kargs): - print("\033[1;34m",*kw,"\033[0m",**kargs) -def print亮紫(*kw,**kargs): - print("\033[1;35m",*kw,"\033[0m",**kargs) -def print亮靛(*kw,**kargs): - print("\033[1;36m",*kw,"\033[0m",**kargs) - -# Do you like the elegance of Chinese characters? -def sprint红(*kw): - return "\033[0;31m"+' '.join(kw)+"\033[0m" -def sprint绿(*kw): - return "\033[0;32m"+' '.join(kw)+"\033[0m" -def sprint黄(*kw): - return "\033[0;33m"+' '.join(kw)+"\033[0m" -def sprint蓝(*kw): - return "\033[0;34m"+' '.join(kw)+"\033[0m" -def sprint紫(*kw): - return "\033[0;35m"+' '.join(kw)+"\033[0m" -def sprint靛(*kw): - return "\033[0;36m"+' '.join(kw)+"\033[0m" -def sprint亮红(*kw): - return "\033[1;31m"+' '.join(kw)+"\033[0m" -def sprint亮绿(*kw): - return "\033[1;32m"+' '.join(kw)+"\033[0m" -def sprint亮黄(*kw): - return "\033[1;33m"+' '.join(kw)+"\033[0m" -def sprint亮蓝(*kw): - return "\033[1;34m"+' '.join(kw)+"\033[0m" -def sprint亮紫(*kw): - return "\033[1;35m"+' '.join(kw)+"\033[0m" -def sprint亮靛(*kw): - return "\033[1;36m"+' '.join(kw)+"\033[0m" diff --git a/config.py b/config.py index a536a181a31b2d35beb4b38937f77a2087b5bde4..90858e04fe1377634904bcc5ad6ba9dbd324f4fc 100644 --- a/config.py +++ b/config.py @@ -1,370 +1,29 @@ -""" - 以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。 - 读取优先级:环境变量 > config_private.py > config.py - --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- - All the following configurations also support using environment variables to override, - and the environment variable configuration format can be seen in docker-compose.yml. 
- Configuration reading priority: environment variable > config_private.py > config.py -""" +# API_KEY = "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" 此key无效 +API_KEY = "sk-此处填API秘钥" +API_URL = "https://api.openai.com/v1/chat/completions" -# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项 -API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4" - - -# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项 -API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4" - - -# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改 +# 改为True应用代理 USE_PROXY = False if USE_PROXY: - """ - 代理网络的地址,打开你的代理软件查看代理协议(socks5h / http)、地址(localhost)和端口(11284) - 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改 - <配置教程&视频教程> https://github.com/binary-husky/gpt_academic/issues/1> - [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http - [地址] 填localhost或者127.0.0.1(localhost意思是代理软件安装在本机上) - [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上 - """ - proxies = { - # [协议]:// [地址] :[端口] - "http": "socks5h://localhost:11284", # 再例如 "http": "http://127.0.0.1:7890", - "https": "socks5h://localhost:11284", # 再例如 "https": "http://127.0.0.1:7890", - } + # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284) + proxies = { "http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284", } + print('网络代理状态:运行。') else: proxies = None - -# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------ - -# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!) -# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"} -# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"} -API_URL_REDIRECT = {} - - -# 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次 -# 一言以蔽之:免费(5刀)用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview -DEFAULT_WORKER_NUM = 3 - - -# 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"] -# 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...] -THEME = "Chuanhu-Small-and-Beautiful" -AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"] - - -# 默认的系统提示词(system prompt) -INIT_SYS_PROMPT = "Serve me as a writing and programming assistant." 
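The docstring at the top of this file promises a three-tier lookup: environment variable, then `config_private.py`, then `config.py`. In the project these values are read through `toolbox.get_conf`; the sketch below only illustrates the priority order (it omits the type coercion that real code needs, since environment variables arrive as strings):

```python
import importlib
import os

def read_single_conf(name, default=None):
    # 1. environment variables take the highest priority
    if name in os.environ:
        return os.environ[name]
    # 2. then the user's private overrides, if config_private.py exists
    try:
        return getattr(importlib.import_module('config_private'), name)
    except (ImportError, AttributeError):
        pass
    # 3. finally the repository defaults in config.py
    try:
        return getattr(importlib.import_module('config'), name)
    except (ImportError, AttributeError):
        return default
```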
- - -# 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效) -CHATBOT_HEIGHT = 1115 - - -# 代码高亮 -CODE_HIGHLIGHT = True - - -# 窗口布局 -LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局) - - -# 暗色模式 / 亮色模式 -DARK_MODE = False - + print('网络代理状态:未配置。无代理状态下很可能无法访问。') # 发送请求到OpenAI后,等待多久判定为超时 -TIMEOUT_SECONDS = 30 - +TIMEOUT_SECONDS = 20 # 网页的端口, -1代表随机端口 WEB_PORT = -1 - # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 MAX_RETRY = 2 -# OpenAI模型选择是(gpt4现在只对申请成功的人开放) -LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm" -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo", "spark", "azure-gpt-3.5"] - -# 插件分类默认选项 -DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] - - -# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) -LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", - "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", - "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo", - "gemini-pro", "chatglm3", "claude-2"] -# P.S. 其他可用的模型还包括 [ -# "moss", "qwen-turbo", "qwen-plus", "qwen-max" -# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613", -# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', -# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama" -# ] - - -# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4" -MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3" - - -# 选择本地模型变体(只有当AVAIL_LLM_MODELS包含了对应本地模型时,才会起作用) -# 如果你选择Qwen系列的模型,那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型 -# 也可以是具体的模型路径 -QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8" - - -# 接入通义千问在线大模型 https://dashscope.console.aliyun.com/ -DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY - - -# 百度千帆(LLM_MODEL="qianfan") -BAIDU_CLOUD_API_KEY = '' -BAIDU_CLOUD_SECRET_KEY = '' -BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot-4"(文心大模型4.0), "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat" - - -# 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径 -CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100" - - -# 本地LLM模型如ChatGLM的执行方式 CPU/GPU -LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda" -LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本 - -# 设置gradio的并行线程数(不需要修改) -CONCURRENT_COUNT = 100 - - -# 是否在提交时自动清空输入框 -AUTO_CLEAR_TXT = False - - -# 加一个live2d装饰 -ADD_WAIFU = True - - -# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个) -# [("username", "password"), ("username2", "password2"), ...] -AUTHENTICATION = [] - - -# 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!) 
-CUSTOM_PATH = "/" - - -# HTTPS 秘钥和证书(不需要修改) -SSL_KEYFILE = "" -SSL_CERTFILE = "" - - -# 极少数情况下,openai的官方KEY需要伴随组织编码(格式如org-xxxxxxxxxxxxxxxxxxxxxxxx)使用 -API_ORG = "" - - -# 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md -SLACK_CLAUDE_BOT_ID = '' -SLACK_CLAUDE_USER_TOKEN = '' - - -# 如果需要使用AZURE(方法一:单个azure模型部署)详情请见额外文档 docs\use_azure.md -AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/" -AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处填写,该选项即将被弃用 -AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md - - -# 如果需要使用AZURE(方法二:多个azure模型部署+动态切换)详情请见额外文档 docs\use_azure.md -AZURE_CFG_ARRAY = {} - - -# 使用Newbing (不推荐使用,未来将删除) -NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"] -NEWBING_COOKIES = """ -put your new bing cookies here -""" - - -# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md -ENABLE_AUDIO = False -ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f -ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK -ALIYUN_ACCESSKEY="" # (无需填写) -ALIYUN_SECRET="" # (无需填写) - - -# 接入讯飞星火大模型 https://console.xfyun.cn/services/iat -XFYUN_APPID = "00000000" -XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" -XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - - -# 接入智谱大模型 -ZHIPUAI_API_KEY = "" -ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写 - - -# # 火山引擎YUNQUE大模型 -# YUNQUE_SECRET_KEY = "" -# YUNQUE_ACCESS_KEY = "" -# YUNQUE_MODEL = "" - - -# Claude API KEY -ANTHROPIC_API_KEY = "" - - -# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号 -MATHPIX_APPID = "" -MATHPIX_APPKEY = "" - - -# 自定义API KEY格式 -CUSTOM_API_KEY_PATTERN = "" - - -# Google Gemini API-Key -GEMINI_API_KEY = '' - - -# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens -HUGGINGFACE_ACCESS_TOKEN = "" - - -# GROBID服务器地址(填写多个可以均衡负载),用于高质量地读取PDF文档 -# 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space" -GROBID_URLS = [ - "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space", - "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space", - "https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space", -] - - -# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭 -ALLOW_RESET_CONFIG = False - - -# 在使用AutoGen插件时,是否使用Docker容器运行代码 -AUTOGEN_USE_DOCKER = False - - -# 临时的上传文件夹位置,请勿修改 -PATH_PRIVATE_UPLOAD = "private_upload" - - -# 日志文件夹的位置,请勿修改 -PATH_LOGGING = "gpt_log" - - -# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改 -WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", - "Warmup_Modules", "Nougat_Download", "AutoGen"] - - -# *实验性功能*: 自动检测并屏蔽失效的KEY,请勿使用 -BLOCK_INVALID_APIKEY = False - - -# 启用插件热加载 -PLUGIN_HOT_RELOAD = False - - -# 自定义按钮的最大数量限制 -NUM_CUSTOM_BASIC_BTN = 4 - -""" -在线大模型配置关联关系示意图 -│ -├── "gpt-3.5-turbo" 等openai模型 -│ ├── API_KEY -│ ├── CUSTOM_API_KEY_PATTERN(不常用) -│ ├── API_ORG(不常用) -│ └── API_URL_REDIRECT(不常用) -│ -├── "azure-gpt-3.5" 等azure模型(单个azure模型,不需要动态切换) -│ ├── API_KEY -│ ├── AZURE_ENDPOINT -│ ├── AZURE_API_KEY -│ ├── AZURE_ENGINE -│ └── API_URL_REDIRECT -│ -├── "azure-gpt-3.5" 等azure模型(多个azure模型,需要动态切换,高优先级) -│ └── AZURE_CFG_ARRAY -│ -├── "spark" 星火认知大模型 spark & sparkv2 -│ ├── XFYUN_APPID -│ ├── XFYUN_API_SECRET -│ └── XFYUN_API_KEY -│ -├── "claude-1-100k" 等claude模型 -│ └── ANTHROPIC_API_KEY -│ -├── "stack-claude" -│ ├── SLACK_CLAUDE_BOT_ID -│ └── SLACK_CLAUDE_USER_TOKEN -│ -├── "qianfan" 百度千帆大模型库 -│ ├── BAIDU_CLOUD_QIANFAN_MODEL -│ ├── BAIDU_CLOUD_API_KEY -│ └── 
BAIDU_CLOUD_SECRET_KEY -│ -├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型 -│ └── ZHIPUAI_API_KEY -│ -├── "qwen-turbo" 等通义千问大模型 -│ └── DASHSCOPE_API_KEY -│ -├── "Gemini" -│ └── GEMINI_API_KEY -│ -└── "newbing" Newbing接口不再稳定,不推荐使用 - ├── NEWBING_STYLE - └── NEWBING_COOKIES - - -本地大模型示意图 -│ -├── "chatglm3" -├── "chatglm" -├── "chatglm_onnx" -├── "chatglmft" -├── "internlm" -├── "moss" -├── "jittorllms_pangualpha" -├── "jittorllms_llama" -├── "deepseekcoder" -├── "qwen-local" -├── RWKV的支持见Wiki -└── "llama2" - - -用户图形界面布局依赖关系示意图 -│ -├── CHATBOT_HEIGHT 对话窗的高度 -├── CODE_HIGHLIGHT 代码高亮 -├── LAYOUT 窗口布局 -├── DARK_MODE 暗色模式 / 亮色模式 -├── DEFAULT_FN_GROUPS 插件分类默认选项 -├── THEME 色彩主题 -├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框 -├── ADD_WAIFU 加一个live2d装饰 -└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性 - - -插件在线服务配置依赖关系示意图 -│ -├── 语音功能 -│ ├── ENABLE_AUDIO -│ ├── ALIYUN_TOKEN -│ ├── ALIYUN_APPKEY -│ ├── ALIYUN_ACCESSKEY -│ └── ALIYUN_SECRET -│ -└── PDF文档精准解析 - ├── GROBID_URLS - ├── MATHPIX_APPID - └── MATHPIX_APPKEY - +# 选择的OpenAI模型是(gpt4现在只对申请成功的人开放) +LLM_MODEL = "gpt-3.5-turbo" -""" +# # 检查一下是不是忘了改config +# if API_KEY == "sk-此处填API秘钥": +# assert False, "请在config文件中修改API密钥, 添加海外代理之后再运行" \ No newline at end of file diff --git a/core_functional.py b/core_functional.py deleted file mode 100644 index 4074cddb27b4f10c86b803df37005f516bfd8f58..0000000000000000000000000000000000000000 --- a/core_functional.py +++ /dev/null @@ -1,173 +0,0 @@ -# 'primary' 颜色对应 theme.py 中的 primary_hue -# 'secondary' 颜色对应 theme.py 中的 neutral_hue -# 'stop' 颜色对应 theme.py 中的 color_er -import importlib -from toolbox import clear_line_break -from toolbox import apply_gpt_academic_string_mask_langbased -from toolbox import build_gpt_academic_masked_string_langbased -from textwrap import dedent - -def get_core_functions(): - return { - - "学术语料润色": { - # [1*] 前缀字符串,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等。 - # 这里填一个提示词字符串就行了,这里为了区分中英文情景搞复杂了一点 - "Prefix": build_gpt_academic_masked_string_langbased( - text_show_english= - r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " - r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " - r"Firstly, you should provide the polished paragraph. " - r"Secondly, you should list all your modification and explain the reasons to do so in markdown table.", - text_show_chinese= - r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," - r"同时分解长句,减少重复,并提供改进建议。请先提供文本的更正版本,然后在markdown表格中列出修改的内容,并给出修改的理由:" - ) + "\n\n", - # [2*] 后缀字符串,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 - "Suffix": r"", - # [3] 按钮颜色 (可选参数,默认 secondary) - "Color": r"secondary", - # [4] 按钮是否可见 (可选参数,默认 True,即可见) - "Visible": True, - # [5] 是否在触发时清除历史 (可选参数,默认 False,即不处理之前的对话历史) - "AutoClearHistory": False, - # [6] 文本预处理 (可选参数,默认 None,举例:写个函数移除所有的换行符) - "PreProcess": None, - }, - - - "总结绘制脑图": { - # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等 - "Prefix": r"", - # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 - "Suffix": - # dedent() 函数用于去除多行字符串的缩进 - dedent("\n"+r''' - ============================== - - 使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如: - - 以下是对以上文本的总结,以mermaid flowchart的形式展示: - ```mermaid - flowchart LR - A["节点名1"] --> B("节点名2") - B --> C{"节点名3"} - C --> D["节点名4"] - C --> |"箭头名1"| E["节点名5"] - C --> |"箭头名2"| F["节点名6"] - ``` - - 警告: - (1)使用中文 - (2)节点名字使用引号包裹,如["Laptop"] - (3)`|` 和 `"`之间不要存在空格 - (4)根据情况选择flowchart LR(从左到右)或者flowchart TD(从上到下) - '''), - }, - - - "查找语法错误": { - "Prefix": r"Help me ensure that the grammar and the spelling is correct. 
" - r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. " - r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " - r"put the original text the first column, " - r"put the corrected text in the second column and highlight the key words you fixed. " - r"Finally, please provide the proofreaded text.""\n\n" - r"Example:""\n" - r"Paragraph: How is you? Do you knows what is it?""\n" - r"| Original sentence | Corrected sentence |""\n" - r"| :--- | :--- |""\n" - r"| How **is** you? | How **are** you? |""\n" - r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n\n" - r"Below is a paragraph from an academic paper. " - r"You need to report all grammar and spelling mistakes as the example before." - + "\n\n", - "Suffix": r"", - "PreProcess": clear_line_break, # 预处理:清除换行符 - }, - - - "中译英": { - "Prefix": r"Please translate following sentence to English:" + "\n\n", - "Suffix": r"", - }, - - - "学术英中互译": { - "Prefix": build_gpt_academic_masked_string_langbased( - text_show_chinese= - r"I want you to act as a scientific English-Chinese translator, " - r"I will provide you with some paragraphs in one language " - r"and your task is to accurately and academically translate the paragraphs only into the other language. " - r"Do not repeat the original provided paragraphs after translation. " - r"You should use artificial intelligence tools, " - r"such as natural language processing, and rhetorical knowledge " - r"and experience about effective writing techniques to reply. " - r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:", - text_show_english= - r"你是经验丰富的翻译,请把以下学术文章段落翻译成中文," - r"并同时充分考虑中文的语法、清晰、简洁和整体可读性," - r"必要时,你可以修改整个句子的顺序以确保翻译后的段落符合中文的语言习惯。" - r"你需要翻译的文本如下:" - ) + "\n\n", - "Suffix": r"", - }, - - - "英译中": { - "Prefix": r"翻译成地道的中文:" + "\n\n", - "Suffix": r"", - "Visible": False, - }, - - - "找图片": { - "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," - r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n", - "Suffix": r"", - "Visible": False, - }, - - - "解释代码": { - "Prefix": r"请解释以下代码:" + "\n```\n", - "Suffix": "\n```\n", - }, - - - "参考文献转Bib": { - "Prefix": r"Here are some bibliography items, please transform them into bibtex style." - r"Note that, reference styles maybe more than one kind, you should transform each item correctly." 
- r"Items need to be transformed:" + "\n\n", - "Visible": False, - "Suffix": r"", - } - } - - -def handle_core_functionality(additional_fn, inputs, history, chatbot): - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - addition = chatbot._cookies['customize_fn_overwrite'] - if additional_fn in addition: - # 自定义功能 - inputs = addition[additional_fn]["Prefix"] + inputs + addition[additional_fn]["Suffix"] - return inputs, history - else: - # 预制功能 - if "PreProcess" in core_functional[additional_fn]: - if core_functional[additional_fn]["PreProcess"] is not None: - inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - # 为字符串加上上面定义的前缀和后缀。 - inputs = apply_gpt_academic_string_mask_langbased( - string = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"], - lang_reference = inputs, - ) - if core_functional[additional_fn].get("AutoClearHistory", False): - history = [] - return inputs, history - -if __name__ == "__main__": - t = get_core_functions()["总结绘制脑图"] - print(t["Prefix"] + t["Suffix"]) \ No newline at end of file diff --git a/crazy_functional.py b/crazy_functional.py deleted file mode 100644 index 3e998e56fce91582ab89d2c7e7b41eb94eabdf8d..0000000000000000000000000000000000000000 --- a/crazy_functional.py +++ /dev/null @@ -1,723 +0,0 @@ -from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效 -from toolbox import trimmed_format_exc - - -def get_crazy_functions(): - from crazy_functions.读文章写摘要 import 读文章写摘要 - from crazy_functions.生成函数注释 import 批量生成函数注释 - from crazy_functions.解析项目源代码 import 解析项目本身 - from crazy_functions.解析项目源代码 import 解析一个Python项目 - from crazy_functions.解析项目源代码 import 解析一个Matlab项目 - from crazy_functions.解析项目源代码 import 解析一个C项目的头文件 - from crazy_functions.解析项目源代码 import 解析一个C项目 - from crazy_functions.解析项目源代码 import 解析一个Golang项目 - from crazy_functions.解析项目源代码 import 解析一个Rust项目 - from crazy_functions.解析项目源代码 import 解析一个Java项目 - from crazy_functions.解析项目源代码 import 解析一个前端项目 - from crazy_functions.高级功能函数模板 import 高阶功能模板函数 - from crazy_functions.Latex全文润色 import Latex英文润色 - from crazy_functions.询问多个大语言模型 import 同时问询 - from crazy_functions.解析项目源代码 import 解析一个Lua项目 - from crazy_functions.解析项目源代码 import 解析一个CSharp项目 - from crazy_functions.总结word文档 import 总结word文档 - from crazy_functions.解析JupyterNotebook import 解析ipynb文件 - from crazy_functions.对话历史存档 import 对话历史存档 - from crazy_functions.对话历史存档 import 载入对话历史存档 - from crazy_functions.对话历史存档 import 删除所有本地对话历史记录 - from crazy_functions.辅助功能 import 清除缓存 - from crazy_functions.批量Markdown翻译 import Markdown英译中 - from crazy_functions.批量总结PDF文档 import 批量总结PDF文档 - from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 - from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 - from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入 - from crazy_functions.Latex全文润色 import Latex中文润色 - from crazy_functions.Latex全文润色 import Latex英文纠错 - from crazy_functions.批量Markdown翻译 import Markdown中译英 - from crazy_functions.虚空终端 import 虚空终端 - from crazy_functions.生成多种Mermaid图表 import 生成多种Mermaid图表 - - function_plugins = { - "虚空终端": { - "Group": "对话|编程|学术|智能体", - "Color": "stop", - "AsButton": True, - "Function": HotReload(虚空终端), - }, - "解析整个Python项目": { - "Group": "编程", - "Color": "stop", - "AsButton": True, - "Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径", - "Function": HotReload(解析一个Python项目), - }, - "载入对话历史存档(先上传存档或输入路径)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "Info": "载入对话历史存档 | 输入参数为路径", - "Function": 
HotReload(载入对话历史存档), - }, - "删除所有本地对话历史记录(谨慎操作)": { - "Group": "对话", - "AsButton": False, - "Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数", - "Function": HotReload(删除所有本地对话历史记录), - }, - "清除所有缓存文件(谨慎操作)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", - "Function": HotReload(清除缓存), - }, - "生成多种Mermaid图表(从当前对话或路径(.pdf/.md/.docx)中生产图表)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "Info" : "基于当前对话或文件生成多种Mermaid图表,图表类型由模型判断", - "Function": HotReload(生成多种Mermaid图表), - "AdvancedArgs": True, - "ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图", - }, - "批量总结Word文档": { - "Group": "学术", - "Color": "stop", - "AsButton": True, - "Info": "批量总结word文档 | 输入参数为路径", - "Function": HotReload(总结word文档), - }, - "解析整个Matlab项目": { - "Group": "编程", - "Color": "stop", - "AsButton": False, - "Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径", - "Function": HotReload(解析一个Matlab项目), - }, - "解析整个C++项目头文件": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径", - "Function": HotReload(解析一个C项目的头文件), - }, - "解析整个C++项目(.cpp/.hpp/.c/.h)": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径", - "Function": HotReload(解析一个C项目), - }, - "解析整个Go项目": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Go项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Golang项目), - }, - "解析整个Rust项目": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Rust项目), - }, - "解析整个Java项目": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Java项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Java项目), - }, - "解析整个前端项目(js,ts,css等)": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径", - "Function": HotReload(解析一个前端项目), - }, - "解析整个Lua项目": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个Lua项目), - }, - "解析整个CSharp项目": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径", - "Function": HotReload(解析一个CSharp项目), - }, - "解析Jupyter Notebook文件": { - "Group": "编程", - "Color": "stop", - "AsButton": False, - "Info": "解析Jupyter Notebook文件 | 输入参数为路径", - "Function": HotReload(解析ipynb文件), - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示 - }, - "读Tex论文写摘要": { - "Group": "学术", - "Color": "stop", - "AsButton": False, - "Info": "读取Tex论文并写摘要 | 输入参数为路径", - "Function": HotReload(读文章写摘要), - }, - "翻译README或MD": { - "Group": "编程", - "Color": "stop", - "AsButton": True, - "Info": "将Markdown翻译为中文 | 输入参数为路径或URL", - "Function": HotReload(Markdown英译中), - }, - "翻译Markdown或README(支持Github链接)": { - "Group": "编程", - "Color": "stop", - "AsButton": False, - "Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL", - "Function": HotReload(Markdown英译中), - }, - "批量生成函数注释": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "批量生成函数的注释 | 输入参数为路径", - "Function": HotReload(批量生成函数注释), - }, - "保存当前的对话": { - "Group": "对话", - "AsButton": True, - "Info": "保存当前的对话 | 不需要输入参数", - "Function": HotReload(对话历史存档), - }, - "[多线程Demo]解析此项目本身(源码自译解)": { - "Group": "对话|编程", - "AsButton": False, # 加入下拉菜单中 - "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数", - 
"Function": HotReload(解析项目本身), - }, - "历史上的今天": { - "Group": "对话", - "AsButton": True, - "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数", - "Function": HotReload(高阶功能模板函数), - }, - "精准翻译PDF论文": { - "Group": "学术", - "Color": "stop", - "AsButton": True, - "Info": "精准翻译PDF论文为中文 | 输入参数为路径", - "Function": HotReload(批量翻译PDF文档), - }, - "询问多个GPT模型": { - "Group": "对话", - "Color": "stop", - "AsButton": True, - "Function": HotReload(同时问询), - }, - "批量总结PDF文档": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "批量总结PDF文档的内容 | 输入参数为路径", - "Function": HotReload(批量总结PDF文档), - }, - "谷歌学术检索助手(输入谷歌学术搜索页url)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL", - "Function": HotReload(谷歌检索小助手), - }, - "理解PDF文档内容 (模仿ChatPDF)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "理解PDF文档的内容并进行回答 | 输入参数为路径", - "Function": HotReload(理解PDF文档内容标准文件输入), - }, - "英文Latex项目全文润色(输入路径或上传压缩包)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包", - "Function": HotReload(Latex英文润色), - }, - - "中文Latex项目全文润色(输入路径或上传压缩包)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包", - "Function": HotReload(Latex中文润色), - }, - # 已经被新插件取代 - # "英文Latex项目全文纠错(输入路径或上传压缩包)": { - # "Group": "学术", - # "Color": "stop", - # "AsButton": False, # 加入下拉菜单中 - # "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包", - # "Function": HotReload(Latex英文纠错), - # }, - # 已经被新插件取代 - # "Latex项目全文中译英(输入路径或上传压缩包)": { - # "Group": "学术", - # "Color": "stop", - # "AsButton": False, # 加入下拉菜单中 - # "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包", - # "Function": HotReload(Latex中译英) - # }, - # 已经被新插件取代 - # "Latex项目全文英译中(输入路径或上传压缩包)": { - # "Group": "学术", - # "Color": "stop", - # "AsButton": False, # 加入下拉菜单中 - # "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包", - # "Function": HotReload(Latex英译中) - # }, - "批量Markdown中译英(输入路径或上传压缩包)": { - "Group": "编程", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包", - "Function": HotReload(Markdown中译英), - }, - } - - # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=- - try: - from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 - - function_plugins.update( - { - "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695", - "Function": HotReload(下载arxiv论文并翻译摘要), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.联网的ChatGPT import 连接网络回答问题 - - function_plugins.update( - { - "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题", - "Function": HotReload(连接网络回答问题), - } - } - ) - from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题 - - function_plugins.update( - { - "连接网络回答问题(中文Bing版,输入问题后点击该插件)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, # 加入下拉菜单中 - "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题", - "Function": HotReload(连接bing搜索回答问题), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.解析项目源代码 import 解析任意code项目 - - function_plugins.update( - { - "解析项目源代码(手动指定和筛选源代码文件类型)": { - "Group": "编程", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": '输入时用逗号隔开, 
*代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: "*.c, ^*.cpp, config.toml, ^*.toml"', # 高级参数输入区的显示提示 - "Function": HotReload(解析任意code项目), - }, - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.询问多个大语言模型 import 同时问询_指定模型 - - function_plugins.update( - { - "询问多个GPT模型(手动指定询问哪些模型)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示 - "Function": HotReload(同时问询_指定模型), - }, - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2 - - function_plugins.update( - { - "图片生成_DALLE2 (先切换模型到gpt-*)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示 - "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成_DALLE2), - }, - } - ) - function_plugins.update( - { - "图片生成_DALLE3 (先切换模型到gpt-*)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示 - "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成_DALLE3), - }, - } - ) - function_plugins.update( - { - "图片修改_DALLE2 (先切换模型到gpt-*)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": False, # 调用时,唤起高级参数输入区(默认False) - # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片修改_DALLE2), - }, - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.总结音视频 import 总结音视频 - - function_plugins.update( - { - "批量总结音视频(输入路径或上传压缩包)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。", - "Info": "批量总结音频或视频 | 输入参数为路径", - "Function": HotReload(总结音视频), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.数学动画生成manim import 动画生成 - - function_plugins.update( - { - "数学动画生成(Manim)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话", - "Function": HotReload(动画生成), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 - - function_plugins.update( - { - "Markdown翻译(指定翻译成何种语言)": { - "Group": "编程", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。", - "Function": HotReload(Markdown翻译指定语言), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.知识库问答 import 知识库文件注入 - - function_plugins.update( - { - "构建知识库(先上传文件素材,再运行此插件)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。", - "Function": HotReload(知识库文件注入), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.知识库问答 import 读取知识库作答 
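Every optional plugin in this file is registered through the same guarded pattern seen here: import the plugin, wrap it in `HotReload` so source edits take effect without restarting, merge its metadata into the registry, and swallow failures so one missing dependency never blocks startup. A stripped-down sketch of a single registration (the plugin module `示例插件` is hypothetical, standing in for any entry above):

```python
from toolbox import HotReload, trimmed_format_exc

function_plugins = {}
try:
    from crazy_functions.示例插件 import 示例插件函数  # hypothetical plugin module
    function_plugins.update({
        "示例插件": {
            "Group": "对话",          # which plugin group the entry belongs to
            "Color": "stop",          # button color, mapped to color_er in theme.py
            "AsButton": False,        # dropdown entry instead of a dedicated button
            "Function": HotReload(示例插件函数),
        }
    })
except:
    print(trimmed_format_exc())
    print("Load function plugin failed")
```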
- - function_plugins.update( - { - "知识库文件注入(构建知识库后,再运行此插件)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。", - "Function": HotReload(读取知识库作答), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.交互功能函数模板 import 交互功能模板函数 - - function_plugins.update( - { - "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "Function": HotReload(交互功能模板函数), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.Latex输出PDF import Latex英文纠错加PDF对比 - from crazy_functions.Latex输出PDF import Latex翻译中文并重新编译PDF - from crazy_functions.Latex输出PDF import PDF翻译中文并重新编译PDF - - function_plugins.update( - { - "Latex英文纠错+高亮修正位置 [需Latex]": { - "Group": "学术", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。", - "Function": HotReload(Latex英文纠错加PDF对比), - }, - "Arxiv论文精细翻译(输入arxivID)[需Latex]": { - "Group": "学术", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " - r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " - r'If the term "agent" is used in this section, it should be translated to "智能体". ', - "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695", - "Function": HotReload(Latex翻译中文并重新编译PDF), - }, - "本地Latex论文精细翻译(上传Latex项目)[需Latex]": { - "Group": "学术", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " - r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " - r'If the term "agent" is used in this section, it should be translated to "智能体". ', - "Info": "本地Latex论文精细翻译 | 输入参数是路径", - "Function": HotReload(Latex翻译中文并重新编译PDF), - }, - "PDF翻译中文并重新编译PDF(上传PDF)[需Latex]": { - "Group": "学术", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, - "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " - r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " - r'If the term "agent" is used in this section, it should be translated to "智能体". 
', - "Info": "PDF翻译中文,并重新编译PDF | 输入参数为路径", - "Function": HotReload(PDF翻译中文并重新编译PDF) - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from toolbox import get_conf - - ENABLE_AUDIO = get_conf("ENABLE_AUDIO") - if ENABLE_AUDIO: - from crazy_functions.语音助手 import 语音助手 - - function_plugins.update( - { - "实时语音对话": { - "Group": "对话", - "Color": "stop", - "AsButton": True, - "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数", - "Function": HotReload(语音助手), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档 - - function_plugins.update( - { - "精准翻译PDF文档(NOUGAT)": { - "Group": "学术", - "Color": "stop", - "AsButton": False, - "Function": HotReload(批量翻译PDF文档), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.函数动态生成 import 函数动态生成 - - function_plugins.update( - { - "动态代码解释器(CodeInterpreter)": { - "Group": "智能体", - "Color": "stop", - "AsButton": False, - "Function": HotReload(函数动态生成), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.多智能体 import 多智能体终端 - - function_plugins.update( - { - "AutoGen多智能体终端(仅供测试)": { - "Group": "智能体", - "Color": "stop", - "AsButton": False, - "Function": HotReload(多智能体终端), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - try: - from crazy_functions.互动小游戏 import 随机小游戏 - - function_plugins.update( - { - "随机互动小游戏(仅供测试)": { - "Group": "智能体", - "Color": "stop", - "AsButton": False, - "Function": HotReload(随机小游戏), - } - } - ) - except: - print(trimmed_format_exc()) - print("Load function plugin failed") - - # try: - # from crazy_functions.高级功能函数模板 import 测试图表渲染 - # function_plugins.update({ - # "绘制逻辑关系(测试图表渲染)": { - # "Group": "智能体", - # "Color": "stop", - # "AsButton": True, - # "Function": HotReload(测试图表渲染) - # } - # }) - # except: - # print(trimmed_format_exc()) - # print('Load function plugin failed') - - # try: - # from crazy_functions.chatglm微调工具 import 微调数据集生成 - # function_plugins.update({ - # "黑盒模型学习: 微调数据集生成 (先上传数据集)": { - # "Color": "stop", - # "AsButton": False, - # "AdvancedArgs": True, - # "ArgsReminder": "针对数据集输入(如 绿帽子*深蓝色衬衫*黑色运动裤)给出指令,例如您可以将以下命令复制到下方: --llm_to_learn=azure-gpt-3.5 --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、过去经历进行描写。要求:100字以内,用第二人称。' --system_prompt=''", - # "Function": HotReload(微调数据集生成) - # } - # }) - # except: - # print('Load function plugin failed') - - """ - 设置默认值: - - 默认 Group = 对话 - - 默认 AsButton = True - - 默认 AdvancedArgs = False - - 默认 Color = secondary - """ - for name, function_meta in function_plugins.items(): - if "Group" not in function_meta: - function_plugins[name]["Group"] = "对话" - if "AsButton" not in function_meta: - function_plugins[name]["AsButton"] = True - if "AdvancedArgs" not in function_meta: - function_plugins[name]["AdvancedArgs"] = False - if "Color" not in function_meta: - function_plugins[name]["Color"] = "secondary" - - return function_plugins diff --git a/crazy_functions/CodeInterpreter.py b/crazy_functions/CodeInterpreter.py deleted file mode 100644 index 283dd87a93140c5621579e62c9d6d368537e4824..0000000000000000000000000000000000000000 --- a/crazy_functions/CodeInterpreter.py +++ /dev/null @@ -1,232 +0,0 @@ -from collections.abc import Callable, Iterable, Mapping -from typing import Any -from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc -from 
toolbox import promote_file_to_downloadzone, get_log_folder -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import input_clipping, try_install_deps -from multiprocessing import Process, Pipe -import os -import time - -templete = """ -```python -import ... # Put dependencies here, e.g. import numpy as np - -class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction` - - def run(self, path): # The name of the function must be `run`, it takes only a positional argument. - # rewrite the function you have just written here - ... - return generated_file_path -``` -""" - -def inspect_dependency(chatbot, history): - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return True - -def get_code_block(reply): - import re - pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks - matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) == 1: - return matches[0].strip('python') # code block - for match in matches: - if 'class TerminalFunction' in match: - return match.strip('python') # code block - raise RuntimeError("GPT is not generating proper code.") - -def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history): - # 输入 - prompt_compose = [ - f'Your job:\n' - f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n', - f"2. You should write this function to perform following task: " + txt + "\n", - f"3. Wrap the output python function with markdown codeblock." - ] - i_say = "".join(prompt_compose) - demo = [] - - # 第一步 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, - sys_prompt= r"You are a programmer." - ) - history.extend([i_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - # 第二步 - prompt_compose = [ - "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n", - templete - ] - i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. " - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt= r"You are a programmer." - ) - code_to_return = gpt_say - history.extend([i_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - # # 第三步 - # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them." - # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`' - # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=i_say, inputs_show_user=inputs_show_user, - # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - # sys_prompt= r"You are a programmer." - # ) - # # # 第三步 - # i_say = "Show me how to use `pip` to install packages to run the code above. " - # i_say += 'For instance. 
`pip install -r opencv-python scipy numpy`' - # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=i_say, inputs_show_user=i_say, - # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - # sys_prompt= r"You are a programmer." - # ) - installation_advance = "" - - return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history - -def make_module(code): - module_file = 'gpt_fn_' + gen_time_str().replace('-','_') - with open(f'{get_log_folder()}/{module_file}.py', 'w', encoding='utf8') as f: - f.write(code) - - def get_class_name(class_string): - import re - # Use regex to extract the class name - class_name = re.search(r'class (\w+)\(', class_string).group(1) - return class_name - - class_name = get_class_name(code) - return f"{get_log_folder().replace('/', '.')}.{module_file}->{class_name}" - -def init_module_instance(module): - import importlib - module_, class_ = module.split('->') - init_f = getattr(importlib.import_module(module_), class_) - return init_f() - -def for_immediate_show_off_when_possible(file_type, fp, chatbot): - if file_type in ['png', 'jpg']: - image_path = os.path.abspath(fp) - chatbot.append(['这是一张图片, 展示如下:', - f'本地文件地址:
`{image_path}`<br/>'+ 
- f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' - ]) - return chatbot - -def subprocess_worker(instance, file_path, return_dict): - return_dict['result'] = instance.run(file_path) - -def have_any_recent_upload_files(chatbot): - _5min = 5 * 60 - if not chatbot: return False # chatbot is None - most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) - if not most_recent_uploaded: return False # most_recent_uploaded is None - if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new - else: return False # most_recent_uploaded is too old - -def get_recent_file_prompt_support(chatbot): - most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) - path = most_recent_uploaded['path'] - return path - -@CatchException -def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - raise NotImplementedError - - # 清空历史,以免输入溢出 - history = []; clear_file_downloadzone(chatbot) - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "CodeInterpreter开源版, 此插件处于开发阶段, 建议暂时不要使用, 插件初始化中 ..." - ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if have_any_recent_upload_files(chatbot): - file_path = get_recent_file_prompt_support(chatbot) - else: - chatbot.append(["文件检索", "没有发现任何近期上传的文件。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 读取文件 - if ("recently_uploaded_files" in plugin_kwargs) and (plugin_kwargs["recently_uploaded_files"] == ""): plugin_kwargs.pop("recently_uploaded_files") - recently_uploaded_files = plugin_kwargs.get("recently_uploaded_files", None) - file_path = recently_uploaded_files[-1] - file_type = file_path.split('.')[-1] - - # 粗心检查 - if is_the_upload_folder(txt): - chatbot.append([ - "...", - f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)" - ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始干正事 - for j in range(5): # 最多重试5次 - try: - code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \ - yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history) - code = get_code_block(code) - res = make_module(code) - instance = init_module_instance(res) - break - except Exception as e: - chatbot.append([f"第{j}次代码生成尝试,失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 代码生成结束, 开始执行 - try: - import multiprocessing - manager = multiprocessing.Manager() - return_dict = manager.dict() - - p = multiprocessing.Process(target=subprocess_worker, args=(instance, file_path, return_dict)) - # only has 10 seconds to run - p.start(); p.join(timeout=10) - if p.is_alive(): p.terminate(); p.join() - p.close() - res = return_dict['result'] - # res = instance.run(file_path) - except Exception as e: - chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"]) - # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 顺利完成,收尾 - res = str(res) - if os.path.exists(res): - chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res]) - new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - else: - 
chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - -""" -测试: - 裁剪图像,保留下半部分 - 交换图像的蓝色通道和红色通道 - 将图像转为灰度图像 - 将csv文件转excel表格 -""" \ No newline at end of file diff --git "a/crazy_functions/Langchain\347\237\245\350\257\206\345\272\223.py" "b/crazy_functions/Langchain\347\237\245\350\257\206\345\272\223.py" deleted file mode 100644 index 8433895f538e826e4294b7d6503583aafc2b34c8..0000000000000000000000000000000000000000 --- "a/crazy_functions/Langchain\347\237\245\350\257\206\345\272\223.py" +++ /dev/null @@ -1,106 +0,0 @@ -from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything - - - -@CatchException -def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - - # < --------------------读取参数--------------- > - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - kai_id = plugin_kwargs.get("advanced_arg", 'default') - - chatbot.append((f"向`{kai_id}`知识库中添加文件。", "[Local Message] 从一批文件(txt, md, tex)中读取数据构建知识库, 然后进行问答。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # resolve deps - try: - from zh_langchain import construct_vector_store - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from .crazy_utils import knowledge_archive_interface - except Exception as e: - chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - from .crazy_utils import try_install_deps - try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain']) - yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history) - return - - # < --------------------读取文件--------------- > - file_manifest = [] - spl = ["txt", "doc", "docx", "email", "epub", "html", "json", "md", "msg", "pdf", "ppt", "pptx", "rtf"] - for sp in spl: - _, file_manifest_tmp, _ = get_files_from_everything(txt, type=f'.{sp}') - file_manifest += file_manifest_tmp - - if len(file_manifest) == 0: - chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # < -------------------预热文本向量化模组--------------- > - chatbot.append(['
'.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - print('Checking Text2vec ...') - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 - HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese") - - # < -------------------构建知识库--------------- > - chatbot.append(['
'.join(file_manifest), "正在构建知识库..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - print('Establishing knowledge archive ...') - with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 - kai = knowledge_archive_interface() - kai.feed_archive(file_manifest=file_manifest, id=kai_id) - kai_files = kai.get_loaded_file() - kai_files = '
'.join(kai_files) - # chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"]) - # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id() - # chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答' - # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"]) - chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - -@CatchException -def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1): - # resolve deps - try: - from zh_langchain import construct_vector_store - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from .crazy_utils import knowledge_archive_interface - except Exception as e: - chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - from .crazy_utils import try_install_deps - try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain']) - yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history) - return - - # < ------------------- --------------- > - kai = knowledge_archive_interface() - - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - kai_id = plugin_kwargs.get("advanced_arg", 'default') - resp, prompt = kai.answer_with_archive_by_id(txt, kai_id) - - chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt=system_prompt - ) - history.extend((prompt, gpt_say)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 diff --git "a/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" "b/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" deleted file mode 100644 index 3bd0613d4dcf7fd8b535e6a857b14130f85b2df9..0000000000000000000000000000000000000000 --- "a/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" +++ /dev/null @@ -1,245 +0,0 @@ -from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder -from toolbox import CatchException, report_exception, write_history_to_file, zip_folder - - -class PaperFileGroup(): - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llms.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - 将长文本分离开来 - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - segments = breakdown_text_to_satisfy_token_limit(file_content, 
max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex") - - print('Segmentation: done') - def merge_result(self): - self.file_result = ["" for _ in range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, self.sp_file_index): - self.file_result[k] += r - - def write_result(self): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - with open(path + '.polish.tex', 'w', encoding='utf8') as f: - manifest.append(path + '.polish.tex') - f.write(res) - return manifest - - def zip_result(self): - import os, time - folder = os.path.dirname(self.file_paths[0]) - t = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) - zip_folder(folder, get_log_folder(), f'{t}-polished.zip') - - -def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='polish'): - import time, os, re - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - - # <-------- 读取Latex文件,删除其中的所有注释 ----------> - pfg = PaperFileGroup() - - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - # 定义注释的正则表达式 - comment_pattern = r'(?<!\\)%.*' - # 使用正则表达式查找注释,并替换为空字符串 - clean_tex_content = re.sub(comment_pattern, '', file_content) - # 记录删除注释后的文本 - pfg.file_paths.append(fp) - pfg.file_contents.append(clean_tex_content) - - # <-------- 拆分过长的latex文件 ----------> - pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - - # <-------- 多线程润色开始 ----------> - if language == 'en': - if mode == 'polish': - inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, " + - "improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - else: - inputs_array = [r"Below is a section from an academic paper, proofread this section." + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + - r"Answer me only with the revised text:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"Polish {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper writer." 
for _ in range(n_split)] - elif language == 'zh': - if mode == 'polish': - inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - else: - inputs_array = [f"以下是一篇学术论文中的一段内容,请对这部分内容进行语法矫正。不要修改任何LaTeX命令,例如\section,\cite和方程式:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag] - sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)] - - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # 并行任务数量限制,最多同时执行5个,其他的排队等待 - scroller_max_len = 80 - ) - - # <-------- 文本碎片重组为完整的tex文件,整理结果为压缩包 ----------> - try: - pfg.sp_file_result = [] - for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - pfg.write_result() - pfg.zip_result() - except: - print(trimmed_format_exc()) - - # <-------- 整理结果,退出 ----------> - create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md" - res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name) - promote_file_to_downloadzone(res, chatbot=chatbot) - - history = gpt_response_collection - chatbot.append((f"{fp}完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -@CatchException -def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。(注意,此插件不调用Latex,如果有Latex环境,请使用「Latex英文纠错+高亮修正位置(需Latex)插件」"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en') - - - - - - -@CatchException -def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - 
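- # the input box doubles as a path field: when it names an existing local folder, it is used directly as the project root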
project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh') - - - - -@CatchException -def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行纠错。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread') - - - diff --git "a/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" "b/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" deleted file mode 100644 index d6c3b5edc30085397548128f9de0b55f22d593e2..0000000000000000000000000000000000000000 --- "a/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" +++ /dev/null @@ -1,176 +0,0 @@ -from toolbox import update_ui, promote_file_to_downloadzone -from toolbox import CatchException, report_exception, write_history_to_file -fast_debug = False - -class PaperFileGroup(): - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llms.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - 将长文本分离开来 - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex") - - print('Segmentation: done') - 
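The splitting strategy shared by both `PaperFileGroup` classes can be illustrated with a minimal, self-contained sketch (assuming `tiktoken` is installed; the repository's `breakdown_text_to_satisfy_token_limit` is more careful, preferring natural cut points such as paragraph boundaries over blind halving):

```python
# Minimal sketch of token-limited splitting, mirroring PaperFileGroup.run_file_split.
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def naive_breakdown(text: str, max_token_limit: int) -> list:
    """Recursively halve `text` until every fragment fits the token budget."""
    if len(enc.encode(text, disallowed_special=())) < max_token_limit:
        return [text]
    mid = len(text) // 2  # crude cut point, for illustration only
    return (naive_breakdown(text[:mid], max_token_limit)
            + naive_breakdown(text[mid:], max_token_limit))

# e.g. fragments = naive_breakdown(file_content, 1024)
```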
-def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'): - import time, os, re - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - # <-------- 读取Latex文件,删除其中的所有注释 ----------> - pfg = PaperFileGroup() - - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - # 定义注释的正则表达式 - comment_pattern = r'(?<!\\)%.*' - # 使用正则表达式查找注释,并替换为空字符串 - clean_tex_content = re.sub(comment_pattern, '', file_content) - # 记录删除注释后的文本 - pfg.file_paths.append(fp) - pfg.file_contents.append(clean_tex_content) - - # <-------- 拆分过长的latex文件 ----------> - pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - # <-------- 抽取摘要 ----------> - # if language == 'en': - # abs_extract_inputs = f"Please write an abstract for this paper" - - # # 单线,获取文章meta信息 - # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=abs_extract_inputs, - # inputs_show_user=f"正在抽取摘要信息。", - # llm_kwargs=llm_kwargs, - # chatbot=chatbot, history=[], - # sys_prompt="Your job is to collect information from materials。", - # ) - - # <-------- 多线程润色开始 ----------> - if language == 'en->zh': - inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - elif language == 'zh->en': - inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." 
for _ in range(n_split)] - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # OpenAI所允许的最大并行过载 - scroller_max_len = 80 - ) - - # <-------- 整理结果,退出 ----------> - create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md" - res = write_history_to_file(gpt_response_collection, create_report_file_name) - promote_file_to_downloadzone(res, chatbot=chatbot) - history = gpt_response_collection - chatbot.append((f"{fp}完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - - - - -@CatchException -def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') - - - - - -@CatchException -def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') \ No newline at end of file diff --git "a/crazy_functions/Latex\350\276\223\345\207\272PDF.py" "b/crazy_functions/Latex\350\276\223\345\207\272PDF.py" deleted file mode 100644 index 
fc878f9ff078bd92e48033e981159aa17a02cf2a..0000000000000000000000000000000000000000 --- "a/crazy_functions/Latex\350\276\223\345\207\272PDF.py" +++ /dev/null @@ -1,484 +0,0 @@ -from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone -from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str -from functools import partial -import glob, os, requests, time, json, tarfile - -pj = os.path.join -ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/") - - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". ' -def switch_prompt(pfg, mode, more_requirement): - """ - Generate prompts and system prompts based on the mode for proofreading or translating. - Args: - - pfg: Proofreader or Translator instance. - - mode: A string specifying the mode, either 'proofread' or 'translate_zh'. - - Returns: - - inputs_array: A list of strings containing prompts for users to respond to. - - sys_prompt_array: A list of strings containing prompts for system prompts. - """ - n_split = len(pfg.sp_file_contents) - if mode == 'proofread_en': - inputs_array = [r"Below is a section from an academic paper, proofread this section." + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement + - r"Answer me only with the revised text:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)] - elif mode == 'translate_zh': - inputs_array = [ - r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + - r"Answer me only with the translated text:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - sys_prompt_array = ["You are a professional translator." for _ in range(n_split)] - else: - assert False, "未知指令" - return inputs_array, sys_prompt_array - - -def desend_to_extracted_folder_if_exist(project_folder): - """ - Descend into the extracted folder if it exists, otherwise return the original folder. - - Args: - - project_folder: A string specifying the folder path. - - Returns: - - A string specifying the path to the extracted folder, or the original folder if there is no extracted folder. - """ - maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)] - if len(maybe_dir) == 0: return project_folder - if maybe_dir[0].endswith('.extract'): return maybe_dir[0] - return project_folder - - -def move_project(project_folder, arxiv_id=None): - """ - Create a new work folder and copy the project folder to it. - - Args: - - project_folder: A string specifying the folder path of the project. - - Returns: - - A string specifying the path to the new work folder. 
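- Note: an existing work folder of the same name is removed first (shutil.rmtree), so repeated runs start from a clean copy.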
- """ - import shutil, time - time.sleep(2) # avoid time string conflict - if arxiv_id is not None: - new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder') - else: - new_workfolder = f'{get_log_folder()}/{gen_time_str()}' - try: - shutil.rmtree(new_workfolder) - except: - pass - - # align subfolder if there is a folder wrapper - items = glob.glob(pj(project_folder, '*')) - items = [item for item in items if os.path.basename(item) != '__MACOSX'] - if len(glob.glob(pj(project_folder, '*.tex'))) == 0 and len(items) == 1: - if os.path.isdir(items[0]): project_folder = items[0] - - shutil.copytree(src=project_folder, dst=new_workfolder) - return new_workfolder - - -def arxiv_download(chatbot, history, txt, allow_cache=True): - def check_cached_translation_pdf(arxiv_id): - translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation') - if not os.path.exists(translation_dir): - os.makedirs(translation_dir) - target_file = pj(translation_dir, 'translate_zh.pdf') - if os.path.exists(target_file): - promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot) - target_file_compare = pj(translation_dir, 'comparison.pdf') - if os.path.exists(target_file_compare): - promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot) - return target_file - return False - - def is_float(s): - try: - float(s) - return True - except ValueError: - return False - - if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID - txt = 'https://arxiv.org/abs/' + txt.strip() - if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID - txt = 'https://arxiv.org/abs/' + txt[:10] - - if not txt.startswith('https://arxiv.org'): - return txt, None # 是本地文件,跳过下载 - - # <-------------- inspect format -------------> - chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...']) - yield from update_ui(chatbot=chatbot, history=history) - time.sleep(1) # 刷新界面 - - url_ = txt # https://arxiv.org/abs/1707.06690 - if not txt.startswith('https://arxiv.org/abs/'): - msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。" - yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面 - return msg, None - # <-------------- set format -------------> - arxiv_id = url_.split('/abs/')[-1] - if 'v' in arxiv_id: arxiv_id = arxiv_id[:10] - cached_translation_pdf = check_cached_translation_pdf(arxiv_id) - if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id - - url_tar = url_.replace('/abs/', '/e-print/') - translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print') - extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract') - os.makedirs(translation_dir, exist_ok=True) - - # <-------------- download arxiv source file -------------> - dst = pj(translation_dir, arxiv_id + '.tar') - if os.path.exists(dst): - yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面 - else: - yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面 - proxies = get_conf('proxies') - r = requests.get(url_tar, proxies=proxies) - with open(dst, 'wb+') as f: - f.write(r.content) - # <-------------- extract file -------------> - yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面 - from toolbox import extract_archive - extract_archive(file_path=dst, dest_dir=extract_dst) - return extract_dst, arxiv_id - - -def pdf2tex_project(pdf_file_path): - # Mathpix API credentials - app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY') - headers = {"app_id": app_id, 
"app_key": app_key} - - # Step 1: Send PDF file for processing - options = { - "conversion_formats": {"tex.zip": True}, - "math_inline_delimiters": ["$", "$"], - "rm_spaces": True - } - - response = requests.post(url="https://api.mathpix.com/v3/pdf", - headers=headers, - data={"options_json": json.dumps(options)}, - files={"file": open(pdf_file_path, "rb")}) - - if response.ok: - pdf_id = response.json()["pdf_id"] - print(f"PDF processing initiated. PDF ID: {pdf_id}") - - # Step 2: Check processing status - while True: - conversion_response = requests.get(f"https://api.mathpix.com/v3/pdf/{pdf_id}", headers=headers) - conversion_data = conversion_response.json() - - if conversion_data["status"] == "completed": - print("PDF processing completed.") - break - elif conversion_data["status"] == "error": - print("Error occurred during processing.") - else: - print(f"Processing status: {conversion_data['status']}") - time.sleep(5) # wait for a few seconds before checking again - - # Step 3: Save results to local files - output_dir = os.path.join(os.path.dirname(pdf_file_path), 'mathpix_output') - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - url = f"https://api.mathpix.com/v3/pdf/{pdf_id}.tex" - response = requests.get(url, headers=headers) - file_name_wo_dot = '_'.join(os.path.basename(pdf_file_path).split('.')[:-1]) - output_name = f"{file_name_wo_dot}.tex.zip" - output_path = os.path.join(output_dir, output_name) - with open(output_path, "wb") as output_file: - output_file.write(response.content) - print(f"tex.zip file saved at: {output_path}") - - import zipfile - unzip_dir = os.path.join(output_dir, file_name_wo_dot) - with zipfile.ZipFile(output_path, 'r') as zip_ref: - zip_ref.extractall(unzip_dir) - - return unzip_dir - - else: - print(f"Error sending PDF for processing. 
Status code: {response.status_code}") - return None - - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - - -@CatchException -def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # <-------------- information about this plugin -------------> - chatbot.append(["函数插件功能?", - "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------------- more requirements -------------> - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - more_req = plugin_kwargs.get("advanced_arg", "") - _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) - - # <-------------- check deps -------------> - try: - import glob, os, time, subprocess - subprocess.Popen(['pdflatex', '-version']) - from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex - except Exception as e: - chatbot.append([f"解析项目: {txt}", - f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # <-------------- clear history and read input -------------> - history = [] - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) - - # <-------------- move latex project away from temp folder -------------> - project_folder = move_project(project_folder, arxiv_id=None) - - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_proofread_en.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, mode='proofread_en', - switch_prompt=_switch_prompt_) - - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', - main_file_modified='merge_proofread_en', - work_folder_original=project_folder, work_folder_modified=project_folder, - work_folder=project_folder) - - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", - '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - - # <-------------- we are done -------------> - return success - - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - 
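Several of the plugin main programs below peel a leading `--no-cache` flag off the advanced argument with `more_req.lstrip("--no-cache")`, which is a no-op as written: `str.lstrip` strips a character *set* (any leading `-`, `n`, `o`, `c`, …) rather than a prefix, and its return value is discarded. A corrected sketch of the intended parsing (hypothetical helper name; assumes the flag, when present, leads the argument):

```python
def split_no_cache_flag(more_req: str):
    """Separate a leading --no-cache flag from the extra user requirement.
    str.removeprefix (Python 3.9+) removes an exact prefix and, like lstrip,
    returns a new string that must be assigned back."""
    no_cache = more_req.startswith("--no-cache")
    if no_cache:
        more_req = more_req.removeprefix("--no-cache").strip()
    return more_req, (not no_cache)  # (cleaned requirement, allow_cache)
```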
-@CatchException -def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # <-------------- information about this plugin -------------> - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------------- more requirements -------------> - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - more_req = plugin_kwargs.get("advanced_arg", "") - no_cache = more_req.startswith("--no-cache") - if no_cache: more_req.lstrip("--no-cache") - allow_cache = not no_cache - _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) - - # <-------------- check deps -------------> - try: - import glob, os, time, subprocess - subprocess.Popen(['pdflatex', '-version']) - from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex - except Exception as e: - chatbot.append([f"解析项目: {txt}", - f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # <-------------- clear history and read input -------------> - history = [] - try: - txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) - except tarfile.ReadError as e: - yield from update_ui_lastest_msg( - "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。", - chatbot=chatbot, history=history) - return - - if txt.endswith('.pdf'): - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现已经存在翻译好的PDF文档") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) - - # <-------------- move latex project away from temp folder -------------> - project_folder = move_project(project_folder, arxiv_id) - - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_translate_zh.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, mode='translate_zh', - switch_prompt=_switch_prompt_) - - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', - main_file_modified='merge_translate_zh', mode='translate_zh', - work_folder_original=project_folder, work_folder_modified=project_folder, - work_folder=project_folder) - - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - 
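- # promoted even on compile failure: the zip still holds the translated .tex sources that the message above asks users to attach when reporting issues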
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", - '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - - # <-------------- we are done -------------> - return success - - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -@CatchException -def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # <-------------- information about this plugin -------------> - chatbot.append([ - "函数插件功能?", - "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------------- more requirements -------------> - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - more_req = plugin_kwargs.get("advanced_arg", "") - no_cache = more_req.startswith("--no-cache") - if no_cache: more_req.lstrip("--no-cache") - allow_cache = not no_cache - _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) - - # <-------------- check deps -------------> - try: - import glob, os, time, subprocess - subprocess.Popen(['pdflatex', '-version']) - from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex - except Exception as e: - chatbot.append([f"解析项目: {txt}", - f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # <-------------- clear history and read input -------------> - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if len(file_manifest) != 1: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"不支持同时处理多个pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY') - if len(app_id) == 0 or len(app_key) == 0: - report_exception(chatbot, history, a="缺失 MATHPIX_APPID 和 MATHPIX_APPKEY。", b=f"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # <-------------- convert pdf into tex -------------> - project_folder = pdf2tex_project(file_manifest[0]) - - # Translate English Latex to Chinese Latex, and compile it - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) - - # <-------------- move latex project away from temp folder -------------> - 
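- # move_project (defined above) copies the sources into a fresh work folder, unwrapping a single wrapper directory and skipping __MACOSX, so edits never touch the Mathpix output in place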
project_folder = move_project(project_folder) - - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_translate_zh.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, mode='translate_zh', - switch_prompt=_switch_prompt_) - - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', - main_file_modified='merge_translate_zh', mode='translate_zh', - work_folder_original=project_folder, work_folder_modified=project_folder, - work_folder=project_folder) - - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", - '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) - yield from update_ui(chatbot=chatbot, history=history); - time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - - # <-------------- we are done -------------> - return success diff --git "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" deleted file mode 100644 index 36c99e71cf7ad81dcf1b721b1f98f59ef694c7fa..0000000000000000000000000000000000000000 --- "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" +++ /dev/null @@ -1,306 +0,0 @@ -from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone -from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str -from functools import partial -import glob, os, requests, time -pj = os.path.join -ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/") - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". ' -def switch_prompt(pfg, mode, more_requirement): - """ - Generate prompts and system prompts based on the mode for proofreading or translating. - Args: - - pfg: Proofreader or Translator instance. - - mode: A string specifying the mode, either 'proofread' or 'translate_zh'. - - Returns: - - inputs_array: A list of strings containing prompts for users to respond to. - - sys_prompt_array: A list of strings containing prompts for system prompts. - """ - n_split = len(pfg.sp_file_contents) - if mode == 'proofread_en': - inputs_array = [r"Below is a section from an academic paper, proofread this section." + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement + - r"Answer me only with the revised text:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)] - elif mode == 'translate_zh': - inputs_array = [r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + - r"Answer me only with the translated text:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - sys_prompt_array = ["You are a professional translator." 
for _ in range(n_split)] - else: - assert False, "未知指令" - return inputs_array, sys_prompt_array - -def desend_to_extracted_folder_if_exist(project_folder): - """ - Descend into the extracted folder if it exists, otherwise return the original folder. - - Args: - - project_folder: A string specifying the folder path. - - Returns: - - A string specifying the path to the extracted folder, or the original folder if there is no extracted folder. - """ - maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)] - if len(maybe_dir) == 0: return project_folder - if maybe_dir[0].endswith('.extract'): return maybe_dir[0] - return project_folder - -def move_project(project_folder, arxiv_id=None): - """ - Create a new work folder and copy the project folder to it. - - Args: - - project_folder: A string specifying the folder path of the project. - - Returns: - - A string specifying the path to the new work folder. - """ - import shutil, time - time.sleep(2) # avoid time string conflict - if arxiv_id is not None: - new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder') - else: - new_workfolder = f'{get_log_folder()}/{gen_time_str()}' - try: - shutil.rmtree(new_workfolder) - except: - pass - - # align subfolder if there is a folder wrapper - items = glob.glob(pj(project_folder,'*')) - items = [item for item in items if os.path.basename(item)!='__MACOSX'] - if len(glob.glob(pj(project_folder,'*.tex'))) == 0 and len(items) == 1: - if os.path.isdir(items[0]): project_folder = items[0] - - shutil.copytree(src=project_folder, dst=new_workfolder) - return new_workfolder - -def arxiv_download(chatbot, history, txt, allow_cache=True): - def check_cached_translation_pdf(arxiv_id): - translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation') - if not os.path.exists(translation_dir): - os.makedirs(translation_dir) - target_file = pj(translation_dir, 'translate_zh.pdf') - if os.path.exists(target_file): - promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot) - target_file_compare = pj(translation_dir, 'comparison.pdf') - if os.path.exists(target_file_compare): - promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot) - return target_file - return False - def is_float(s): - try: - float(s) - return True - except ValueError: - return False - if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID - txt = 'https://arxiv.org/abs/' + txt.strip() - if ('.' 
in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID - txt = 'https://arxiv.org/abs/' + txt[:10] - if not txt.startswith('https://arxiv.org'): - return txt, None - - # <-------------- inspect format -------------> - chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...']) - yield from update_ui(chatbot=chatbot, history=history) - time.sleep(1) # 刷新界面 - - url_ = txt # https://arxiv.org/abs/1707.06690 - if not txt.startswith('https://arxiv.org/abs/'): - msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。" - yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面 - return msg, None - # <-------------- set format -------------> - arxiv_id = url_.split('/abs/')[-1] - if 'v' in arxiv_id: arxiv_id = arxiv_id[:10] - cached_translation_pdf = check_cached_translation_pdf(arxiv_id) - if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id - - url_tar = url_.replace('/abs/', '/e-print/') - translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print') - extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract') - os.makedirs(translation_dir, exist_ok=True) - - # <-------------- download arxiv source file -------------> - dst = pj(translation_dir, arxiv_id+'.tar') - if os.path.exists(dst): - yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面 - else: - yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面 - proxies = get_conf('proxies') - r = requests.get(url_tar, proxies=proxies) - with open(dst, 'wb+') as f: - f.write(r.content) - # <-------------- extract file -------------> - yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面 - from toolbox import extract_archive - extract_archive(file_path=dst, dest_dir=extract_dst) - return extract_dst, arxiv_id -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - - -@CatchException -def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # <-------------- information about this plugin -------------> - chatbot.append([ "函数插件功能?", - "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------------- more requirements -------------> - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - more_req = plugin_kwargs.get("advanced_arg", "") - _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) - - # <-------------- check deps -------------> - try: - import glob, os, time, subprocess - subprocess.Popen(['pdflatex', '-version']) - from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex - except Exception as e: - chatbot.append([ f"解析项目: {txt}", - f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- clear history and read input -------------> - history = [] - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) 
== 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) - - - # <-------------- move latex project away from temp folder -------------> - project_folder = move_project(project_folder, arxiv_id=None) - - - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_proofread_en.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_) - - - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_proofread_en', - work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder) - - - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - - # <-------------- we are done -------------> - return success - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -@CatchException -def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # <-------------- information about this plugin -------------> - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------------- more requirements -------------> - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - more_req = plugin_kwargs.get("advanced_arg", "") - no_cache = more_req.startswith("--no-cache") - if no_cache: more_req.lstrip("--no-cache") - allow_cache = not no_cache - _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) - - # <-------------- check deps -------------> - try: - import glob, os, time, subprocess - subprocess.Popen(['pdflatex', '-version']) - from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex - except Exception as e: - chatbot.append([ f"解析项目: {txt}", - f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- clear history and read input -------------> - history = [] - txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) - if txt.endswith('.pdf'): - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, 
history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) - - - # <-------------- move latex project away from temp folder -------------> - project_folder = move_project(project_folder, arxiv_id) - - - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_translate_zh.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, mode='translate_zh', switch_prompt=_switch_prompt_) - - - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_translate_zh', mode='translate_zh', - work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder) - - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - - - # <-------------- we are done -------------> - return success diff --git a/crazy_functions/__init__.py b/crazy_functions/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py deleted file mode 100644 index 4f8fda9d5872db9c178321d43415b24dbea024bb..0000000000000000000000000000000000000000 --- a/crazy_functions/agent_fns/auto_agent.py +++ /dev/null @@ -1,23 +0,0 @@ -from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton -from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom -from crazy_functions.agent_fns.general import AutoGenGeneral - - - -class AutoGenMath(AutoGenGeneral): - - def define_agents(self): - from autogen import AssistantAgent, UserProxyAgent - return [ - { - "name": "assistant", # name of the agent. - "cls": AssistantAgent, # class of the agent. - }, - { - "name": "user_proxy", # name of the agent. - "cls": UserProxyAgent, # class of the agent. - "human_input_mode": "ALWAYS", # always ask for human input. - "llm_config": False, # disables llm-based auto reply. 
- }, - ] \ No newline at end of file diff --git a/crazy_functions/agent_fns/echo_agent.py b/crazy_functions/agent_fns/echo_agent.py deleted file mode 100644 index 52bf72debc7a56a89b277ced80078ea6b985e1fa..0000000000000000000000000000000000000000 --- a/crazy_functions/agent_fns/echo_agent.py +++ /dev/null @@ -1,19 +0,0 @@ -from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom - -class EchoDemo(PluginMultiprocessManager): - def subprocess_worker(self, child_conn): - # ⭐⭐ 子进程 - self.child_conn = child_conn - while True: - msg = self.child_conn.recv() # PipeCom - if msg.cmd == "user_input": - # wait futher user input - self.child_conn.send(PipeCom("show", msg.content)) - wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="我准备好处理下一个问题了.") - if not wait_success: - # wait timeout, terminate this subprocess_worker - break - elif msg.cmd == "terminate": - self.child_conn.send(PipeCom("done", "")) - break - print('[debug] subprocess_worker terminated') \ No newline at end of file diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py deleted file mode 100644 index 327a613b36b456220ac85d42a6a536f4fce42ea6..0000000000000000000000000000000000000000 --- a/crazy_functions/agent_fns/general.py +++ /dev/null @@ -1,138 +0,0 @@ -from toolbox import trimmed_format_exc, get_conf, ProxyNetworkActivate -from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom -from request_llms.bridge_all import predict_no_ui_long_connection -import time - -def gpt_academic_generate_oai_reply( - self, - messages, - sender, - config, -): - llm_config = self.llm_config if config is None else config - if llm_config is False: - return False, None - if messages is None: - messages = self._oai_messages[sender] - - inputs = messages[-1]['content'] - history = [] - for message in messages[:-1]: - history.append(message['content']) - context=messages[-1].pop("context", None) - assert context is None, "预留参数 context 未实现" - - reply = predict_no_ui_long_connection( - inputs=inputs, - llm_kwargs=llm_config, - history=history, - sys_prompt=self._oai_system_message[0]['content'], - console_slience=True - ) - assumed_done = reply.endswith('\nTERMINATE') - return True, reply - -class AutoGenGeneral(PluginMultiprocessManager): - def gpt_academic_print_override(self, user_proxy, message, sender): - # ⭐⭐ run in subprocess - try: - print_msg = sender.name + "\n\n---\n\n" + message["content"] - except: - print_msg = sender.name + "\n\n---\n\n" + message - self.child_conn.send(PipeCom("show", print_msg)) - - def gpt_academic_get_human_input(self, user_proxy, message): - # ⭐⭐ run in subprocess - patience = 300 - begin_waiting_time = time.time() - self.child_conn.send(PipeCom("interact", message)) - while True: - time.sleep(0.5) - if self.child_conn.poll(): - wait_success = True - break - if time.time() - begin_waiting_time > patience: - self.child_conn.send(PipeCom("done", "")) - wait_success = False - break - if wait_success: - return self.child_conn.recv().content - else: - raise TimeoutError("等待用户输入超时") - - def define_agents(self): - raise NotImplementedError - - def exe_autogen(self, input): - # ⭐⭐ run in subprocess - input = input.content - code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker} - agents = self.define_agents() - user_proxy = None - assistant = None - for agent_kwargs in agents: - agent_cls = agent_kwargs.pop('cls') - kwargs = { - 'llm_config':self.llm_kwargs, - 
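- # shared LLM settings and the sandboxed execution config seed every agent; agent_kwargs can override both via kwargs.update below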
'code_execution_config':code_execution_config - } - kwargs.update(agent_kwargs) - agent_handle = agent_cls(**kwargs) - agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b) - for d in agent_handle._reply_func_list: - if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply': - d['reply_func'] = gpt_academic_generate_oai_reply - if agent_kwargs['name'] == 'user_proxy': - agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a) - user_proxy = agent_handle - if agent_kwargs['name'] == 'assistant': assistant = agent_handle - try: - if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义") - with ProxyNetworkActivate("AutoGen"): - user_proxy.initiate_chat(assistant, message=input) - except Exception as e: - tb_str = '```\n' + trimmed_format_exc() + '```' - self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str)) - - def subprocess_worker(self, child_conn): - # ⭐⭐ run in subprocess - self.child_conn = child_conn - while True: - msg = self.child_conn.recv() # PipeCom - self.exe_autogen(msg) - - -class AutoGenGroupChat(AutoGenGeneral): - def exe_autogen(self, input): - # ⭐⭐ run in subprocess - import autogen - - input = input.content - with ProxyNetworkActivate("AutoGen"): - code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker} - agents = self.define_agents() - agents_instances = [] - for agent_kwargs in agents: - agent_cls = agent_kwargs.pop("cls") - kwargs = {"code_execution_config": code_execution_config} - kwargs.update(agent_kwargs) - agent_handle = agent_cls(**kwargs) - agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b) - agents_instances.append(agent_handle) - if agent_kwargs["name"] == "user_proxy": - user_proxy = agent_handle - user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a) - try: - groupchat = autogen.GroupChat(agents=agents_instances, messages=[], max_round=50) - manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config()) - manager._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b) - manager.get_human_input = lambda a: self.gpt_academic_get_human_input(manager, a) - if user_proxy is None: - raise Exception("user_proxy is not defined") - user_proxy.initiate_chat(manager, message=input) - except Exception: - tb_str = "```\n" + trimmed_format_exc() + "```" - self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str)) - - def define_group_chat_manager_config(self): - raise NotImplementedError diff --git a/crazy_functions/agent_fns/persistent.py b/crazy_functions/agent_fns/persistent.py deleted file mode 100644 index 82c869cb18ceba5c56e05d3d8b18bb968cf3b35e..0000000000000000000000000000000000000000 --- a/crazy_functions/agent_fns/persistent.py +++ /dev/null @@ -1,16 +0,0 @@ -from toolbox import Singleton -@Singleton -class GradioMultiuserManagerForPersistentClasses(): - def __init__(self): - self.mapping = {} - - def already_alive(self, key): - return (key in self.mapping) and (self.mapping[key].is_alive()) - - def set(self, key, x): - self.mapping[key] = x - return self.mapping[key] - - def get(self, key): - return self.mapping[key] - diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py deleted file mode 100644 index 
a292af810ef23992b036cc0697785268bc8a6250..0000000000000000000000000000000000000000 --- a/crazy_functions/agent_fns/pipe.py +++ /dev/null @@ -1,194 +0,0 @@ -from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_file_to_downloadzone -from crazy_functions.agent_fns.watchdog import WatchDog -import time, os - -class PipeCom: - def __init__(self, cmd, content) -> None: - self.cmd = cmd - self.content = content - - -class PluginMultiprocessManager: - def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # ⭐ run in main process - self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str()) - self.previous_work_dir_files = {} - self.llm_kwargs = llm_kwargs - self.plugin_kwargs = plugin_kwargs - self.chatbot = chatbot - self.history = history - self.system_prompt = system_prompt - # self.user_request = user_request - self.alive = True - self.use_docker = get_conf("AUTOGEN_USE_DOCKER") - self.last_user_input = "" - # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time - timeout_seconds = 5 * 60 - self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5) - self.heartbeat_watchdog.begin_watch() - - def feed_heartbeat_watchdog(self): - # feed this `dog`, so the dog will not `bark` (bark_fn will terminate the instance) - self.heartbeat_watchdog.feed() - - def is_alive(self): - return self.alive - - def launch_subprocess_with_pipe(self): - # ⭐ run in main process - from multiprocessing import Process, Pipe - - parent_conn, child_conn = Pipe() - self.p = Process(target=self.subprocess_worker, args=(child_conn,)) - self.p.daemon = True - self.p.start() - return parent_conn - - def terminate(self): - self.p.terminate() - self.alive = False - print("[debug] instance terminated") - - def subprocess_worker(self, child_conn): - # ⭐⭐ run in subprocess - raise NotImplementedError - - def send_command(self, cmd): - # ⭐ run in main process - repeated = False - if cmd == self.last_user_input: - repeated = True - cmd = "" - else: - self.last_user_input = cmd - self.parent_conn.send(PipeCom("user_input", cmd)) - return repeated, cmd - - def immediate_showoff_when_possible(self, fp): - # ⭐ 主进程 - # 获取fp的拓展名 - file_type = fp.split('.')[-1] - # 如果是文本文件, 则直接显示文本内容 - if file_type.lower() in ['png', 'jpg']: - image_path = os.path.abspath(fp) - self.chatbot.append([ - '检测到新生图像:', - f'本地文件预览:
' - ]) - yield from update_ui(chatbot=self.chatbot, history=self.history) - - def overwatch_workdir_file_change(self): - # ⭐ 主进程 Docker 外挂文件夹监控 - path_to_overwatch = self.autogen_work_dir - change_list = [] - # 扫描路径下的所有文件, 并与self.previous_work_dir_files中所记录的文件进行对比, - # 如果有新文件出现,或者文件的修改时间发生变化,则更新self.previous_work_dir_files中 - # 把新文件和发生变化的文件的路径记录到 change_list 中 - for root, dirs, files in os.walk(path_to_overwatch): - for file in files: - file_path = os.path.join(root, file) - if file_path not in self.previous_work_dir_files.keys(): - last_modified_time = os.stat(file_path).st_mtime - self.previous_work_dir_files.update({file_path: last_modified_time}) - change_list.append(file_path) - else: - last_modified_time = os.stat(file_path).st_mtime - if last_modified_time != self.previous_work_dir_files[file_path]: - self.previous_work_dir_files[file_path] = last_modified_time - change_list.append(file_path) - if len(change_list) > 0: - file_links = "" - for f in change_list: - res = promote_file_to_downloadzone(f) - file_links += f'
{res}' - yield from self.immediate_showoff_when_possible(f) - - self.chatbot.append(['检测到新生文档.', f'文档清单如下: {file_links}']) - yield from update_ui(chatbot=self.chatbot, history=self.history) - return change_list - - - def main_process_ui_control(self, txt, create_or_resume) -> str: - # ⭐ 主进程 - if create_or_resume == 'create': - self.cnt = 1 - self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐ - repeated, cmd_to_autogen = self.send_command(txt) - if txt == 'exit': - self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"]) - yield from update_ui(chatbot=self.chatbot, history=self.history) - self.terminate() - return "terminate" - - # patience = 10 - - while True: - time.sleep(0.5) - if not self.alive: - # the heartbeat watchdog might have it killed - self.terminate() - return "terminate" - if self.parent_conn.poll(): - self.feed_heartbeat_watchdog() - if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]: - self.chatbot.pop(-1) # remove the last line - if "等待您的进一步指令" in self.chatbot[-1][-1]: - self.chatbot.pop(-1) # remove the last line - if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]: - self.chatbot.pop(-1) # remove the last line - msg = self.parent_conn.recv() # PipeCom - if msg.cmd == "done": - self.chatbot.append([f"结束", msg.content]) - self.cnt += 1 - yield from update_ui(chatbot=self.chatbot, history=self.history) - self.terminate() - break - if msg.cmd == "show": - yield from self.overwatch_workdir_file_change() - notice = "" - if repeated: notice = "(自动忽略重复的输入)" - self.chatbot.append([f"运行阶段-{self.cnt}(上次用户反馈输入为: 「{cmd_to_autogen}」{notice}", msg.content]) - self.cnt += 1 - yield from update_ui(chatbot=self.chatbot, history=self.history) - if msg.cmd == "interact": - yield from self.overwatch_workdir_file_change() - self.chatbot.append([f"程序抵达用户反馈节点.", msg.content + - "\n\n等待您的进一步指令." + - "\n\n(1) 一般情况下您不需要说什么, 清空输入区, 然后直接点击“提交”以继续. " + - "\n\n(2) 如果您需要补充些什么, 输入要反馈的内容, 直接点击“提交”以继续. " + - "\n\n(3) 如果您想终止程序, 输入exit, 直接点击“提交”以终止AutoGen并解锁. 
" - ]) - yield from update_ui(chatbot=self.chatbot, history=self.history) - # do not terminate here, leave the subprocess_worker instance alive - return "wait_feedback" - else: - self.feed_heartbeat_watchdog() - if '[GPT-Academic] 等待中' not in self.chatbot[-1][-1]: - # begin_waiting_time = time.time() - self.chatbot.append(["[GPT-Academic] 等待AutoGen执行结果 ...", "[GPT-Academic] 等待中"]) - self.chatbot[-1] = [self.chatbot[-1][0], self.chatbot[-1][1].replace("[GPT-Academic] 等待中", "[GPT-Academic] 等待中.")] - yield from update_ui(chatbot=self.chatbot, history=self.history) - # if time.time() - begin_waiting_time > patience: - # self.chatbot.append([f"结束", "等待超时, 终止AutoGen程序。"]) - # yield from update_ui(chatbot=self.chatbot, history=self.history) - # self.terminate() - # return "terminate" - - self.terminate() - return "terminate" - - def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"): - # ⭐⭐ run in subprocess - patience = 5 * 60 - begin_waiting_time = time.time() - self.child_conn.send(PipeCom("interact", wait_msg)) - while True: - time.sleep(0.5) - if self.child_conn.poll(): - wait_success = True - break - if time.time() - begin_waiting_time > patience: - self.child_conn.send(PipeCom("done", "")) - wait_success = False - break - return wait_success diff --git a/crazy_functions/agent_fns/watchdog.py b/crazy_functions/agent_fns/watchdog.py deleted file mode 100644 index 2a2bdfab95097d6c4ad36329ab1fa02dd2ebe868..0000000000000000000000000000000000000000 --- a/crazy_functions/agent_fns/watchdog.py +++ /dev/null @@ -1,28 +0,0 @@ -import threading, time - -class WatchDog(): - def __init__(self, timeout, bark_fn, interval=3, msg="") -> None: - self.last_feed = None - self.timeout = timeout - self.bark_fn = bark_fn - self.interval = interval - self.msg = msg - self.kill_dog = False - - def watch(self): - while True: - if self.kill_dog: break - if time.time() - self.last_feed > self.timeout: - if len(self.msg) > 0: print(self.msg) - self.bark_fn() - break - time.sleep(self.interval) - - def begin_watch(self): - self.last_feed = time.time() - th = threading.Thread(target=self.watch) - th.daemon = True - th.start() - - def feed(self): - self.last_feed = time.time() diff --git "a/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py" "b/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py" deleted file mode 100644 index 1b28228290f9ee7873787b420ed3fa742df427fa..0000000000000000000000000000000000000000 --- "a/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py" +++ /dev/null @@ -1,141 +0,0 @@ -from toolbox import CatchException, update_ui, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency -import datetime, json - -def fetch_items(list_of_items, batch_size): - for i in range(0, len(list_of_items), batch_size): - yield list_of_items[i:i + batch_size] - -def string_to_options(arguments): - import argparse - import shlex - - # Create an argparse.ArgumentParser instance - parser = argparse.ArgumentParser() - - # Add command-line arguments - parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo") - parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='') - parser.add_argument("--system_prompt", type=str, help="System prompt", default='') - parser.add_argument("--batch", type=int, help="System prompt", default=50) - parser.add_argument("--pre_seq_len", type=int, help="pre_seq_len", 
default=50) - parser.add_argument("--learning_rate", type=float, help="learning_rate", default=2e-2) - parser.add_argument("--num_gpus", type=int, help="num_gpus", default=1) - parser.add_argument("--json_dataset", type=str, help="json_dataset", default="") - parser.add_argument("--ptuning_directory", type=str, help="ptuning_directory", default="") - - - - # Parse the arguments - args = parser.parse_args(shlex.split(arguments)) - - return args - -@CatchException -def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成")) - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - args = plugin_kwargs.get("advanced_arg", None) - if args is None: - chatbot.append(("没给定指令", "退出")) - yield from update_ui(chatbot=chatbot, history=history); return - else: - arguments = string_to_options(arguments=args) - - dat = [] - with open(txt, 'r', encoding='utf8') as f: - for line in f.readlines(): - json_dat = json.loads(line) - dat.append(json_dat["content"]) - - llm_kwargs['llm_model'] = arguments.llm_to_learn - for batch in fetch_items(dat, arguments.batch): - res = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=[f"{arguments.prompt_prefix}\n\n{b}" for b in (batch)], - inputs_show_user_array=[f"Show Nothing" for _ in (batch)], - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[] for _ in (batch)], - sys_prompt_array=[arguments.system_prompt for _ in (batch)], - max_workers=10 # OpenAI所允许的最大并行过载 - ) - - with open(txt+'.generated.json', 'a+', encoding='utf8') as f: - for b, r in zip(batch, res[1::2]): - f.write(json.dumps({"content":b, "summary":r}, ensure_ascii=False)+'\n') - - promote_file_to_downloadzone(txt+'.generated.json', rename_file='generated.json', chatbot=chatbot) - return - - - -@CatchException -def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - import subprocess - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成")) - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - args = plugin_kwargs.get("advanced_arg", None) - if args is None: - chatbot.append(("没给定指令", "退出")) - yield from update_ui(chatbot=chatbot, history=history); return - else: - arguments = string_to_options(arguments=args) - - - - pre_seq_len = arguments.pre_seq_len # 128 - learning_rate = arguments.learning_rate # 2e-2 - num_gpus = arguments.num_gpus # 1 - json_dataset = arguments.json_dataset # 't_code.json' - ptuning_directory = arguments.ptuning_directory # '/home/hmp/ChatGLM2-6B/ptuning' - - command = f"torchrun --standalone --nnodes=1 --nproc-per-node={num_gpus} main.py \ - --do_train \ - --train_file AdvertiseGen/{json_dataset} \ - --validation_file AdvertiseGen/{json_dataset} \ - --preprocessing_num_workers 20 \ - --prompt_column content \ - --response_column summary \ - --overwrite_cache \ - 
--model_name_or_path THUDM/chatglm2-6b \ - --output_dir output/clothgen-chatglm2-6b-pt-{pre_seq_len}-{learning_rate} \ - --overwrite_output_dir \ - --max_source_length 256 \ - --max_target_length 256 \ - --per_device_train_batch_size 1 \ - --per_device_eval_batch_size 1 \ - --gradient_accumulation_steps 16 \ - --predict_with_generate \ - --max_steps 100 \ - --logging_steps 10 \ - --save_steps 20 \ - --learning_rate {learning_rate} \ - --pre_seq_len {pre_seq_len} \ - --quantization_bit 4" - - process = subprocess.Popen(command, shell=True, cwd=ptuning_directory) - try: - process.communicate(timeout=3600*24) - except subprocess.TimeoutExpired: - process.kill() - return diff --git a/crazy_functions/crazy_functions_test.py b/crazy_functions/crazy_functions_test.py deleted file mode 100644 index 0c623b8e027858b2579a021769bb304e34c4e373..0000000000000000000000000000000000000000 --- a/crazy_functions/crazy_functions_test.py +++ /dev/null @@ -1,231 +0,0 @@ -""" -这是什么? - 这个文件用于函数插件的单元测试 - 运行方法 python crazy_functions/crazy_functions_test.py -""" - -# ============================================================================================================================== - -def validate_path(): - import os, sys - dir_name = os.path.dirname(__file__) - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume) - sys.path.append(root_dir_assume) -validate_path() # validate path so you can run from base directory - -# ============================================================================================================================== - -from colorful import * -from toolbox import get_conf, ChatBotWithCookies -import contextlib -import os -import sys -from functools import wraps -proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY') - -llm_kwargs = { - 'api_key': API_KEY, - 'llm_model': LLM_MODEL, - 'top_p':1.0, - 'max_length': None, - 'temperature':1.0, -} -plugin_kwargs = { } -chatbot = ChatBotWithCookies(llm_kwargs) -history = [] -system_prompt = "Serve me as a writing and programming assistant." 
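# A hypothetical sketch (the names `demo_plugin` and `demo_drive` are
# illustrative, not part of the repository): every plugin exercised by this
# test file is a generator yielding (cookies, chatbot, history, msg) tuples,
# which is why each test below drives it with a
# `for cookies, cb, hist, msg in ...` loop and refreshes on every yield.
def demo_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    chatbot.append([txt, ""])
    for i in range(3):
        chatbot[-1] = [txt, f"partial answer {i}"]  # stream a growing partial result
        yield None, chatbot, history, "normal"      # cookies simplified to None in this sketch

def demo_drive():
    for cookies, cb, hist, msg in demo_plugin("hi", llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, 1024):
        print(cb[-1])                               # one UI refresh per yield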
-web_port = 1024 - -# ============================================================================================================================== - -def silence_stdout(func): - @wraps(func) - def wrapper(*args, **kwargs): - _original_stdout = sys.stdout - sys.stdout = open(os.devnull, 'w') - for q in func(*args, **kwargs): - sys.stdout = _original_stdout - yield q - sys.stdout = open(os.devnull, 'w') - sys.stdout.close() - sys.stdout = _original_stdout - return wrapper - -class CLI_Printer(): - def __init__(self) -> None: - self.pre_buf = "" - - def print(self, buf): - bufp = "" - for index, chat in enumerate(buf): - a, b = chat - bufp += sprint亮靛('[Me]:' + a) + '\n' - bufp += '[GPT]:' + b - if index < len(buf)-1: - bufp += '\n' - - if self.pre_buf!="" and bufp.startswith(self.pre_buf): - print(bufp[len(self.pre_buf):], end='') - else: - print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'+bufp, end='') - self.pre_buf = bufp - return - -cli_printer = CLI_Printer() -# ============================================================================================================================== -def test_解析一个Python项目(): - from crazy_functions.解析项目源代码 import 解析一个Python项目 - txt = "crazy_functions/test_project/python/dqn" - for cookies, cb, hist, msg in 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_解析一个Cpp项目(): - from crazy_functions.解析项目源代码 import 解析一个C项目 - txt = "crazy_functions/test_project/cpp/cppipc" - for cookies, cb, hist, msg in 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Latex英文润色(): - from crazy_functions.Latex全文润色 import Latex英文润色 - txt = "crazy_functions/test_project/latex/attention" - for cookies, cb, hist, msg in Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Markdown中译英(): - from crazy_functions.批量Markdown翻译 import Markdown中译英 - txt = "README.md" - for cookies, cb, hist, msg in Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_批量翻译PDF文档(): - from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 - txt = "crazy_functions/test_project/pdf_and_word" - for cookies, cb, hist, msg in 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_谷歌检索小助手(): - from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 - txt = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG=" - for cookies, cb, hist, msg in 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_总结word文档(): - from crazy_functions.总结word文档 import 总结word文档 - txt = "crazy_functions/test_project/pdf_and_word" - for cookies, cb, hist, msg in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_下载arxiv论文并翻译摘要(): - from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 - txt = "1812.10695" - for cookies, cb, hist, msg in 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_联网回答问题(): - from crazy_functions.联网的ChatGPT import 连接网络回答问题 - # txt = "谁是应急食品?" - # >> '根据以上搜索结果可以得知,应急食品是“原神”游戏中的角色派蒙的外号。' - # txt = "道路千万条,安全第一条。后面两句是?" - # >> '行车不规范,亲人两行泪。' - # txt = "You should have gone for the head. What does that mean?" - # >> The phrase "You should have gone for the head" is a quote from the Marvel movies, Avengers: Infinity War and Avengers: Endgame. 
It was spoken by the character Thanos in Infinity War and by Thor in Endgame. - txt = "AutoGPT是什么?" - for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print("当前问答:", cb[-1][-1].replace("\n"," ")) - for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1]) - -def test_解析ipynb文件(): - from crazy_functions.解析JupyterNotebook import 解析ipynb文件 - txt = "crazy_functions/test_samples" - for cookies, cb, hist, msg in 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - - -def test_数学动画生成manim(): - from crazy_functions.数学动画生成manim import 动画生成 - txt = "A ball split into 2, and then split into 4, and finally split into 8." - for cookies, cb, hist, msg in 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - - - -def test_Markdown多语言(): - from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 - txt = "README.md" - history = [] - for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]: - plugin_kwargs = {"advanced_arg": lang} - for cookies, cb, hist, msg in Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Langchain知识库(): - from crazy_functions.Langchain知识库 import 知识库问答 - txt = "./" - chatbot = ChatBotWithCookies(llm_kwargs) - for cookies, cb, hist, msg in silence_stdout(知识库问答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - - chatbot = ChatBotWithCookies(cookies) - from crazy_functions.Langchain知识库 import 读取知识库作答 - txt = "What is the installation method?" - for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - -def test_Langchain知识库读取(): - from crazy_functions.Langchain知识库 import 读取知识库作答 - txt = "远程云服务器部署?" 
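# Aside before the drive loop below: it pipes each refresh through
# cli_printer.print, whose trick (see CLI_Printer above) is to print only the
# new suffix when the chat buffer has merely grown, so streamed replies do not
# flicker. A minimal standalone sketch of that idea, using plain strings:
def print_incremental(prev_buf, new_buf):
    if prev_buf and new_buf.startswith(prev_buf):
        print(new_buf[len(prev_buf):], end='')  # emit only the newly added text
    else:
        print('\n' * 17 + new_buf, end='')      # buffer diverged; redraw everything
    return new_buf
# _buf = print_incremental("", "[Me]: hi\n[GPT]: he")
# _buf = print_incremental(_buf, "[Me]: hi\n[GPT]: hello")  # prints just "llo"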
- for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - -def test_Latex(): - from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比, Latex翻译中文并重新编译PDF - - # txt = r"https://arxiv.org/abs/1706.03762" - # txt = r"https://arxiv.org/abs/1902.03185" - # txt = r"https://arxiv.org/abs/2305.18290" - # txt = r"https://arxiv.org/abs/2305.17608" - # txt = r"https://arxiv.org/abs/2211.16068" # ACE - # txt = r"C:\Users\x\arxiv_cache\2211.16068\workfolder" # ACE - # txt = r"https://arxiv.org/abs/2002.09253" - # txt = r"https://arxiv.org/abs/2306.07831" - # txt = r"https://arxiv.org/abs/2212.10156" - # txt = r"https://arxiv.org/abs/2211.11559" - # txt = r"https://arxiv.org/abs/2303.08774" - txt = r"https://arxiv.org/abs/2303.12712" - # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder" - - - for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - - - - # txt = "2302.02948.tar" - # print(txt) - # main_tex, work_folder = Latex预处理(txt) - # print('main tex:', main_tex) - # res = 编译Latex(main_tex, work_folder) - # # for cookies, cb, hist, msg in silence_stdout(编译Latex)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # cli_printer.print(cb) # print(cb) - - - -# test_解析一个Python项目() -# test_Latex英文润色() -# test_Markdown中译英() -# test_批量翻译PDF文档() -# test_谷歌检索小助手() -# test_总结word文档() -# test_下载arxiv论文并翻译摘要() -# test_解析一个Cpp项目() -# test_联网回答问题() -# test_解析ipynb文件() -# test_数学动画生成manim() -# test_Langchain知识库() -# test_Langchain知识库读取() -if __name__ == "__main__": - test_Latex() - input("程序完成,回车退出。") - print("退出。") \ No newline at end of file diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py deleted file mode 100644 index 9c8aeccb65f567aca3ce3c2bfda066bafd9c5cba..0000000000000000000000000000000000000000 --- a/crazy_functions/crazy_utils.py +++ /dev/null @@ -1,608 +0,0 @@ -from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton -import threading -import os -import logging - -def input_clipping(inputs, history, max_token_limit): - import numpy as np - from request_llms.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - - mode = 'input-and-history' - # 当 输入部分的token占比 小于 全文的一半时,只裁剪历史 - input_token_num = get_token_num(inputs) - if input_token_num < max_token_limit//2: - mode = 'only-history' - max_token_limit = max_token_limit - input_token_num - - everything = [inputs] if mode == 'input-and-history' else [''] - everything.extend(history) - n_token = get_token_num('\n'.join(everything)) - everything_token = [get_token_num(e) for e in everything] - delta = max(everything_token) // 16 # 截断时的颗粒度 - - while n_token > max_token_limit: - where = np.argmax(everything_token) - encoded = enc.encode(everything[where], disallowed_special=()) - clipped_encoded = encoded[:len(encoded)-delta] - everything[where] = enc.decode(clipped_encoded)[:-1] # -1 to remove the may-be illegal char - everything_token[where] = get_token_num(everything[where]) - n_token = get_token_num('\n'.join(everything)) - - if mode == 'input-and-history': - inputs = everything[0] - else: - pass - history = everything[1:] - return inputs, history - -def request_gpt_model_in_new_thread_with_ui_alive( - inputs, inputs_show_user, llm_kwargs, - chatbot, 
history, sys_prompt, refresh_interval=0.2, - handle_token_exceed=True, - retry_times_at_unknown_error=2, - ): - """ - Request GPT model,请求GPT模型同时维持用户界面活跃。 - - 输入参数 Args (以_array结尾的输入变量都是列表,列表长度为子任务的数量,执行时,会把列表拆解,放到每个子线程中分别执行): - inputs (string): List of inputs (输入) - inputs_show_user (string): List of inputs to show user(展现在报告中的输入,借助此参数,在汇总报告中隐藏啰嗦的真实输入,增强报告的可读性) - top_p (float): Top p value for sampling from model distribution (GPT参数,浮点数) - temperature (float): Temperature value for sampling from model distribution(GPT参数,浮点数) - chatbot: chatbot inputs and outputs (用户界面对话窗口句柄,用于数据流可视化) - history (list): List of chat history (历史,对话历史列表) - sys_prompt (string): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样) - refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果) - handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启 - retry_times_at_unknown_error:失败时的重试次数 - - 输出 Returns: - future: 输出,GPT返回的结果 - """ - import time - from concurrent.futures import ThreadPoolExecutor - from request_llms.bridge_all import predict_no_ui_long_connection - # 用户反馈 - chatbot.append([inputs_show_user, ""]) - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - executor = ThreadPoolExecutor(max_workers=16) - mutable = ["", time.time(), ""] - # 看门狗耐心 - watch_dog_patience = 5 - # 请求任务 - def _req_gpt(inputs, history, sys_prompt): - retry_op = retry_times_at_unknown_error - exceeded_cnt = 0 - while True: - # watchdog error - if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience: - raise RuntimeError("检测到程序终止。") - try: - # 【第一种情况】:顺利完成 - result = predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, - history=history, sys_prompt=sys_prompt, observe_window=mutable) - return result - except ConnectionAbortedError as token_exceeded_error: - # 【第二种情况】:Token溢出 - if handle_token_exceed: - exceeded_cnt += 1 - # 【选择处理】 尝试计算比例,尽可能多地保留文本 - from toolbox import get_reduce_token_percent - p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error)) - MAX_TOKEN = get_max_token(llm_kwargs) - EXCEED_ALLO = 512 + 512 * exceeded_cnt - inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO) - mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n' - continue # 返回重试 - else: - # 【选择放弃】 - tb_str = '```\n' + trimmed_format_exc() + '```' - mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n" - return mutable[0] # 放弃 - except: - # 【第三种情况】:其他错误:重试几次 - tb_str = '```\n' + trimmed_format_exc() + '```' - print(tb_str) - mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n" - if retry_op > 0: - retry_op -= 1 - mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n" - if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str): - time.sleep(30) - time.sleep(5) - continue # 返回重试 - else: - time.sleep(5) - return mutable[0] # 放弃 - - # 提交任务 - future = executor.submit(_req_gpt, inputs, history, sys_prompt) - while True: - # yield一次以刷新前端页面 - time.sleep(refresh_interval) - # “喂狗”(看门狗) - mutable[1] = time.time() - if future.done(): - break - chatbot[-1] = [chatbot[-1][0], mutable[0]] - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - - final_result = future.result() - chatbot[-1] = [chatbot[-1][0], final_result] - yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息 - return final_result - -def can_multi_process(llm): - if llm.startswith('gpt-'): return True 
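# Aside: the handle_token_exceed path above ultimately relies on
# input_clipping, which greedily truncates whichever segment currently holds
# the most tokens until the total fits the budget. The same loop in isolation,
# with whitespace-separated words standing in for tokenizer tokens:
def clip_to_budget(segments, max_units, step=4):
    counts = [len(s.split()) for s in segments]
    while sum(counts) > max_units and max(counts) > 0:
        i = counts.index(max(counts))                        # clip the longest segment first
        segments[i] = " ".join(segments[i].split()[:-step])  # drop `step` words from its tail
        counts[i] = len(segments[i].split())
    return segments
# clip_to_budget(["one two three four five six", "seven eight"], 5)
# -> the first segment shrinks to "one two"; the total now fits the budget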
- if llm.startswith('api2d-'): return True - if llm.startswith('azure-'): return True - if llm.startswith('spark'): return True - if llm.startswith('zhipuai') or llm.startswith('glm-'): return True - return False - -def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array, inputs_show_user_array, llm_kwargs, - chatbot, history_array, sys_prompt_array, - refresh_interval=0.2, max_workers=-1, scroller_max_len=30, - handle_token_exceed=True, show_user_at_complete=False, - retry_times_at_unknown_error=2, - ): - """ - Request GPT model using multiple threads with UI and high efficiency - 请求GPT模型的[多线程]版。 - 具备以下功能: - 实时在UI上反馈远程数据流 - 使用线程池,可调节线程池的大小避免openai的流量限制错误 - 处理中途中止的情况 - 网络等出问题时,会把traceback和已经接收的数据转入输出 - - 输入参数 Args (以_array结尾的输入变量都是列表,列表长度为子任务的数量,执行时,会把列表拆解,放到每个子线程中分别执行): - inputs_array (list): List of inputs (每个子任务的输入) - inputs_show_user_array (list): List of inputs to show user(每个子任务展现在报告中的输入,借助此参数,在汇总报告中隐藏啰嗦的真实输入,增强报告的可读性) - llm_kwargs: llm_kwargs参数 - chatbot: chatbot (用户界面对话窗口句柄,用于数据流可视化) - history_array (list): List of chat history (历史对话输入,双层列表,第一层列表是子任务分解,第二层列表是对话历史) - sys_prompt_array (list): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样) - refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果) - max_workers (int, optional): Maximum number of threads (default: see config.py) (最大线程数,如果子任务非常多,需要用此选项防止高频地请求openai导致错误) - scroller_max_len (int, optional): Maximum length for scroller (default: 30)(数据流的显示最后收到的多少个字符,仅仅服务于视觉效果) - handle_token_exceed (bool, optional): (是否在输入过长时,自动缩减文本) - handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启 - show_user_at_complete (bool, optional): (在结束时,把完整输入-输出结果显示在聊天框) - retry_times_at_unknown_error:子任务失败时的重试次数 - - 输出 Returns: - list: List of GPT model responses (每个子任务的输出汇总,如果某个子任务出错,response中会携带traceback报错信息,方便调试和定位问题。) - """ - import time, random - from concurrent.futures import ThreadPoolExecutor - from request_llms.bridge_all import predict_no_ui_long_connection - assert len(inputs_array) == len(history_array) - assert len(inputs_array) == len(sys_prompt_array) - if max_workers == -1: # 读取配置文件 - try: max_workers = get_conf('DEFAULT_WORKER_NUM') - except: max_workers = 8 - if max_workers <= 0: max_workers = 3 - # 屏蔽掉 chatglm的多线程,可能会导致严重卡顿 - if not can_multi_process(llm_kwargs['llm_model']): - max_workers = 1 - - executor = ThreadPoolExecutor(max_workers=max_workers) - n_frag = len(inputs_array) - # 用户反馈 - chatbot.append(["请开始多线程操作。", ""]) - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - # 跨线程传递 - mutable = [["", time.time(), "等待中"] for _ in range(n_frag)] - - # 看门狗耐心 - watch_dog_patience = 5 - - # 子线程任务 - def _req_gpt(index, inputs, history, sys_prompt): - gpt_say = "" - retry_op = retry_times_at_unknown_error - exceeded_cnt = 0 - mutable[index][2] = "执行中" - detect_timeout = lambda: len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience - while True: - # watchdog error - if detect_timeout(): raise RuntimeError("检测到程序终止。") - try: - # 【第一种情况】:顺利完成 - gpt_say = predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, history=history, - sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True - ) - mutable[index][2] = "已成功" - return gpt_say - except ConnectionAbortedError as token_exceeded_error: - # 【第二种情况】:Token溢出 - if handle_token_exceed: - exceeded_cnt += 1 - # 【选择处理】 尝试计算比例,尽可能多地保留文本 - from toolbox import get_reduce_token_percent - p_ratio, n_exceed = 
get_reduce_token_percent(str(token_exceeded_error)) - MAX_TOKEN = get_max_token(llm_kwargs) - EXCEED_ALLO = 512 + 512 * exceeded_cnt - inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO) - gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n' - mutable[index][2] = f"截断重试" - continue # 返回重试 - else: - # 【选择放弃】 - tb_str = '```\n' + trimmed_format_exc() + '```' - gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n" - if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0] - mutable[index][2] = "输入过长已放弃" - return gpt_say # 放弃 - except: - # 【第三种情况】:其他错误 - if detect_timeout(): raise RuntimeError("检测到程序终止。") - tb_str = '```\n' + trimmed_format_exc() + '```' - print(tb_str) - gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n" - if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0] - if retry_op > 0: - retry_op -= 1 - wait = random.randint(5, 20) - if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str): - wait = wait * 3 - fail_info = "OpenAI绑定信用卡可解除频率限制 " - else: - fail_info = "" - # 也许等待十几秒后,情况会好转 - for i in range(wait): - mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1) - # 开始重试 - if detect_timeout(): raise RuntimeError("检测到程序终止。") - mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}" - continue # 返回重试 - else: - mutable[index][2] = "已失败" - wait = 5 - time.sleep(5) - return gpt_say # 放弃 - - # 异步任务开始 - futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip( - range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)] - cnt = 0 - while True: - # yield一次以刷新前端页面 - time.sleep(refresh_interval) - cnt += 1 - worker_done = [h.done() for h in futures] - # 更好的UI视觉效果 - observe_win = [] - # 每个线程都要“喂狗”(看门狗) - for thread_index, _ in enumerate(worker_done): - mutable[thread_index][1] = time.time() - # 在前端打印些好玩的东西 - for thread_index, _ in enumerate(worker_done): - print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\ - replace('\n', '').replace('`', '.').replace(' ', '.').replace('
', '.....').replace('$', '.')+"`... ]" - observe_win.append(print_something_really_funny) - # 在前端打印些好玩的东西 - stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n' - if not done else f'`{mutable[thread_index][2]}`\n\n' - for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)]) - # 在前端打印些好玩的东西 - chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))] - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - if all(worker_done): - executor.shutdown() - break - - # 异步任务结束 - gpt_response_collection = [] - for inputs_show_user, f in zip(inputs_show_user_array, futures): - gpt_res = f.result() - gpt_response_collection.extend([inputs_show_user, gpt_res]) - - # 是否在结束时,在界面上显示结果 - if show_user_at_complete: - for inputs_show_user, f in zip(inputs_show_user_array, futures): - gpt_res = f.result() - chatbot.append([inputs_show_user, gpt_res]) - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - time.sleep(0.5) - return gpt_response_collection - - - -def read_and_clean_pdf_text(fp): - """ - 这个函数用于分割pdf,用了很多trick,逻辑较乱,效果奇好 - - **输入参数说明** - - `fp`:需要读取和清理文本的pdf文件路径 - - **输出参数说明** - - `meta_txt`:清理后的文本内容字符串 - - `page_one_meta`:第一页清理后的文本内容列表 - - **函数功能** - 读取pdf文件并清理其中的文本内容,清理规则包括: - - 提取所有块元的文本信息,并合并为一个字符串 - - 去除短块(字符数小于100)并替换为回车符 - - 清理多余的空行 - - 合并小写字母开头的段落块并替换为空格 - - 清除重复的换行 - - 将每个换行符替换为两个换行符,使每个段落之间有两个换行符分隔 - """ - import fitz, copy - import re - import numpy as np - from colorful import print亮黄, print亮绿 - fc = 0 # Index 0 文本 - fs = 1 # Index 1 字体 - fb = 2 # Index 2 框框 - REMOVE_FOOT_NOTE = True # 是否丢弃掉 不是正文的内容 (比正文字体小,如参考文献、脚注、图注等) - REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # 小于正文的?时,判定为不是正文(有些文章的正文部分字体大小不是100%统一的,有肉眼不可见的小变化) - def primary_ffsize(l): - """ - 提取文本块主字体 - """ - fsize_statiscs = {} - for wtf in l['spans']: - if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0 - fsize_statiscs[wtf['size']] += len(wtf['text']) - return max(fsize_statiscs, key=fsize_statiscs.get) - - def ffsize_same(a,b): - """ - 提取字体大小是否近似相等 - """ - return abs((a-b)/max(a,b)) < 0.02 - - with fitz.open(fp) as doc: - meta_txt = [] - meta_font = [] - - meta_line = [] - meta_span = [] - ############################## <第 1 步,搜集初始信息> ################################## - for index, page in enumerate(doc): - # file_content += page.get_text() - text_areas = page.get_text("dict") # 获取页面上的文本信息 - for t in text_areas['blocks']: - if 'lines' in t: - pf = 998 - for l in t['lines']: - txt_line = "".join([wtf['text'] for wtf in l['spans']]) - if len(txt_line) == 0: continue - pf = primary_ffsize(l) - meta_line.append([txt_line, pf, l['bbox'], l]) - for wtf in l['spans']: # for l in t['lines']: - meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])]) - # meta_line.append(["NEW_BLOCK", pf]) - # 块元提取 for each word segment with in line for each line cross-line words for each block - meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace( - '- ', '') for t in text_areas['blocks'] if 'lines' in t]) - meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']]) - for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t]) - if index == 0: - page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace( - '- ', '') for t in text_areas['blocks'] if 'lines' in t] - - ############################## <第 2 步,获取正文主字体> ################################## - try: - fsize_statiscs = {} - for span in meta_span: - if span[1] not in fsize_statiscs: 
fsize_statiscs[span[1]] = 0 - fsize_statiscs[span[1]] += span[2] - main_fsize = max(fsize_statiscs, key=fsize_statiscs.get) - if REMOVE_FOOT_NOTE: - give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT - except: - raise RuntimeError(f'抱歉, 我们暂时无法解析此PDF文档: {fp}。') - ############################## <第 3 步,切分和重新整合> ################################## - mega_sec = [] - sec = [] - for index, line in enumerate(meta_line): - if index == 0: - sec.append(line[fc]) - continue - if REMOVE_FOOT_NOTE: - if meta_line[index][fs] <= give_up_fize_threshold: - continue - if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]): - # 尝试识别段落 - if meta_line[index][fc].endswith('.') and\ - (meta_line[index-1][fc] != 'NEW_BLOCK') and \ - (meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7: - sec[-1] += line[fc] - sec[-1] += "\n\n" - else: - sec[-1] += " " - sec[-1] += line[fc] - else: - if (index+1 < len(meta_line)) and \ - meta_line[index][fs] > main_fsize: - # 单行 + 字体大 - mega_sec.append(copy.deepcopy(sec)) - sec = [] - sec.append("# " + line[fc]) - else: - # 尝试识别section - if meta_line[index-1][fs] > meta_line[index][fs]: - sec.append("\n" + line[fc]) - else: - sec.append(line[fc]) - mega_sec.append(copy.deepcopy(sec)) - - finals = [] - for ms in mega_sec: - final = " ".join(ms) - final = final.replace('- ', ' ') - finals.append(final) - meta_txt = finals - - ############################## <第 4 步,乱七八糟的后处理> ################################## - def 把字符太少的块清除为回车(meta_txt): - for index, block_txt in enumerate(meta_txt): - if len(block_txt) < 100: - meta_txt[index] = '\n' - return meta_txt - meta_txt = 把字符太少的块清除为回车(meta_txt) - - def 清理多余的空行(meta_txt): - for index in reversed(range(1, len(meta_txt))): - if meta_txt[index] == '\n' and meta_txt[index-1] == '\n': - meta_txt.pop(index) - return meta_txt - meta_txt = 清理多余的空行(meta_txt) - - def 合并小写开头的段落块(meta_txt): - def starts_with_lowercase_word(s): - pattern = r"^[a-z]+" - match = re.match(pattern, s) - if match: - return True - else: - return False - # 对于某些PDF会有第一个段落就以小写字母开头,为了避免索引错误将其更改为大写 - if starts_with_lowercase_word(meta_txt[0]): - meta_txt[0] = meta_txt[0].capitalize() - for _ in range(100): - for index, block_txt in enumerate(meta_txt): - if starts_with_lowercase_word(block_txt): - if meta_txt[index-1] != '\n': - meta_txt[index-1] += ' ' - else: - meta_txt[index-1] = '' - meta_txt[index-1] += meta_txt[index] - meta_txt[index] = '\n' - return meta_txt - meta_txt = 合并小写开头的段落块(meta_txt) - meta_txt = 清理多余的空行(meta_txt) - - meta_txt = '\n'.join(meta_txt) - # 清除重复的换行 - for _ in range(5): - meta_txt = meta_txt.replace('\n\n', '\n') - - # 换行 -> 双换行 - meta_txt = meta_txt.replace('\n', '\n\n') - - ############################## <第 5 步,展示分割效果> ################################## - # for f in finals: - # print亮黄(f) - # print亮绿('***************************') - - return meta_txt, page_one_meta - - -def get_files_from_everything(txt, type): # type='.md' - """ - 这个函数是用来获取指定目录下所有指定类型(如.md)的文件,并且对于网络上的文件,也可以获取它。 - 下面是对每个参数和返回值的说明: - 参数 - - txt: 路径或网址,表示要搜索的文件或者文件夹路径或网络上的文件。 - - type: 字符串,表示要搜索的文件类型。默认是.md。 - 返回值 - - success: 布尔值,表示函数是否成功执行。 - - file_manifest: 文件路径列表,里面包含以指定类型为后缀名的所有文件的绝对路径。 - - project_folder: 字符串,表示文件所在的文件夹路径。如果是网络上的文件,就是临时文件夹的路径。 - 该函数详细注释已添加,请确认是否满足您的需要。 - """ - import glob, os - - success = True - if txt.startswith('http'): - # 网络的远程文件 - import requests - from toolbox import get_conf - from toolbox import get_log_folder, gen_time_str - proxies = get_conf('proxies') - try: - r = 
requests.get(txt, proxies=proxies) - except: - raise ConnectionRefusedError(f"无法下载资源{txt},请检查。") - path = os.path.join(get_log_folder(plugin_name='web_download'), gen_time_str()+type) - with open(path, 'wb+') as f: f.write(r.content) - project_folder = get_log_folder(plugin_name='web_download') - file_manifest = [path] - elif txt.endswith(type): - # 直接给定文件 - file_manifest = [txt] - project_folder = os.path.dirname(txt) - elif os.path.exists(txt): - # 本地路径,递归搜索 - project_folder = txt - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)] - if len(file_manifest) == 0: - success = False - else: - project_folder = None - file_manifest = [] - success = False - - return success, file_manifest, project_folder - - - -@Singleton -class nougat_interface(): - def __init__(self): - self.threadLock = threading.Lock() - - def nougat_with_timeout(self, command, cwd, timeout=3600): - import subprocess - from toolbox import ProxyNetworkActivate - logging.info(f'正在执行命令 {command}') - with ProxyNetworkActivate("Nougat_Download"): - process = subprocess.Popen(command, shell=True, cwd=cwd, env=os.environ) - try: - stdout, stderr = process.communicate(timeout=timeout) - except subprocess.TimeoutExpired: - process.kill() - stdout, stderr = process.communicate() - print("Process timed out!") - return False - return True - - - def NOUGAT_parse_pdf(self, fp, chatbot, history): - from toolbox import update_ui_lastest_msg - - yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...", - chatbot=chatbot, history=history, delay=0) - self.threadLock.acquire() - import glob, threading, os - from toolbox import get_log_folder, gen_time_str - dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str()) - os.makedirs(dst) - - yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)", - chatbot=chatbot, history=history, delay=0) - self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600) - res = glob.glob(os.path.join(dst,'*.mmd')) - if len(res) == 0: - self.threadLock.release() - raise RuntimeError("Nougat解析论文失败。") - self.threadLock.release() - return res[0] - - - - -def try_install_deps(deps, reload_m=[]): - import subprocess, sys, importlib - for dep in deps: - subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep]) - import site - importlib.reload(site) - for m in reload_m: - importlib.reload(__import__(m)) - - -def get_plugin_arg(plugin_kwargs, key, default): - # 如果参数是空的 - if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key) - # 正常情况 - return plugin_kwargs.get(key, default) diff --git a/crazy_functions/diagram_fns/file_tree.py b/crazy_functions/diagram_fns/file_tree.py deleted file mode 100644 index fa7e2e4c4bf56329b0d6c8beb8c5de2cbdbce8b0..0000000000000000000000000000000000000000 --- a/crazy_functions/diagram_fns/file_tree.py +++ /dev/null @@ -1,122 +0,0 @@ -import os -from textwrap import indent - -class FileNode: - def __init__(self, name): - self.name = name - self.children = [] - self.is_leaf = False - self.level = 0 - self.parenting_ship = [] - self.comment = "" - self.comment_maxlen_show = 50 - - @staticmethod - def add_linebreaks_at_spaces(string, interval=10): - return '\n'.join(string[i:i+interval] for i in range(0, len(string), interval)) - - def sanitize_comment(self, comment): - if len(comment) > self.comment_maxlen_show: suf = '...' 
- else: suf = '' - comment = comment[:self.comment_maxlen_show] - comment = comment.replace('\"', '').replace('`', '').replace('\n', '').replace('`', '').replace('$', '') - comment = self.add_linebreaks_at_spaces(comment, 10) - return '`' + comment + suf + '`' - - def add_file(self, file_path, file_comment): - directory_names, file_name = os.path.split(file_path) - current_node = self - level = 1 - if directory_names == "": - new_node = FileNode(file_name) - current_node.children.append(new_node) - new_node.is_leaf = True - new_node.comment = self.sanitize_comment(file_comment) - new_node.level = level - current_node = new_node - else: - dnamesplit = directory_names.split(os.sep) - for i, directory_name in enumerate(dnamesplit): - found_child = False - level += 1 - for child in current_node.children: - if child.name == directory_name: - current_node = child - found_child = True - break - if not found_child: - new_node = FileNode(directory_name) - current_node.children.append(new_node) - new_node.level = level - 1 - current_node = new_node - term = FileNode(file_name) - term.level = level - term.comment = self.sanitize_comment(file_comment) - term.is_leaf = True - current_node.children.append(term) - - def print_files_recursively(self, level=0, code="R0"): - print(' '*level + self.name + ' ' + str(self.is_leaf) + ' ' + str(self.level)) - for j, child in enumerate(self.children): - child.print_files_recursively(level=level+1, code=code+str(j)) - self.parenting_ship.extend(child.parenting_ship) - p1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]""" - p2 = """ --> """ - p3 = f"""{code+str(j)}[\"🗎{child.name}\"]""" if child.is_leaf else f"""{code+str(j)}[[\"📁{child.name}\"]]""" - edge_code = p1 + p2 + p3 - if edge_code in self.parenting_ship: - continue - self.parenting_ship.append(edge_code) - if self.comment != "": - pc1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]""" - pc2 = f""" -.-x """ - pc3 = f"""C{code}[\"{self.comment}\"]:::Comment""" - edge_code = pc1 + pc2 + pc3 - self.parenting_ship.append(edge_code) - - -MERMAID_TEMPLATE = r""" -```mermaid -flowchart LR - %% 一个特殊标记,用于在生成mermaid图表时隐藏代码块 - classDef Comment stroke-dasharray: 5 5 - subgraph {graph_name} -{relationship} - end -``` -""" - -def build_file_tree_mermaid_diagram(file_manifest, file_comments, graph_name): - # Create the root node - file_tree_struct = FileNode("root") - # Build the tree structure - for file_path, file_comment in zip(file_manifest, file_comments): - file_tree_struct.add_file(file_path, file_comment) - file_tree_struct.print_files_recursively() - cc = "\n".join(file_tree_struct.parenting_ship) - ccc = indent(cc, prefix=" "*8) - return MERMAID_TEMPLATE.format(graph_name=graph_name, relationship=ccc) - -if __name__ == "__main__": - # File manifest - file_manifest = [ - "cradle_void_terminal.ipynb", - "tests/test_utils.py", - "tests/test_plugins.py", - "tests/test_llms.py", - "config.py", - "build/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/model_weights_0.bin", - "crazy_functions/latex_fns/latex_actions.py", - "crazy_functions/latex_fns/latex_toolbox.py" - ] - file_comments = [ - "根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件", - "包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器", - "用于构建HTML报告的类和方法用于构建HTML报告的类和方法用于构建HTML报告的类和方法", - "包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码", - "用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数", - 
"是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块", - "用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器", - "包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类", - ] - print(build_file_tree_mermaid_diagram(file_manifest, file_comments, "项目文件树")) \ No newline at end of file diff --git a/crazy_functions/game_fns/game_ascii_art.py b/crazy_functions/game_fns/game_ascii_art.py deleted file mode 100644 index e0b700877415f04437413ac1765fa90fe1b0844f..0000000000000000000000000000000000000000 --- a/crazy_functions/game_fns/game_ascii_art.py +++ /dev/null @@ -1,42 +0,0 @@ -from toolbox import CatchException, update_ui, update_ui_lastest_msg -from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from request_llms.bridge_all import predict_no_ui_long_connection -from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing -import random - - -class MiniGame_ASCII_Art(GptAcademicGameBaseState): - def step(self, prompt, chatbot, history): - if self.step_cnt == 0: - chatbot.append(["我画你猜(动物)", "请稍等..."]) - else: - if prompt.strip() == 'exit': - self.delete_game = True - yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.) - return - chatbot.append([prompt, ""]) - yield from update_ui(chatbot=chatbot, history=history) - - if self.step_cnt == 0: - self.lock_plugin(chatbot) - self.cur_task = 'draw' - - if self.cur_task == 'draw': - avail_obj = ["狗","猫","鸟","鱼","老鼠","蛇"] - self.obj = random.choice(avail_obj) - inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + \ - f"This time you draw a {self.obj}. Note that you must not indicate what you have draw in the text, and you should only produce the ASCII art wrapped by ```. " - raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="") - self.cur_task = 'identify user guess' - res = get_code_block(raw_res) - history += ['', f'the answer is {self.obj}', inputs, res] - yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.) - - elif self.cur_task == 'identify user guess': - if is_same_thing(self.obj, prompt, self.llm_kwargs): - self.delete_game = True - yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.) - else: - self.cur_task = 'identify user guess' - yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.) \ No newline at end of file diff --git a/crazy_functions/game_fns/game_interactive_story.py b/crazy_functions/game_fns/game_interactive_story.py deleted file mode 100644 index 5c25f4a350409006ca7a4cd03f010d6b47eb044f..0000000000000000000000000000000000000000 --- a/crazy_functions/game_fns/game_interactive_story.py +++ /dev/null @@ -1,212 +0,0 @@ -prompts_hs = """ 请以“{headstart}”为开头,编写一个小说的第一幕。 - -- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。 -- 出现人物时,给出人物的名字。 -- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。 -- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。 -- 字数要求:第一幕的字数少于300字,且少于2个段落。 -""" - -prompts_interact = """ 小说的前文回顾: -「 -{previously_on_story} -」 - -你是一个作家,根据以上的情节,给出4种不同的后续剧情发展方向,每个发展方向都精明扼要地用一句话说明。稍后,我将在这4个选择中,挑选一种剧情发展。 - -输出格式例如: -1. 后续剧情发展1 -2. 后续剧情发展2 -3. 后续剧情发展3 -4. 
后续剧情发展4 -""" - - -prompts_resume = """小说的前文回顾: -「 -{previously_on_story} -」 - -你是一个作家,我们正在互相讨论,确定后续剧情的发展。 -在以下的剧情发展中, -「 -{choice} -」 -我认为更合理的是:{user_choice}。 -请在前文的基础上(不要重复前文),围绕我选定的剧情情节,编写小说的下一幕。 - -- 禁止杜撰不符合我选择的剧情。 -- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。 -- 不要重复前文。 -- 出现人物时,给出人物的名字。 -- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。 -- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。 -- 小说的下一幕字数少于300字,且少于2个段落。 -""" - - -prompts_terminate = """小说的前文回顾: -「 -{previously_on_story} -」 - -你是一个作家,我们正在互相讨论,确定后续剧情的发展。 -现在,故事该结束了,我认为最合理的故事结局是:{user_choice}。 - -请在前文的基础上(不要重复前文),编写小说的最后一幕。 - -- 不要重复前文。 -- 出现人物时,给出人物的名字。 -- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。 -- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。 -- 字数要求:最后一幕的字数少于1000字。 -""" - - -from toolbox import CatchException, update_ui, update_ui_lastest_msg -from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from request_llms.bridge_all import predict_no_ui_long_connection -from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing -import random - - -class MiniGame_ResumeStory(GptAcademicGameBaseState): - story_headstart = [ - '先行者知道,他现在是全宇宙中唯一的一个人了。', - '深夜,一个年轻人穿过天安门广场向纪念堂走去。在二十二世纪编年史中,计算机把他的代号定为M102。', - '他知道,这最后一课要提前讲了。又一阵剧痛从肝部袭来,几乎使他晕厥过去。', - '在距地球五万光年的远方,在银河系的中心,一场延续了两万年的星际战争已接近尾声。那里的太空中渐渐隐现出一个方形区域,仿佛灿烂的群星的背景被剪出一个方口。', - '伊依一行三人乘坐一艘游艇在南太平洋上做吟诗航行,他们的目的地是南极,如果几天后能顺利到达那里,他们将钻出地壳去看诗云。', - '很多人生来就会莫名其妙地迷上一样东西,仿佛他的出生就是要和这东西约会似的,正是这样,圆圆迷上了肥皂泡。' - ] - - - def begin_game_step_0(self, prompt, chatbot, history): - # init game at step 0 - self.headstart = random.choice(self.story_headstart) - self.story = [] - chatbot.append(["互动写故事", f"这次的故事开头是:{self.headstart}"]) - self.sys_prompt_ = '你是一个想象力丰富的杰出作家。正在与你的朋友互动,一起写故事,因此你每次写的故事段落应少于300字(结局除外)。' - - - def generate_story_image(self, story_paragraph): - try: - from crazy_functions.图片生成 import gen_image - prompt_ = predict_no_ui_long_connection(inputs=story_paragraph, llm_kwargs=self.llm_kwargs, history=[], sys_prompt='你需要根据用户给出的小说段落,进行简短的环境描写。要求:80字以内。') - image_url, image_path = gen_image(self.llm_kwargs, prompt_, '512x512', model="dall-e-2", quality='standard', style='natural') - return f'
' - except: - return '' - - def step(self, prompt, chatbot, history): - - """ - 首先,处理游戏初始化等特殊情况 - """ - if self.step_cnt == 0: - self.begin_game_step_0(prompt, chatbot, history) - self.lock_plugin(chatbot) - self.cur_task = 'head_start' - else: - if prompt.strip() == 'exit' or prompt.strip() == '结束剧情': - # should we terminate game here? - self.delete_game = True - yield from update_ui_lastest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.) - return - if '剧情收尾' in prompt: - self.cur_task = 'story_terminate' - # # well, game resumes - # chatbot.append([prompt, ""]) - # update ui, don't keep the user waiting - yield from update_ui(chatbot=chatbot, history=history) - - - """ - 处理游戏的主体逻辑 - """ - if self.cur_task == 'head_start': - """ - 这是游戏的第一步 - """ - inputs_ = prompts_hs.format(headstart=self.headstart) - history_ = [] - story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, '故事开头', self.llm_kwargs, - chatbot, history_, self.sys_prompt_ - ) - self.story.append(story_paragraph) - # # 配图 - yield from update_ui_lastest_msg(lastmsg=story_paragraph + '
正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.) - yield from update_ui_lastest_msg(lastmsg=story_paragraph + '
'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.) - - # # 构建后续剧情引导 - previously_on_story = "" - for s in self.story: - previously_on_story += s + '\n' - inputs_ = prompts_interact.format(previously_on_story=previously_on_story) - history_ = [] - self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, '请在以下几种故事走向中,选择一种(当然,您也可以选择给出其他故事走向):', self.llm_kwargs, - chatbot, - history_, - self.sys_prompt_ - ) - self.cur_task = 'user_choice' - - - elif self.cur_task == 'user_choice': - """ - 根据用户的提示,确定故事的下一步 - """ - if '请在以下几种故事走向中,选择一种' in chatbot[-1][0]: chatbot.pop(-1) - previously_on_story = "" - for s in self.story: - previously_on_story += s + '\n' - inputs_ = prompts_resume.format(previously_on_story=previously_on_story, choice=self.next_choices, user_choice=prompt) - history_ = [] - story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, f'下一段故事(您的选择是:{prompt})。', self.llm_kwargs, - chatbot, history_, self.sys_prompt_ - ) - self.story.append(story_paragraph) - # # 配图 - yield from update_ui_lastest_msg(lastmsg=story_paragraph + '
正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.) - yield from update_ui_lastest_msg(lastmsg=story_paragraph + '
'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.) - - # # 构建后续剧情引导 - previously_on_story = "" - for s in self.story: - previously_on_story += s + '\n' - inputs_ = prompts_interact.format(previously_on_story=previously_on_story) - history_ = [] - self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, - '请在以下几种故事走向中,选择一种。当然,您也可以给出您心中的其他故事走向。另外,如果您希望剧情立即收尾,请输入剧情走向,并以“剧情收尾”四个字提示程序。', self.llm_kwargs, - chatbot, - history_, - self.sys_prompt_ - ) - self.cur_task = 'user_choice' - - - elif self.cur_task == 'story_terminate': - """ - 根据用户的提示,确定故事的结局 - """ - previously_on_story = "" - for s in self.story: - previously_on_story += s + '\n' - inputs_ = prompts_terminate.format(previously_on_story=previously_on_story, user_choice=prompt) - history_ = [] - story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs_, f'故事收尾(您的选择是:{prompt})。', self.llm_kwargs, - chatbot, history_, self.sys_prompt_ - ) - # # 配图 - yield from update_ui_lastest_msg(lastmsg=story_paragraph + '
正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.) - yield from update_ui_lastest_msg(lastmsg=story_paragraph + '
'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.) - - # terminate game - self.delete_game = True - return diff --git a/crazy_functions/game_fns/game_utils.py b/crazy_functions/game_fns/game_utils.py deleted file mode 100644 index 09b6f7a935f3e1f254c4cd0f3b74f78e4c2af298..0000000000000000000000000000000000000000 --- a/crazy_functions/game_fns/game_utils.py +++ /dev/null @@ -1,35 +0,0 @@ - -from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError -from request_llms.bridge_all import predict_no_ui_long_connection -def get_code_block(reply): - import re - pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks - matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) == 1: - return "```" + matches[0] + "```" # code block - raise RuntimeError("GPT is not generating proper code.") - -def is_same_thing(a, b, llm_kwargs): - from pydantic import BaseModel, Field - class IsSameThing(BaseModel): - is_same_thing: bool = Field(description="determine whether two objects are same thing.", default=False) - - def run_gpt_fn(inputs, sys_prompt, history=[]): - return predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, - history=history, sys_prompt=sys_prompt, observe_window=[] - ) - - gpt_json_io = GptJsonIO(IsSameThing) - inputs_01 = "Identity whether the user input and the target is the same thing: \n target object: {a} \n user input object: {b} \n\n\n".format(a=a, b=b) - inputs_01 += "\n\n\n Note that the user may describe the target object with a different language, e.g. cat and 猫 are the same thing." - analyze_res_cot_01 = run_gpt_fn(inputs_01, "", []) - - inputs_02 = inputs_01 + gpt_json_io.format_instructions - analyze_res = run_gpt_fn(inputs_02, "", [inputs_01, analyze_res_cot_01]) - - try: - res = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn) - return res.is_same_thing - except JsonStringError as e: - return False \ No newline at end of file diff --git a/crazy_functions/gen_fns/gen_fns_shared.py b/crazy_functions/gen_fns/gen_fns_shared.py deleted file mode 100644 index 8e73794e84437e861d3468d4f0ab799deae6d98c..0000000000000000000000000000000000000000 --- a/crazy_functions/gen_fns/gen_fns_shared.py +++ /dev/null @@ -1,70 +0,0 @@ -import time -import importlib -from toolbox import trimmed_format_exc, gen_time_str, get_log_folder -from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder -from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg -import multiprocessing - -def get_class_name(class_string): - import re - # Use regex to extract the class name - class_name = re.search(r'class (\w+)\(', class_string).group(1) - return class_name - -def try_make_module(code, chatbot): - module_file = 'gpt_fn_' + gen_time_str().replace('-','_') - fn_path = f'{get_log_folder(plugin_name="gen_plugin_verify")}/{module_file}.py' - with open(fn_path, 'w', encoding='utf8') as f: f.write(code) - promote_file_to_downloadzone(fn_path, chatbot=chatbot) - class_name = get_class_name(code) - manager = multiprocessing.Manager() - return_dict = manager.dict() - p = multiprocessing.Process(target=is_function_successfully_generated, args=(fn_path, class_name, return_dict)) - # only has 10 seconds to run - p.start(); p.join(timeout=10) - if p.is_alive(): p.terminate(); p.join() - p.close() - return return_dict["success"], return_dict['traceback'] - -# check is_function_successfully_generated -def 
is_function_successfully_generated(fn_path, class_name, return_dict): - return_dict['success'] = False - return_dict['traceback'] = "" - try: - # Create a spec for the module - module_spec = importlib.util.spec_from_file_location('example_module', fn_path) - # Load the module - example_module = importlib.util.module_from_spec(module_spec) - module_spec.loader.exec_module(example_module) - # Now you can use the module - some_class = getattr(example_module, class_name) - # Now you can create an instance of the class - instance = some_class() - return_dict['success'] = True - return - except: - return_dict['traceback'] = trimmed_format_exc() - return - -def subprocess_worker(code, file_path, return_dict): - return_dict['result'] = None - return_dict['success'] = False - return_dict['traceback'] = "" - try: - module_file = 'gpt_fn_' + gen_time_str().replace('-','_') - fn_path = f'{get_log_folder(plugin_name="gen_plugin_run")}/{module_file}.py' - with open(fn_path, 'w', encoding='utf8') as f: f.write(code) - class_name = get_class_name(code) - # Create a spec for the module - module_spec = importlib.util.spec_from_file_location('example_module', fn_path) - # Load the module - example_module = importlib.util.module_from_spec(module_spec) - module_spec.loader.exec_module(example_module) - # Now you can use the module - some_class = getattr(example_module, class_name) - # Now you can create an instance of the class - instance = some_class() - return_dict['result'] = instance.run(file_path) - return_dict['success'] = True - except: - return_dict['traceback'] = trimmed_format_exc() diff --git a/crazy_functions/ipc_fns/mp.py b/crazy_functions/ipc_fns/mp.py deleted file mode 100644 index 575d47ccecbb775205193085c58c06a114d3bfc2..0000000000000000000000000000000000000000 --- a/crazy_functions/ipc_fns/mp.py +++ /dev/null @@ -1,37 +0,0 @@ -import platform -import pickle -import multiprocessing - -def run_in_subprocess_wrapper_func(v_args): - func, args, kwargs, return_dict, exception_dict = pickle.loads(v_args) - import sys - try: - result = func(*args, **kwargs) - return_dict['result'] = result - except Exception as e: - exc_info = sys.exc_info() - exception_dict['exception'] = exc_info - -def run_in_subprocess_with_timeout(func, timeout=60): - if platform.system() == 'Linux': - def wrapper(*args, **kwargs): - return_dict = multiprocessing.Manager().dict() - exception_dict = multiprocessing.Manager().dict() - v_args = pickle.dumps((func, args, kwargs, return_dict, exception_dict)) - process = multiprocessing.Process(target=run_in_subprocess_wrapper_func, args=(v_args,)) - process.start() - process.join(timeout) - if process.is_alive(): - process.terminate() - raise TimeoutError(f'功能单元{str(func)}未能在规定时间内完成任务') - process.close() - if 'exception' in exception_dict: - # ooops, the subprocess ran into an exception - exc_info = exception_dict['exception'] - raise exc_info[1].with_traceback(exc_info[2]) - if 'result' in return_dict.keys(): - # If the subprocess ran successfully, return the result - return return_dict['result'] - return wrapper - else: - return func \ No newline at end of file diff --git a/crazy_functions/json_fns/pydantic_io.py b/crazy_functions/json_fns/pydantic_io.py deleted file mode 100644 index 4e300d65dd918f890d64e68e0cc5a37f36366585..0000000000000000000000000000000000000000 --- a/crazy_functions/json_fns/pydantic_io.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/model_io/output_parsers/pydantic.ipynb - -Example 1. 
- -# Define your desired data structure. -class Joke(BaseModel): - setup: str = Field(description="question to set up a joke") - punchline: str = Field(description="answer to resolve the joke") - - # You can add custom validation logic easily with Pydantic. - @validator("setup") - def question_ends_with_question_mark(cls, field): - if field[-1] != "?": - raise ValueError("Badly formed question!") - return field - - -Example 2. - -# Here's another example, but with a compound typed field. -class Actor(BaseModel): - name: str = Field(description="name of an actor") - film_names: List[str] = Field(description="list of names of films they starred in") -""" - -import json, re, logging - - -PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below. - -As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}} -the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. - -Here is the output schema: -``` -{schema} -```""" - - -PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE = """The output should be formatted as a JSON instance that conforms to the JSON schema below. -``` -{schema} -```""" - -class JsonStringError(Exception): ... - -class GptJsonIO(): - - def __init__(self, schema, example_instruction=True): - self.pydantic_object = schema - self.example_instruction = example_instruction - self.format_instructions = self.generate_format_instructions() - - def generate_format_instructions(self): - schema = self.pydantic_object.schema() - - # Remove extraneous fields. - reduced_schema = schema - if "title" in reduced_schema: - del reduced_schema["title"] - if "type" in reduced_schema: - del reduced_schema["type"] - # Ensure json in context is well-formed with double quotes. - if self.example_instruction: - schema_str = json.dumps(reduced_schema) - return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) - else: - return PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE.format(schema=schema_str) - - def generate_output(self, text): - # Greedy search for 1st json candidate. - match = re.search( - r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL - ) - json_str = "" - if match: json_str = match.group() - json_object = json.loads(json_str, strict=False) - final_object = self.pydantic_object.parse_obj(json_object) - return final_object - - def generate_repair_prompt(self, broken_json, error): - prompt = "Fix a broken json string.\n\n" + \ - "(1) The broken json string need to fix is: \n\n" + \ - "```" + "\n" + \ - broken_json + "\n" + \ - "```" + "\n\n" + \ - "(2) The error message is: \n\n" + \ - error + "\n\n" + \ - "Now, fix this json string. 
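# [annotation: illustrative sketch, not part of the original file] A minimal
# usage sketch of GptJsonIO, assuming `run_gpt_fn(inputs, sys_prompt)` is any
# text-in/text-out LLM call (e.g. predict_no_ui_long_connection with
# llm_kwargs bound) and `Joke` is the pydantic schema from the docstring
# above; all names here are illustrative assumptions:
#
#     gpt_json_io = GptJsonIO(Joke)
#     inputs = "Tell me a joke. " + gpt_json_io.format_instructions
#     raw_response = run_gpt_fn(inputs, "")
#     joke = gpt_json_io.generate_output_auto_repair(raw_response, run_gpt_fn)
#     # joke.setup / joke.punchline are now validated fields; if the model
#     # returned malformed JSON, one automatic repair round is attempted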
\n\n" - return prompt - - def generate_output_auto_repair(self, response, gpt_gen_fn): - """ - response: string containing canidate json - gpt_gen_fn: gpt_gen_fn(inputs, sys_prompt) - """ - try: - result = self.generate_output(response) - except Exception as e: - try: - logging.info(f'Repairing json:{response}') - repair_prompt = self.generate_repair_prompt(broken_json = response, error=repr(e)) - result = self.generate_output(gpt_gen_fn(repair_prompt, self.format_instructions)) - logging.info('Repaire json success.') - except Exception as e: - # 没辙了,放弃治疗 - logging.info('Repaire json fail.') - raise JsonStringError('Cannot repair json.', str(e)) - return result - diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py deleted file mode 100644 index 8772f5e1fb530d72be282deaef2eb18ed9ffa1d2..0000000000000000000000000000000000000000 --- a/crazy_functions/latex_fns/latex_actions.py +++ /dev/null @@ -1,467 +0,0 @@ -from toolbox import update_ui, update_ui_lastest_msg, get_log_folder -from toolbox import get_conf, objdump, objload, promote_file_to_downloadzone -from .latex_toolbox import PRESERVE, TRANSFORM -from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace -from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process -from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout -from .latex_toolbox import find_title_and_abs - -import os, shutil -import re -import numpy as np - -pj = os.path.join - - -def split_subprocess(txt, project_folder, return_dict, opts): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. 
- """ - text = txt - mask = np.zeros(len(txt), dtype=np.uint8) + TRANSFORM - - # 吸收title与作者以上的部分 - text, mask = set_forbidden_text(text, mask, r"^(.*?)\\maketitle", re.DOTALL) - text, mask = set_forbidden_text(text, mask, r"^(.*?)\\begin{document}", re.DOTALL) - # 吸收iffalse注释 - text, mask = set_forbidden_text(text, mask, r"\\iffalse(.*?)\\fi", re.DOTALL) - # 吸收在42行以内的begin-end组合 - text, mask = set_forbidden_text_begin_end(text, mask, r"\\begin\{([a-z\*]*)\}(.*?)\\end\{\1\}", re.DOTALL, limit_n_lines=42) - # 吸收匿名公式 - text, mask = set_forbidden_text(text, mask, [ r"\$\$([^$]+)\$\$", r"\\\[.*?\\\]" ], re.DOTALL) - # 吸收其他杂项 - text, mask = set_forbidden_text(text, mask, [ r"\\section\{(.*?)\}", r"\\section\*\{(.*?)\}", r"\\subsection\{(.*?)\}", r"\\subsubsection\{(.*?)\}" ]) - text, mask = set_forbidden_text(text, mask, [ r"\\bibliography\{(.*?)\}", r"\\bibliographystyle\{(.*?)\}" ]) - text, mask = set_forbidden_text(text, mask, r"\\begin\{thebibliography\}.*?\\end\{thebibliography\}", re.DOTALL) - text, mask = set_forbidden_text(text, mask, r"\\begin\{lstlisting\}(.*?)\\end\{lstlisting\}", re.DOTALL) - text, mask = set_forbidden_text(text, mask, r"\\begin\{wraptable\}(.*?)\\end\{wraptable\}", re.DOTALL) - text, mask = set_forbidden_text(text, mask, r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}", re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\begin\{wrapfigure\}(.*?)\\end\{wrapfigure\}", r"\\begin\{wrapfigure\*\}(.*?)\\end\{wrapfigure\*\}"], re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\begin\{figure\}(.*?)\\end\{figure\}", r"\\begin\{figure\*\}(.*?)\\end\{figure\*\}"], re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\begin\{multline\}(.*?)\\end\{multline\}", r"\\begin\{multline\*\}(.*?)\\end\{multline\*\}"], re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\begin\{table\}(.*?)\\end\{table\}", r"\\begin\{table\*\}(.*?)\\end\{table\*\}"], re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\begin\{minipage\}(.*?)\\end\{minipage\}", r"\\begin\{minipage\*\}(.*?)\\end\{minipage\*\}"], re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\begin\{align\*\}(.*?)\\end\{align\*\}", r"\\begin\{align\}(.*?)\\end\{align\}"], re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\begin\{equation\}(.*?)\\end\{equation\}", r"\\begin\{equation\*\}(.*?)\\end\{equation\*\}"], re.DOTALL) - text, mask = set_forbidden_text(text, mask, [r"\\includepdf\[(.*?)\]\{(.*?)\}", r"\\clearpage", r"\\newpage", r"\\appendix", r"\\tableofcontents", r"\\include\{(.*?)\}"]) - text, mask = set_forbidden_text(text, mask, [r"\\vspace\{(.*?)\}", r"\\hspace\{(.*?)\}", r"\\label\{(.*?)\}", r"\\begin\{(.*?)\}", r"\\end\{(.*?)\}", r"\\item "]) - text, mask = set_forbidden_text_careful_brace(text, mask, r"\\hl\{(.*?)\}", re.DOTALL) - # reverse 操作必须放在最后 - text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\caption\{(.*?)\}", re.DOTALL, forbid_wrapper=True) - text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\abstract\{(.*?)\}", re.DOTALL, forbid_wrapper=True) - text, mask = reverse_forbidden_text(text, mask, r"\\begin\{abstract\}(.*?)\\end\{abstract\}", re.DOTALL, forbid_wrapper=True) - root = convert_to_linklist(text, mask) - - # 最后一步处理,增强稳健性 - root = post_process(root) - - # 输出html调试文件,用红色标注处保留区(PRESERVE),用黑色标注转换区(TRANSFORM) - with open(pj(project_folder, 'debug_log.html'), 'w', encoding='utf8') as f: - segment_parts_for_gpt = [] - nodes = [] - node = root - while True: - nodes.append(node) - show_html = node.string.replace('\n','
<br/>') - if not node.preserve: - segment_parts_for_gpt.append(node.string) - f.write(f'<p style="color:black;">#{node.range}{show_html}#</p>') - else: - f.write(f'<p style="color:red;">{show_html}</p>
') - node = node.next - if node is None: break - - for n in nodes: n.next = None # break - return_dict['nodes'] = nodes - return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt - return return_dict - -class LatexPaperSplit(): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - """ - def __init__(self) -> None: - self.nodes = None - self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \ - "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \ - "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。" - # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者) - self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" - self.title = "unknown" - self.abstract = "unknown" - - def read_title_and_abstract(self, txt): - try: - title, abstract = find_title_and_abs(txt) - if title is not None: - self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') - if abstract is not None: - self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') - except: - pass - - def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10): - """ - Merge the result after the GPT process completed - """ - result_string = "" - node_cnt = 0 - line_cnt = 0 - - for node in self.nodes: - if node.preserve: - line_cnt += node.string.count('\n') - result_string += node.string - else: - translated_txt = fix_content(arr[node_cnt], node.string) - begin_line = line_cnt - end_line = line_cnt + translated_txt.count('\n') - - # reverse translation if any error - if any([begin_line-buggy_line_surgery_n_lines <= b_line <= end_line+buggy_line_surgery_n_lines for b_line in buggy_lines]): - translated_txt = node.string - - result_string += translated_txt - node_cnt += 1 - line_cnt += translated_txt.count('\n') - - if mode == 'translate_zh': - pattern = re.compile(r'\\begin\{abstract\}.*\n') - match = pattern.search(result_string) - if not match: - # match \abstract{xxxx} - pattern_compile = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL) - match = pattern_compile.search(result_string) - position = match.regs[1][0] - else: - # match \begin{abstract}xxxx\end{abstract} - position = match.end() - result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:] - return result_string - - - def split(self, txt, project_folder, opts): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - P.S. 
use multiprocessing to avoid timeout error - """ - import multiprocessing - manager = multiprocessing.Manager() - return_dict = manager.dict() - p = multiprocessing.Process( - target=split_subprocess, - args=(txt, project_folder, return_dict, opts)) - p.start() - p.join() - p.close() - self.nodes = return_dict['nodes'] - self.sp = return_dict['segment_parts_for_gpt'] - return self.sp - - -class LatexPaperFileGroup(): - """ - use tokenizer to break down text according to max_token_limit - """ - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - # count_token - from request_llms.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - use tokenizer to break down text according to max_token_limit - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex") - - def merge_result(self): - self.file_result = ["" for _ in range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, self.sp_file_index): - self.file_result[k] += r - - def write_result(self): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - with open(path + '.polish.tex', 'w', encoding='utf8') as f: - manifest.append(path + '.polish.tex') - f.write(res) - return manifest - - -def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]): - import time, os, re - from ..crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from .latex_actions import LatexPaperFileGroup, LatexPaperSplit - - # <-------- 寻找主tex文件 ----------> - maintex = find_main_tex_file(file_manifest, mode) - chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - time.sleep(3) - - # <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ----------> - main_tex_basename = os.path.basename(maintex) - assert main_tex_basename.endswith('.tex') - main_tex_basename_bare = main_tex_basename[:-4] - may_exist_bbl = pj(project_folder, f'{main_tex_basename_bare}.bbl') - if os.path.exists(may_exist_bbl): - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_{mode}.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_diff.bbl')) - - with open(maintex, 'r', encoding='utf-8', errors='replace') as f: - content = f.read() - merged_content = merge_tex_files(project_folder, content, mode) - - with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f: - f.write(merged_content) - - # <-------- 精细切分latex文件 ----------> - chatbot.append((f"Latex文件融合完成", 
f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - lps = LatexPaperSplit() - lps.read_title_and_abstract(merged_content) - res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数 - # <-------- 拆分过长的latex片段 ----------> - pfg = LatexPaperFileGroup() - for index, r in enumerate(res): - pfg.file_paths.append('segment-' + str(index)) - pfg.file_contents.append(r) - - pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - # <-------- 根据需要切换prompt ----------> - inputs_array, sys_prompt_array = switch_prompt(pfg, mode) - inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag] - - if os.path.exists(pj(project_folder,'temp.pkl')): - - # <-------- 【仅调试】如果存在调试缓存文件,则跳过GPT请求环节 ----------> - pfg = objload(file=pj(project_folder,'temp.pkl')) - - else: - # <-------- gpt 多线程请求 ----------> - history_array = [[""] for _ in range(n_split)] - # LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL') - # if LATEX_EXPERIMENTAL: - # paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`" - # paper_meta_max_len = 888 - # history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)] - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=history_array, - sys_prompt_array=sys_prompt_array, - # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待 - scroller_max_len = 40 - ) - - # <-------- 文本碎片重组为完整的tex片段 ----------> - pfg.sp_file_result = [] - for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - - # <-------- 临时存储用于调试 ----------> - pfg.get_token_num = None - objdump(pfg, file=pj(project_folder,'temp.pkl')) - - write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder) - - # <-------- 写出文件 ----------> - msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。" - final_tex = lps.merge_result(pfg.file_result, mode, msg) - objdump((lps, pfg.file_result, mode, msg), file=pj(project_folder,'merge_result.pkl')) - - with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f: - if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex) - - - # <-------- 整理结果, 退出 ----------> - chatbot.append((f"完成了吗?", 'GPT结果已输出, 即将编译PDF')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------- 返回 ----------> - return project_folder + f'/merge_{mode}.tex' - - -def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work_folder_modified, fixed_line=[]): - try: - with open(log_path, 'r', encoding='utf-8', errors='replace') as f: - log = f.read() - import re - buggy_lines = re.findall(tex_name+':([0-9]{1,5}):', log) - buggy_lines = [int(l) for l in buggy_lines] - buggy_lines = sorted(buggy_lines) - buggy_line = buggy_lines[0]-1 - print("reversing tex line that has errors", buggy_line) - - # 重组,逆转出错的段落 - if buggy_line not in fixed_line: - fixed_line.append(buggy_line) - - lps, file_result, mode, msg = objload(file=pj(work_folder_modified,'merge_result.pkl')) - final_tex = lps.merge_result(file_result, mode, msg, buggy_lines=fixed_line, 
buggy_line_surgery_n_lines=5*n_fix) - - with open(pj(work_folder_modified, f"{tex_name_pure}_fix_{n_fix}.tex"), 'w', encoding='utf-8', errors='replace') as f: - f.write(final_tex) - - return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines - except: - print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.") - return False, -1, [-1] - - -def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder, mode='default'): - import os, time - n_fix = 1 - fixed_line = [] - max_try = 32 - chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history) - chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面 - yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面 - - while True: - import os - may_exist_bbl = pj(work_folder_modified, f'merge.bbl') - target_bbl = pj(work_folder_modified, f'{main_file_modified}.bbl') - if os.path.exists(may_exist_bbl) and not os.path.exists(target_bbl): - shutil.copyfile(may_exist_bbl, target_bbl) - - # https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')): - # 只有第二步成功,才能继续下面的步骤 - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面 - if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original) - if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if mode!='translate_zh': - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面 - print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex') - ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile 
{work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', os.getcwd()) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - - # <---------- 检查结果 -----------> - results_ = "" - original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf')) - modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')) - diff_pdf_success = os.path.exists(pj(work_folder, f'merge_diff.pdf')) - results_ += f"原始PDF编译是否成功: {original_pdf_success};" - results_ += f"转化PDF编译是否成功: {modified_pdf_success};" - results_ += f"对比PDF编译是否成功: {diff_pdf_success};" - yield from update_ui_lastest_msg(f'第{n_fix}编译结束:
{results_}...', chatbot, history) # 刷新Gradio前端界面 - - if diff_pdf_success: - result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - if modified_pdf_success: - yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history) # 刷新Gradio前端界面 - result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path - origin_pdf = pj(work_folder_original, f'{main_file_original}.pdf') # get pdf path - if os.path.exists(pj(work_folder, '..', 'translation')): - shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf')) - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - # 将两个PDF拼接 - if original_pdf_success: - try: - from .latex_toolbox import merge_pdfs - concat_pdf = pj(work_folder_modified, f'comparison.pdf') - merge_pdfs(origin_pdf, result_pdf, concat_pdf) - if os.path.exists(pj(work_folder, '..', 'translation')): - shutil.copyfile(concat_pdf, pj(work_folder, '..', 'translation', 'comparison.pdf')) - promote_file_to_downloadzone(concat_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - except Exception as e: - print(e) - pass - return True # 成功啦 - else: - if n_fix>=max_try: break - n_fix += 1 - can_retry, main_file_modified, buggy_lines = remove_buggy_lines( - file_path=pj(work_folder_modified, f'{main_file_modified}.tex'), - log_path=pj(work_folder_modified, f'{main_file_modified}.log'), - tex_name=f'{main_file_modified}.tex', - tex_name_pure=f'{main_file_modified}', - n_fix=n_fix, - work_folder_modified=work_folder_modified, - fixed_line=fixed_line - ) - yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面 - if not can_retry: break - - return False # 失败啦 - - -def write_html(sp_file_contents, sp_file_result, chatbot, project_folder): - # write html - try: - import shutil - from crazy_functions.pdf_fns.report_gen_html import construct_html - from toolbox import gen_time_str - ch = construct_html() - orig = "" - trans = "" - final = [] - for c,r in zip(sp_file_contents, sp_file_result): - final.append(c) - final.append(r) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{gen_time_str()}.trans.html" - res = ch.save_file(create_report_file_name) - shutil.copyfile(res, pj(project_folder, create_report_file_name)) - promote_file_to_downloadzone(file=res, chatbot=chatbot) - except: - from toolbox import trimmed_format_exc - print('writing html result failed:', trimmed_format_exc()) diff --git a/crazy_functions/latex_fns/latex_toolbox.py b/crazy_functions/latex_fns/latex_toolbox.py deleted file mode 100644 index bbd1bb3c6b85a9731912388f187b248a626ffd52..0000000000000000000000000000000000000000 --- a/crazy_functions/latex_fns/latex_toolbox.py +++ /dev/null @@ -1,694 +0,0 @@ -import os, shutil -import re -import numpy as np - -PRESERVE = 0 -TRANSFORM = 1 - -pj = os.path.join - - -class LinkedListNode: - """ - Linked List Node - """ - - def __init__(self, string, preserve=True) -> None: - self.string = string - self.preserve = preserve - self.next = None - self.range = None - # self.begin_line = 0 - # self.begin_char = 0 - - -def convert_to_linklist(text, mask): - root = LinkedListNode("", preserve=True) - current_node = root - for c, m, i in zip(text, mask, range(len(text))): - 
if (m == PRESERVE and current_node.preserve) or ( - m == TRANSFORM and not current_node.preserve - ): - # add - current_node.string += c - else: - current_node.next = LinkedListNode(c, preserve=(m == PRESERVE)) - current_node = current_node.next - return root - - -def post_process(root): - # 修复括号 - node = root - while True: - string = node.string - if node.preserve: - node = node.next - if node is None: - break - continue - - def break_check(string): - str_stack = [""] # (lv, index) - for i, c in enumerate(string): - if c == "{": - str_stack.append("{") - elif c == "}": - if len(str_stack) == 1: - print("stack fix") - return i - str_stack.pop(-1) - else: - str_stack[-1] += c - return -1 - - bp = break_check(string) - - if bp == -1: - pass - elif bp == 0: - node.string = string[:1] - q = LinkedListNode(string[1:], False) - q.next = node.next - node.next = q - else: - node.string = string[:bp] - q = LinkedListNode(string[bp:], False) - q.next = node.next - node.next = q - - node = node.next - if node is None: - break - - # 屏蔽空行和太短的句子 - node = root - while True: - if len(node.string.strip("\n").strip("")) == 0: - node.preserve = True - if len(node.string.strip("\n").strip("")) < 42: - node.preserve = True - node = node.next - if node is None: - break - node = root - while True: - if node.next and node.preserve and node.next.preserve: - node.string += node.next.string - node.next = node.next.next - node = node.next - if node is None: - break - - # 将前后断行符脱离 - node = root - prev_node = None - while True: - if not node.preserve: - lstriped_ = node.string.lstrip().lstrip("\n") - if ( - (prev_node is not None) - and (prev_node.preserve) - and (len(lstriped_) != len(node.string)) - ): - prev_node.string += node.string[: -len(lstriped_)] - node.string = lstriped_ - rstriped_ = node.string.rstrip().rstrip("\n") - if ( - (node.next is not None) - and (node.next.preserve) - and (len(rstriped_) != len(node.string)) - ): - node.next.string = node.string[len(rstriped_) :] + node.next.string - node.string = rstriped_ - # =-=-= - prev_node = node - node = node.next - if node is None: - break - - # 标注节点的行数范围 - node = root - n_line = 0 - expansion = 2 - while True: - n_l = node.string.count("\n") - node.range = [n_line - expansion, n_line + n_l + expansion] # 失败时,扭转的范围 - n_line = n_line + n_l - node = node.next - if node is None: - break - return root - - -""" -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -Latex segmentation with a binary mask (PRESERVE=0, TRANSFORM=1) -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -""" - - -def set_forbidden_text(text, mask, pattern, flags=0): - """ - Add a preserve text area in this paper - e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}" - you can mask out (mask = PRESERVE so that text become untouchable for GPT) - everything between "\begin{equation}" and "\end{equation}" - """ - if isinstance(pattern, list): - pattern = "|".join(pattern) - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - mask[res.span()[0] : res.span()[1]] = PRESERVE - return text, mask - - -def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True): - """ - Move area out of preserve area (make text editable for GPT) - count the number of the braces so as to catch compelete text area. - e.g. - \begin{abstract} blablablablablabla. 
\end{abstract} - """ - if isinstance(pattern, list): - pattern = "|".join(pattern) - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - if not forbid_wrapper: - mask[res.span()[0] : res.span()[1]] = TRANSFORM - else: - mask[res.regs[0][0] : res.regs[1][0]] = PRESERVE # '\\begin{abstract}' - mask[res.regs[1][0] : res.regs[1][1]] = TRANSFORM # abstract - mask[res.regs[1][1] : res.regs[0][1]] = PRESERVE # abstract - return text, mask - - -def set_forbidden_text_careful_brace(text, mask, pattern, flags=0): - """ - Add a preserve text area in this paper (text become untouchable for GPT). - count the number of the braces so as to catch compelete text area. - e.g. - \caption{blablablablabla\texbf{blablabla}blablabla.} - """ - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - brace_level = -1 - p = begin = end = res.regs[0][0] - for _ in range(1024 * 16): - if text[p] == "}" and brace_level == 0: - break - elif text[p] == "}": - brace_level -= 1 - elif text[p] == "{": - brace_level += 1 - p += 1 - end = p + 1 - mask[begin:end] = PRESERVE - return text, mask - - -def reverse_forbidden_text_careful_brace( - text, mask, pattern, flags=0, forbid_wrapper=True -): - """ - Move area out of preserve area (make text editable for GPT) - count the number of the braces so as to catch compelete text area. - e.g. - \caption{blablablablabla\texbf{blablabla}blablabla.} - """ - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - brace_level = 0 - p = begin = end = res.regs[1][0] - for _ in range(1024 * 16): - if text[p] == "}" and brace_level == 0: - break - elif text[p] == "}": - brace_level -= 1 - elif text[p] == "{": - brace_level += 1 - p += 1 - end = p - mask[begin:end] = TRANSFORM - if forbid_wrapper: - mask[res.regs[0][0] : begin] = PRESERVE - mask[end : res.regs[0][1]] = PRESERVE - return text, mask - - -def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42): - """ - Find all \begin{} ... \end{} text block that with less than limit_n_lines lines. - Add it to preserve area - """ - pattern_compile = re.compile(pattern, flags) - - def search_with_line_limit(text, mask): - for res in pattern_compile.finditer(text): - cmd = res.group(1) # begin{what} - this = res.group(2) # content between begin and end - this_mask = mask[res.regs[2][0] : res.regs[2][1]] - white_list = [ - "document", - "abstract", - "lemma", - "definition", - "sproof", - "em", - "emph", - "textit", - "textbf", - "itemize", - "enumerate", - ] - if (cmd in white_list) or this.count( - "\n" - ) >= limit_n_lines: # use a magical number 42 - this, this_mask = search_with_line_limit(this, this_mask) - mask[res.regs[2][0] : res.regs[2][1]] = this_mask - else: - mask[res.regs[0][0] : res.regs[0][1]] = PRESERVE - return text, mask - - return search_with_line_limit(text, mask) - - -""" -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -Latex Merge File -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -""" - - -def find_main_tex_file(file_manifest, mode): - """ - 在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。 - P.S. 
但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码) - """ - canidates = [] - for texf in file_manifest: - if os.path.basename(texf).startswith("merge"): - continue - with open(texf, "r", encoding="utf8", errors="ignore") as f: - file_content = f.read() - if r"\documentclass" in file_content: - canidates.append(texf) - else: - continue - - if len(canidates) == 0: - raise RuntimeError("无法找到一个主Tex文件(包含documentclass关键字)") - elif len(canidates) == 1: - return canidates[0] - else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回 - canidates_score = [] - # 给出一些判定模板文档的词作为扣分项 - unexpected_words = [ - "\\LaTeX", - "manuscript", - "Guidelines", - "font", - "citations", - "rejected", - "blind review", - "reviewers", - ] - expected_words = ["\\input", "\\ref", "\\cite"] - for texf in canidates: - canidates_score.append(0) - with open(texf, "r", encoding="utf8", errors="ignore") as f: - file_content = f.read() - file_content = rm_comments(file_content) - for uw in unexpected_words: - if uw in file_content: - canidates_score[-1] -= 1 - for uw in expected_words: - if uw in file_content: - canidates_score[-1] += 1 - select = np.argmax(canidates_score) # 取评分最高者返回 - return canidates[select] - - -def rm_comments(main_file): - new_file_remove_comment_lines = [] - for l in main_file.splitlines(): - # 删除整行的空注释 - if l.lstrip().startswith("%"): - pass - else: - new_file_remove_comment_lines.append(l) - main_file = "\n".join(new_file_remove_comment_lines) - # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令 - main_file = re.sub(r"(? 0 and node_string.count("\_") > final_tex.count("\_"): - # walk and replace any _ without \ - final_tex = re.sub(r"(?= limit_n_lines: # use a magical number 42 - this, this_mask = search_with_line_limit(this, this_mask) - mask[res.regs[2][0]:res.regs[2][1]] = this_mask - else: - mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE - return text, mask - return search_with_line_limit(text, mask) - -class LinkedListNode(): - """ - Linked List Node - """ - def __init__(self, string, preserve=True) -> None: - self.string = string - self.preserve = preserve - self.next = None - # self.begin_line = 0 - # self.begin_char = 0 - -def convert_to_linklist(text, mask): - root = LinkedListNode("", preserve=True) - current_node = root - for c, m, i in zip(text, mask, range(len(text))): - if (m==PRESERVE and current_node.preserve) \ - or (m==TRANSFORM and not current_node.preserve): - # add - current_node.string += c - else: - current_node.next = LinkedListNode(c, preserve=(m==PRESERVE)) - current_node = current_node.next - return root -""" -======================================================================== -Latex Merge File -======================================================================== -""" - -def 寻找Latex主文件(file_manifest, mode): - """ - 在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。 - P.S. 
但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码) - """ - canidates = [] - for texf in file_manifest: - if os.path.basename(texf).startswith('merge'): - continue - with open(texf, 'r', encoding='utf8') as f: - file_content = f.read() - if r'\documentclass' in file_content: - canidates.append(texf) - else: - continue - - if len(canidates) == 0: - raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)') - elif len(canidates) == 1: - return canidates[0] - else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回 - canidates_score = [] - # 给出一些判定模板文档的词作为扣分项 - unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers'] - expected_words = ['\input', '\ref', '\cite'] - for texf in canidates: - canidates_score.append(0) - with open(texf, 'r', encoding='utf8') as f: - file_content = f.read() - for uw in unexpected_words: - if uw in file_content: - canidates_score[-1] -= 1 - for uw in expected_words: - if uw in file_content: - canidates_score[-1] += 1 - select = np.argmax(canidates_score) # 取评分最高者返回 - return canidates[select] - -def rm_comments(main_file): - new_file_remove_comment_lines = [] - for l in main_file.splitlines(): - # 删除整行的空注释 - if l.lstrip().startswith("%"): - pass - else: - new_file_remove_comment_lines.append(l) - main_file = '\n'.join(new_file_remove_comment_lines) - # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令 - main_file = re.sub(r'(? 0 and node_string.count('\_') > final_tex.count('\_'): - # walk and replace any _ without \ - final_tex = re.sub(r"(?') - if not node.preserve: - segment_parts_for_gpt.append(node.string) - f.write(f'
<p style="color:black;">#{show_html}#</p>') - else: - f.write(f'<p style="color:red;">{show_html}</p>
') - node = node.next - if node is None: break - - for n in nodes: n.next = None # break - return_dict['nodes'] = nodes - return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt - return return_dict - - - -class LatexPaperSplit(): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - """ - def __init__(self) -> None: - self.nodes = None - self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \ - "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \ - "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。" - # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者) - self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" - - def merge_result(self, arr, mode, msg): - """ - Merge the result after the GPT process completed - """ - result_string = "" - p = 0 - for node in self.nodes: - if node.preserve: - result_string += node.string - else: - result_string += fix_content(arr[p], node.string) - p += 1 - if mode == 'translate_zh': - pattern = re.compile(r'\\begin\{abstract\}.*\n') - match = pattern.search(result_string) - if not match: - # match \abstract{xxxx} - pattern_compile = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL) - match = pattern_compile.search(result_string) - position = match.regs[1][0] - else: - # match \begin{abstract}xxxx\end{abstract} - position = match.end() - result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:] - return result_string - - def split(self, txt, project_folder, opts): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - P.S. use multiprocessing to avoid timeout error - """ - import multiprocessing - manager = multiprocessing.Manager() - return_dict = manager.dict() - p = multiprocessing.Process( - target=split_subprocess, - args=(txt, project_folder, return_dict, opts)) - p.start() - p.join() - p.close() - self.nodes = return_dict['nodes'] - self.sp = return_dict['segment_parts_for_gpt'] - return self.sp - - - -class LatexPaperFileGroup(): - """ - use tokenizer to break down text according to max_token_limit - """ - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - use tokenizer to break down text according to max_token_limit - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex") - print('Segmentation: done') - - def merge_result(self): - self.file_result = ["" for _ in range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, self.sp_file_index): - 
self.file_result[k] += r - - def write_result(self): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - with open(path + '.polish.tex', 'w', encoding='utf8') as f: - manifest.append(path + '.polish.tex') - f.write(res) - return manifest - -def write_html(sp_file_contents, sp_file_result, chatbot, project_folder): - - # write html - try: - import shutil - from .crazy_utils import construct_html - from toolbox import gen_time_str - ch = construct_html() - orig = "" - trans = "" - final = [] - for c,r in zip(sp_file_contents, sp_file_result): - final.append(c) - final.append(r) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{gen_time_str()}.trans.html" - ch.save_file(create_report_file_name) - shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name)) - promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot) - except: - from toolbox import trimmed_format_exc - print('writing html result failed:', trimmed_format_exc()) - -def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]): - import time, os, re - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from .latex_utils import LatexPaperFileGroup, merge_tex_files, LatexPaperSplit, 寻找Latex主文件 - - # <-------- 寻找主tex文件 ----------> - maintex = 寻找Latex主文件(file_manifest, mode) - chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - time.sleep(3) - - # <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ----------> - main_tex_basename = os.path.basename(maintex) - assert main_tex_basename.endswith('.tex') - main_tex_basename_bare = main_tex_basename[:-4] - may_exist_bbl = pj(project_folder, f'{main_tex_basename_bare}.bbl') - if os.path.exists(may_exist_bbl): - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_{mode}.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_diff.bbl')) - - with open(maintex, 'r', encoding='utf-8', errors='replace') as f: - content = f.read() - merged_content = merge_tex_files(project_folder, content, mode) - - with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f: - f.write(merged_content) - - # <-------- 精细切分latex文件 ----------> - chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - lps = LatexPaperSplit() - res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数 - - # <-------- 拆分过长的latex片段 ----------> - pfg = LatexPaperFileGroup() - for index, r in enumerate(res): - pfg.file_paths.append('segment-' + str(index)) - pfg.file_contents.append(r) - - pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - # <-------- 根据需要切换prompt ----------> - inputs_array, sys_prompt_array = switch_prompt(pfg, mode) - inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag] - - if os.path.exists(pj(project_folder,'temp.pkl')): - - # <-------- 【仅调试】如果存在调试缓存文件,则跳过GPT请求环节 ----------> - pfg = objload(file=pj(project_folder,'temp.pkl')) - - else: - # <-------- gpt 多线程请求 ----------> - gpt_response_collection = 
yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待 - scroller_max_len = 40 - ) - - # <-------- 文本碎片重组为完整的tex片段 ----------> - pfg.sp_file_result = [] - for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - - # <-------- 临时存储用于调试 ----------> - pfg.get_token_num = None - objdump(pfg, file=pj(project_folder,'temp.pkl')) - - write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder) - - # <-------- 写出文件 ----------> - msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。" - final_tex = lps.merge_result(pfg.file_result, mode, msg) - with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f: - if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex) - - - # <-------- 整理结果, 退出 ----------> - chatbot.append((f"完成了吗?", 'GPT结果已输出, 正在编译PDF')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------- 返回 ----------> - return project_folder + f'/merge_{mode}.tex' - - - -def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work_folder_modified): - try: - with open(log_path, 'r', encoding='utf-8', errors='replace') as f: - log = f.read() - with open(file_path, 'r', encoding='utf-8', errors='replace') as f: - file_lines = f.readlines() - import re - buggy_lines = re.findall(tex_name+':([0-9]{1,5}):', log) - buggy_lines = [int(l) for l in buggy_lines] - buggy_lines = sorted(buggy_lines) - print("removing lines that has errors", buggy_lines) - file_lines.pop(buggy_lines[0]-1) - with open(pj(work_folder_modified, f"{tex_name_pure}_fix_{n_fix}.tex"), 'w', encoding='utf-8', errors='replace') as f: - f.writelines(file_lines) - return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines - except: - print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.") - return False, -1, [-1] - -def compile_latex_with_timeout(command, cwd, timeout=60): - import subprocess - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) - try: - stdout, stderr = process.communicate(timeout=timeout) - except subprocess.TimeoutExpired: - process.kill() - stdout, stderr = process.communicate() - print("Process timed out!") - return False - return True - -def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder, mode='default'): - import os, time - current_dir = os.getcwd() - n_fix = 1 - max_try = 32 - chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history) - chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面 - yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面 - - while True: - import os - - # https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 
编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')): - # 只有第二步成功,才能继续下面的步骤 - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面 - if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original) - if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if mode!='translate_zh': - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面 - print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex') - ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex') - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - - - # <---------- 检查结果 -----------> - results_ = "" - original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf')) - modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')) - diff_pdf_success = os.path.exists(pj(work_folder, f'merge_diff.pdf')) - results_ += f"原始PDF编译是否成功: {original_pdf_success};" - results_ += f"转化PDF编译是否成功: {modified_pdf_success};" - results_ += f"对比PDF编译是否成功: {diff_pdf_success};" - yield from update_ui_lastest_msg(f'第{n_fix}编译结束:
{results_}...', chatbot, history) # 刷新Gradio前端界面 - - if diff_pdf_success: - result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - if modified_pdf_success: - yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history) # 刷新Gradio前端界面 - result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path - if os.path.exists(pj(work_folder, '..', 'translation')): - shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf')) - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - return True # 成功啦 - else: - if n_fix>=max_try: break - n_fix += 1 - can_retry, main_file_modified, buggy_lines = remove_buggy_lines( - file_path=pj(work_folder_modified, f'{main_file_modified}.tex'), - log_path=pj(work_folder_modified, f'{main_file_modified}.log'), - tex_name=f'{main_file_modified}.tex', - tex_name_pure=f'{main_file_modified}', - n_fix=n_fix, - work_folder_modified=work_folder_modified, - ) - yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面 - if not can_retry: break - - return False # 失败啦 - - - diff --git a/crazy_functions/live_audio/aliyunASR.py b/crazy_functions/live_audio/aliyunASR.py deleted file mode 100644 index cba4c01f86be93b4fbb7ef474330a6a104c59431..0000000000000000000000000000000000000000 --- a/crazy_functions/live_audio/aliyunASR.py +++ /dev/null @@ -1,261 +0,0 @@ -import time, logging, json, sys, struct -import numpy as np -from scipy.io.wavfile import WAVE_FORMAT - -def write_numpy_to_wave(filename, rate, data, add_header=False): - """ - Write a NumPy array as a WAV file. - """ - def _array_tofile(fid, data): - # ravel gives a c-contiguous buffer - fid.write(data.ravel().view('b').data) - - if hasattr(filename, 'write'): - fid = filename - else: - fid = open(filename, 'wb') - - fs = rate - - try: - dkind = data.dtype.kind - if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and - data.dtype.itemsize == 1)): - raise ValueError("Unsupported data type '%s'" % data.dtype) - - header_data = b'' - - header_data += b'RIFF' - header_data += b'\x00\x00\x00\x00' - header_data += b'WAVE' - - # fmt chunk - header_data += b'fmt ' - if dkind == 'f': - format_tag = WAVE_FORMAT.IEEE_FLOAT - else: - format_tag = WAVE_FORMAT.PCM - if data.ndim == 1: - channels = 1 - else: - channels = data.shape[1] - bit_depth = data.dtype.itemsize * 8 - bytes_per_second = fs*(bit_depth // 8)*channels - block_align = channels * (bit_depth // 8) - - fmt_chunk_data = struct.pack(' 0xFFFFFFFF: - raise ValueError("Data exceeds wave file size limit") - if add_header: - fid.write(header_data) - # data chunk - fid.write(b'data') - fid.write(struct.pack('' or (data.dtype.byteorder == '=' and - sys.byteorder == 'big'): - data = data.byteswap() - _array_tofile(fid, data) - - if add_header: - # Determine file size and place it in correct - # position at start of the file. 
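# [annotation: editorial note, not in the original file] RIFF stores the
# total chunk size as a little-endian uint32 at byte offset 4; the header
# above wrote a four-byte zero placeholder there. The lines below seek back
# and patch in the real value (conventionally struct.pack('<I', size - 8),
# the file length minus the 8-byte 'RIFF' id + size prefix).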
- size = fid.tell() - fid.seek(4) - fid.write(struct.pack('{}".format(args)) - pass - - def test_on_close(self, *args): - self.aliyun_service_ok = False - pass - - def test_on_result_chg(self, message, *args): - # print("test_on_chg:{}".format(message)) - message = json.loads(message) - self.parsed_text = message['payload']['result'] - self.event_on_result_chg.set() - - def test_on_completed(self, message, *args): - # print("on_completed:args=>{} message=>{}".format(args, message)) - pass - - def audio_convertion_thread(self, uuid): - # 在一个异步线程中采集音频 - import nls # pip install git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git - import tempfile - from scipy import io - from toolbox import get_conf - from .audio_io import change_sample_rate - from .audio_io import RealtimeAudioDistribution - NEW_SAMPLERATE = 16000 - rad = RealtimeAudioDistribution() - rad.clean_up() - temp_folder = tempfile.gettempdir() - TOKEN, APPKEY = get_conf('ALIYUN_TOKEN', 'ALIYUN_APPKEY') - if len(TOKEN) == 0: - TOKEN = self.get_token() - self.aliyun_service_ok = True - URL="wss://nls-gateway.aliyuncs.com/ws/v1" - sr = nls.NlsSpeechTranscriber( - url=URL, - token=TOKEN, - appkey=APPKEY, - on_sentence_begin=self.test_on_sentence_begin, - on_sentence_end=self.test_on_sentence_end, - on_start=self.test_on_start, - on_result_changed=self.test_on_result_chg, - on_completed=self.test_on_completed, - on_error=self.test_on_error, - on_close=self.test_on_close, - callback_args=[uuid.hex] - ) - timeout_limit_second = 20 - r = sr.start(aformat="pcm", - timeout=timeout_limit_second, - enable_intermediate_result=True, - enable_punctuation_prediction=True, - enable_inverse_text_normalization=True) - - import webrtcvad - vad = webrtcvad.Vad() - vad.set_mode(1) - - is_previous_frame_transmitted = False # 上一帧是否有人说话 - previous_frame_data = None - echo_cnt = 0 # 在没有声音之后,继续向服务器发送n次音频数据 - echo_cnt_max = 4 # 在没有声音之后,继续向服务器发送n次音频数据 - keep_alive_last_send_time = time.time() - while not self.stop: - # time.sleep(self.capture_interval) - audio = rad.read(uuid.hex) - if audio is not None: - # convert to pcm file - temp_file = f'{temp_folder}/{uuid.hex}.pcm' # - dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE) # 48000 --> 16000 - write_numpy_to_wave(temp_file, NEW_SAMPLERATE, dsdata) - # read pcm binary - with open(temp_file, "rb") as f: data = f.read() - is_speaking, info = is_speaker_speaking(vad, data, NEW_SAMPLERATE) - - if is_speaking or echo_cnt > 0: - # 如果话筒激活 / 如果处于回声收尾阶段 - echo_cnt -= 1 - if not is_previous_frame_transmitted: # 上一帧没有人声,但是我们把上一帧同样加上 - if previous_frame_data is not None: data = previous_frame_data + data - if is_speaking: - echo_cnt = echo_cnt_max - slices = zip(*(iter(data),) * 640) # 640个字节为一组 - for i in slices: sr.send_audio(bytes(i)) - keep_alive_last_send_time = time.time() - is_previous_frame_transmitted = True - else: - is_previous_frame_transmitted = False - echo_cnt = 0 - # 保持链接激活,即使没有声音,也根据时间间隔,发送一些音频片段给服务器 - if time.time() - keep_alive_last_send_time > timeout_limit_second/2: - slices = zip(*(iter(data),) * 640) # 640个字节为一组 - for i in slices: sr.send_audio(bytes(i)) - keep_alive_last_send_time = time.time() - is_previous_frame_transmitted = True - self.audio_shape = info - else: - time.sleep(0.1) - - if not self.aliyun_service_ok: - self.stop = True - self.stop_msg = 'Aliyun音频服务异常,请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期。' - r = sr.stop() - - def get_token(self): - from toolbox import get_conf - import json - from aliyunsdkcore.request import CommonRequest - from aliyunsdkcore.client import 
AcsClient - AccessKey_ID, AccessKey_secret = get_conf('ALIYUN_ACCESSKEY', 'ALIYUN_SECRET') - - # 创建AcsClient实例 - client = AcsClient( - AccessKey_ID, - AccessKey_secret, - "cn-shanghai" - ) - - # 创建request,并设置参数。 - request = CommonRequest() - request.set_method('POST') - request.set_domain('nls-meta.cn-shanghai.aliyuncs.com') - request.set_version('2019-02-28') - request.set_action_name('CreateToken') - - try: - response = client.do_action_with_exception(request) - print(response) - jss = json.loads(response) - if 'Token' in jss and 'Id' in jss['Token']: - token = jss['Token']['Id'] - expireTime = jss['Token']['ExpireTime'] - print("token = " + token) - print("expireTime = " + str(expireTime)) - except Exception as e: - print(e) - - return token diff --git a/crazy_functions/live_audio/audio_io.py b/crazy_functions/live_audio/audio_io.py deleted file mode 100644 index 00fd3f2d846ccf20eb300b796bb91842315e3482..0000000000000000000000000000000000000000 --- a/crazy_functions/live_audio/audio_io.py +++ /dev/null @@ -1,51 +0,0 @@ -import numpy as np -from scipy import interpolate - -def Singleton(cls): - _instance = {} - - def _singleton(*args, **kargs): - if cls not in _instance: - _instance[cls] = cls(*args, **kargs) - return _instance[cls] - - return _singleton - - -@Singleton -class RealtimeAudioDistribution(): - def __init__(self) -> None: - self.data = {} - self.max_len = 1024*1024 - self.rate = 48000 # 只读,每秒采样数量 - - def clean_up(self): - self.data = {} - - def feed(self, uuid, audio): - self.rate, audio_ = audio - # print('feed', len(audio_), audio_[-25:]) - if uuid not in self.data: - self.data[uuid] = audio_ - else: - new_arr = np.concatenate((self.data[uuid], audio_)) - if len(new_arr) > self.max_len: new_arr = new_arr[-self.max_len:] - self.data[uuid] = new_arr - - def read(self, uuid): - if uuid in self.data: - res = self.data.pop(uuid) - # print('\r read-', len(res), '-', max(res), end='', flush=True) - else: - res = None - return res - -def change_sample_rate(audio, old_sr, new_sr): - duration = audio.shape[0] / old_sr - - time_old = np.linspace(0, duration, audio.shape[0]) - time_new = np.linspace(0, duration, int(audio.shape[0] * new_sr / old_sr)) - - interpolator = interpolate.interp1d(time_old, audio.T) - new_audio = interpolator(time_new).T - return new_audio.astype(np.int16) \ No newline at end of file diff --git a/crazy_functions/multi_stage/multi_stage_utils.py b/crazy_functions/multi_stage/multi_stage_utils.py deleted file mode 100644 index 1395e79ff132de3622d2dd3b3867f3916399e061..0000000000000000000000000000000000000000 --- a/crazy_functions/multi_stage/multi_stage_utils.py +++ /dev/null @@ -1,93 +0,0 @@ -from pydantic import BaseModel, Field -from typing import List -from toolbox import update_ui_lastest_msg, disable_auto_promotion -from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder -from request_llms.bridge_all import predict_no_ui_long_connection -from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError -import time -import pickle - -def have_any_recent_upload_files(chatbot): - _5min = 5 * 60 - if not chatbot: return False # chatbot is None - most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) - if not most_recent_uploaded: return False # most_recent_uploaded is None - if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new - else: return False # most_recent_uploaded is too old - -class GptAcademicState(): - def __init__(self): - self.reset() - - def 
reset(self): - pass - - def dump_state(self, chatbot): - chatbot._cookies['plugin_state'] = pickle.dumps(self) - - def set_state(self, chatbot, key, value): - setattr(self, key, value) - chatbot._cookies['plugin_state'] = pickle.dumps(self) - - def get_state(chatbot, cls=None): - state = chatbot._cookies.get('plugin_state', None) - if state is not None: state = pickle.loads(state) - elif cls is not None: state = cls() - else: state = GptAcademicState() - state.chatbot = chatbot - return state - - -class GptAcademicGameBaseState(): - """ - 1. first init: __init__ -> - """ - def init_game(self, chatbot, lock_plugin): - self.plugin_name = None - self.callback_fn = None - self.delete_game = False - self.step_cnt = 0 - - def lock_plugin(self, chatbot): - if self.callback_fn is None: - raise ValueError("callback_fn is None") - chatbot._cookies['lock_plugin'] = self.callback_fn - self.dump_state(chatbot) - - def get_plugin_name(self): - if self.plugin_name is None: - raise ValueError("plugin_name is None") - return self.plugin_name - - def dump_state(self, chatbot): - chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self) - - def set_state(self, chatbot, key, value): - setattr(self, key, value) - chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self) - - @staticmethod - def sync_state(chatbot, llm_kwargs, cls, plugin_name, callback_fn, lock_plugin=True): - state = chatbot._cookies.get(f'plugin_state/{plugin_name}', None) - if state is not None: - state = pickle.loads(state) - else: - state = cls() - state.init_game(chatbot, lock_plugin) - state.plugin_name = plugin_name - state.llm_kwargs = llm_kwargs - state.chatbot = chatbot - state.callback_fn = callback_fn - return state - - def continue_game(self, prompt, chatbot, history): - # 游戏主体 - yield from self.step(prompt, chatbot, history) - self.step_cnt += 1 - # 保存状态,收尾 - self.dump_state(chatbot) - # 如果游戏结束,清理 - if self.delete_game: - chatbot._cookies['lock_plugin'] = None - chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = None - yield from update_ui(chatbot=chatbot, history=history) diff --git a/crazy_functions/pdf_fns/breakdown_txt.py b/crazy_functions/pdf_fns/breakdown_txt.py deleted file mode 100644 index e7c767361f946e664b4a0e258fa9698529225300..0000000000000000000000000000000000000000 --- a/crazy_functions/pdf_fns/breakdown_txt.py +++ /dev/null @@ -1,125 +0,0 @@ -from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout - -def force_breakdown(txt, limit, get_token_fn): - """ 当无法用标点、空行分割时,我们用最暴力的方法切割 - """ - for i in reversed(range(len(txt))): - if get_token_fn(txt[:i]) < limit: - return txt[:i], txt[i:] - return "Tiktoken未知错误", "Tiktoken未知错误" - - -def maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage): - """ 为了加速计算,我们采样一个特殊的手段。当 remain_txt_to_cut > `_max` 时, 我们把 _max 后的文字转存至 remain_txt_to_cut_storage - 当 remain_txt_to_cut < `_min` 时,我们再把 remain_txt_to_cut_storage 中的部分文字取出 - """ - _min = int(5e4) - _max = int(1e5) - # print(len(remain_txt_to_cut), len(remain_txt_to_cut_storage)) - if len(remain_txt_to_cut) < _min and len(remain_txt_to_cut_storage) > 0: - remain_txt_to_cut = remain_txt_to_cut + remain_txt_to_cut_storage - remain_txt_to_cut_storage = "" - if len(remain_txt_to_cut) > _max: - remain_txt_to_cut_storage = remain_txt_to_cut[_max:] + remain_txt_to_cut_storage - remain_txt_to_cut = remain_txt_to_cut[:_max] - return remain_txt_to_cut, remain_txt_to_cut_storage - - -def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=False): - 
""" 文本切分 - """ - res = [] - total_len = len(txt_tocut) - fin_len = 0 - remain_txt_to_cut = txt_tocut - remain_txt_to_cut_storage = "" - # 为了加速计算,我们采样一个特殊的手段。当 remain_txt_to_cut > `_max` 时, 我们把 _max 后的文字转存至 remain_txt_to_cut_storage - remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage) - - while True: - if get_token_fn(remain_txt_to_cut) <= limit: - # 如果剩余文本的token数小于限制,那么就不用切了 - res.append(remain_txt_to_cut); fin_len+=len(remain_txt_to_cut) - break - else: - # 如果剩余文本的token数大于限制,那么就切 - lines = remain_txt_to_cut.split('\n') - - # 估计一个切分点 - estimated_line_cut = limit / get_token_fn(remain_txt_to_cut) * len(lines) - estimated_line_cut = int(estimated_line_cut) - - # 开始查找合适切分点的偏移(cnt) - cnt = 0 - for cnt in reversed(range(estimated_line_cut)): - if must_break_at_empty_line: - # 首先尝试用双空行(\n\n)作为切分点 - if lines[cnt] != "": - continue - prev = "\n".join(lines[:cnt]) - post = "\n".join(lines[cnt:]) - if get_token_fn(prev) < limit: - break - - if cnt == 0: - # 如果没有找到合适的切分点 - if break_anyway: - # 是否允许暴力切分 - prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn) - else: - # 不允许直接报错 - raise RuntimeError(f"存在一行极长的文本!{remain_txt_to_cut}") - - # 追加列表 - res.append(prev); fin_len+=len(prev) - # 准备下一次迭代 - remain_txt_to_cut = post - remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage) - process = fin_len/total_len - print(f'正在文本切分 {int(process*100)}%') - if len(remain_txt_to_cut.strip()) == 0: - break - return res - - -def breakdown_text_to_satisfy_token_limit_(txt, limit, llm_model="gpt-3.5-turbo"): - """ 使用多种方式尝试切分文本,以满足 token 限制 - """ - from request_llms.bridge_all import model_info - enc = model_info[llm_model]['tokenizer'] - def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=())) - try: - # 第1次尝试,将双空行(\n\n)作为切分点 - return cut(limit, get_token_fn, txt, must_break_at_empty_line=True) - except RuntimeError: - try: - # 第2次尝试,将单空行(\n)作为切分点 - return cut(limit, get_token_fn, txt, must_break_at_empty_line=False) - except RuntimeError: - try: - # 第3次尝试,将英文句号(.)作为切分点 - res = cut(limit, get_token_fn, txt.replace('.', '。\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在 - return [r.replace('。\n', '.') for r in res] - except RuntimeError as e: - try: - # 第4次尝试,将中文句号(。)作为切分点 - res = cut(limit, get_token_fn, txt.replace('。', '。。\n'), must_break_at_empty_line=False) - return [r.replace('。。\n', '。') for r in res] - except RuntimeError as e: - # 第5次尝试,没办法了,随便切一下吧 - return cut(limit, get_token_fn, txt, must_break_at_empty_line=False, break_anyway=True) - -breakdown_text_to_satisfy_token_limit = run_in_subprocess_with_timeout(breakdown_text_to_satisfy_token_limit_, timeout=60) - -if __name__ == '__main__': - from crazy_functions.crazy_utils import read_and_clean_pdf_text - file_content, page_one = read_and_clean_pdf_text("build/assets/at.pdf") - - from request_llms.bridge_all import model_info - for i in range(5): - file_content += file_content - - print(len(file_content)) - TOKEN_LIMIT_PER_FRAGMENT = 2500 - res = breakdown_text_to_satisfy_token_limit(file_content, TOKEN_LIMIT_PER_FRAGMENT) - diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py deleted file mode 100644 index fa27de516feb735c0ac92ffa02be97164343d8cf..0000000000000000000000000000000000000000 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ /dev/null @@ -1,171 +0,0 @@ -from functools import lru_cache -from toolbox import gen_time_str -from toolbox import 
promote_file_to_downloadzone -from toolbox import write_history_to_file, promote_file_to_downloadzone -from toolbox import get_conf -from toolbox import ProxyNetworkActivate -from colorful import * -import requests -import random -import copy -import os -import math - -class GROBID_OFFLINE_EXCEPTION(Exception): pass - -def get_avail_grobid_url(): - GROBID_URLS = get_conf('GROBID_URLS') - if len(GROBID_URLS) == 0: return None - try: - _grobid_url = random.choice(GROBID_URLS) # 随机负载均衡 - if _grobid_url.endswith('/'): _grobid_url = _grobid_url.rstrip('/') - with ProxyNetworkActivate('Connect_Grobid'): - res = requests.get(_grobid_url+'/api/isalive') - if res.text=='true': return _grobid_url - else: return None - except: - return None - -@lru_cache(maxsize=32) -def parse_pdf(pdf_path, grobid_url): - import scipdf # pip install scipdf_parser - if grobid_url.endswith('/'): grobid_url = grobid_url.rstrip('/') - try: - with ProxyNetworkActivate('Connect_Grobid'): - article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url) - except GROBID_OFFLINE_EXCEPTION: - raise GROBID_OFFLINE_EXCEPTION("GROBID服务不可用,请修改config中的GROBID_URL,可修改成本地GROBID服务。") - except: - raise RuntimeError("解析PDF失败,请检查PDF是否损坏。") - return article_dict - - -def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chatbot, fp, generated_conclusion_files): - # -=-=-=-=-=-=-=-= 写出第1个文件:翻译前后混合 -=-=-=-=-=-=-=-= - res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + gpt_response_collection, file_basename=f"{gen_time_str()}translated_and_original.md", file_fullname=None) - promote_file_to_downloadzone(res_path, rename_file=os.path.basename(res_path)+'.md', chatbot=chatbot) - generated_conclusion_files.append(res_path) - - # -=-=-=-=-=-=-=-= 写出第2个文件:仅翻译后的文本 -=-=-=-=-=-=-=-= - translated_res_array = [] - # 记录当前的大章节标题: - last_section_name = "" - for index, value in enumerate(gpt_response_collection): - # 先挑选偶数序列号: - if index % 2 != 0: - # 先提取当前英文标题: - cur_section_name = gpt_response_collection[index-1].split('\n')[0].split(" Part")[0] - # 如果index是1的话,则直接使用first section name: - if cur_section_name != last_section_name: - cur_value = cur_section_name + '\n' - last_section_name = copy.deepcopy(cur_section_name) - else: - cur_value = "" - # 再做一个小修改:重新修改当前part的标题,默认用英文的 - cur_value += value - translated_res_array.append(cur_value) - res_path = write_history_to_file(meta + ["# Meta Translation" , paper_meta_info] + translated_res_array, - file_basename = f"{gen_time_str()}-translated_only.md", - file_fullname = None, - auto_caption = False) - promote_file_to_downloadzone(res_path, rename_file=os.path.basename(res_path)+'.md', chatbot=chatbot) - generated_conclusion_files.append(res_path) - return res_path - -def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG): - from crazy_functions.pdf_fns.report_gen_html import construct_html - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - prompt = "以下是一篇学术论文的基本信息:\n" - # title - title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n' - # authors - authors = article_dict.get('authors', '无法获取 authors')[:100]; prompt += f'authors:{authors}\n\n' - # abstract - abstract = article_dict.get('abstract', '无法获取 abstract'); 
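The get_avail_grobid_url helper above uses GROBID's /api/isalive endpoint, which returns the literal string "true", as its readiness signal, and picks one configured instance at random as crude load balancing. A standalone sketch of the same probe (pick_alive_grobid is a hypothetical name; unlike the deleted helper, which tests a single random choice, this version falls through to the next instance on failure):

```python
import random
import requests

def pick_alive_grobid(urls, timeout=5.0):
    """Probe GROBID instances in random order and return the first one
    whose /api/isalive endpoint answers the string 'true'."""
    for url in random.sample(urls, k=len(urls)):
        url = url.rstrip('/')
        try:
            if requests.get(url + '/api/isalive', timeout=timeout).text == 'true':
                return url
        except requests.RequestException:
            continue
    return None
```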
prompt += f'abstract:{abstract}\n\n' - # command - prompt += f"请将题目和摘要翻译为{DST_LANG}。" - meta = [f'# Title:\n\n', title, f'# Abstract:\n\n', abstract ] - - # 单线,获取文章meta信息 - paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, - inputs_show_user=prompt, - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="You are an academic paper reader。", - ) - - # 多线,翻译 - inputs_array = [] - inputs_show_user_array = [] - - # get_token_num - from request_llms.bridge_all import model_info - enc = model_info[llm_kwargs['llm_model']]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - - def break_down(txt): - raw_token_num = get_token_num(txt) - if raw_token_num <= TOKEN_LIMIT_PER_FRAGMENT: - return [txt] - else: - # raw_token_num > TOKEN_LIMIT_PER_FRAGMENT - # find a smooth token limit to achieve even seperation - count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT)) - token_limit_smooth = raw_token_num // count + count - return breakdown_text_to_satisfy_token_limit(txt, limit=token_limit_smooth, llm_model=llm_kwargs['llm_model']) - - for section in article_dict.get('sections'): - if len(section['text']) == 0: continue - section_frags = break_down(section['text']) - for i, fragment in enumerate(section_frags): - heading = section['heading'] - if len(section_frags) > 1: heading += f' Part-{i+1}' - inputs_array.append( - f"你需要翻译{heading}章节,内容如下: \n\n{fragment}" - ) - inputs_show_user_array.append( - f"# {heading}\n\n{fragment}" - ) - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[meta for _ in inputs_array], - sys_prompt_array=[ - "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in inputs_array], - ) - # -=-=-=-=-=-=-=-= 写出Markdown文件 -=-=-=-=-=-=-=-= - produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chatbot, fp, generated_conclusion_files) - - # -=-=-=-=-=-=-=-= 写出HTML文件 -=-=-=-=-=-=-=-= - ch = construct_html() - orig = "" - trans = "" - gpt_response_collection_html = copy.deepcopy(gpt_response_collection) - for i,k in enumerate(gpt_response_collection_html): - if i%2==0: - gpt_response_collection_html[i] = inputs_show_user_array[i//2] - else: - # 先提取当前英文标题: - cur_section_name = gpt_response_collection[i-1].split('\n')[0].split(" Part")[0] - cur_value = cur_section_name + "\n" + gpt_response_collection_html[i] - gpt_response_collection_html[i] = cur_value - - final = ["", "", "一、论文概况", "", "Abstract", paper_meta_info, "二、论文翻译", ""] - final.extend(gpt_response_collection_html) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{os.path.basename(fp)}.trans.html" - html_file = ch.save_file(create_report_file_name) - generated_conclusion_files.append(html_file) - promote_file_to_downloadzone(html_file, rename_file=os.path.basename(html_file), chatbot=chatbot) diff --git a/crazy_functions/pdf_fns/parse_word.py b/crazy_functions/pdf_fns/parse_word.py deleted file mode 100644 index 64d07dcd48156162eea40b8b9fd3c105ccbf1af2..0000000000000000000000000000000000000000 --- a/crazy_functions/pdf_fns/parse_word.py +++ /dev/null @@ -1,85 +0,0 @@ -from crazy_functions.crazy_utils import read_and_clean_pdf_text, get_files_from_everything -import os -import re -def extract_text_from_files(txt, chatbot, 
history): - """ - 查找pdf/md/word并获取文本内容并返回状态以及文本 - - 输入参数 Args: - chatbot: chatbot inputs and outputs (用户界面对话窗口句柄,用于数据流可视化) - history (list): List of chat history (历史,对话历史列表) - - 输出 Returns: - 文件是否存在(bool) - final_result(list):文本内容 - page_one(list):第一页内容/摘要 - file_manifest(list):文件路径 - excption(string):需要用户手动处理的信息,如没出错则保持为空 - """ - - final_result = [] - page_one = [] - file_manifest = [] - excption = "" - - if txt == "": - final_result.append(txt) - return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容 - - #查找输入区内容中的文件 - file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf') - file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md') - file_word,word_manifest,folder_word = get_files_from_everything(txt, '.docx') - file_doc,doc_manifest,folder_doc = get_files_from_everything(txt, '.doc') - - if file_doc: - excption = "word" - return False, final_result, page_one, file_manifest, excption - - file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest) - if file_num == 0: - final_result.append(txt) - return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容 - - if file_pdf: - try: # 尝试导入依赖,如果缺少依赖,则给出安装建议 - import fitz - except: - excption = "pdf" - return False, final_result, page_one, file_manifest, excption - for index, fp in enumerate(pdf_manifest): - file_content, pdf_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF - file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - pdf_one = str(pdf_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - final_result.append(file_content) - page_one.append(pdf_one) - file_manifest.append(os.path.relpath(fp, folder_pdf)) - - if file_md: - for index, fp in enumerate(md_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - file_content = file_content.encode('utf-8', 'ignore').decode() - headers = re.findall(r'^#\s(.*)$', file_content, re.MULTILINE) #接下来提取md中的一级/二级标题作为摘要 - if len(headers) > 0: - page_one.append("\n".join(headers)) #合并所有的标题,以换行符分割 - else: - page_one.append("") - final_result.append(file_content) - file_manifest.append(os.path.relpath(fp, folder_md)) - - if file_word: - try: # 尝试导入依赖,如果缺少依赖,则给出安装建议 - from docx import Document - except: - excption = "word_pip" - return False, final_result, page_one, file_manifest, excption - for index, fp in enumerate(word_manifest): - doc = Document(fp) - file_content = '\n'.join([p.text for p in doc.paragraphs]) - file_content = file_content.encode('utf-8', 'ignore').decode() - page_one.append(file_content[:200]) - final_result.append(file_content) - file_manifest.append(os.path.relpath(fp, folder_word)) - - return True, final_result, page_one, file_manifest, excption \ No newline at end of file diff --git a/crazy_functions/pdf_fns/report_gen_html.py b/crazy_functions/pdf_fns/report_gen_html.py deleted file mode 100644 index 21829212ff13a2dfd1492f05ac9abc73907dce7b..0000000000000000000000000000000000000000 --- a/crazy_functions/pdf_fns/report_gen_html.py +++ /dev/null @@ -1,58 +0,0 @@ -from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder -import os - - - - -class construct_html(): - def __init__(self) -> None: - self.html_string = "" - - def add_row(self, a, b): - from toolbox import markdown_convertion - template = """ - { - primary_col: { - header: String.raw`__PRIMARY_HEADER__`, - msg: String.raw`__PRIMARY_MSG__`, - }, - secondary_rol: { - header: String.raw`__SECONDARY_HEADER__`, 
- msg: String.raw`__SECONDARY_MSG__`, - } - }, - """ - def std(str): - str = str.replace(r'`',r'`') - if str.endswith("\\"): str += ' ' - if str.endswith("}"): str += ' ' - if str.endswith("$"): str += ' ' - return str - - template_ = template - a_lines = a.split('\n') - b_lines = b.split('\n') - - if len(a_lines) == 1 or len(a_lines[0]) > 50: - template_ = template_.replace("__PRIMARY_HEADER__", std(a[:20])) - template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion(a))) - else: - template_ = template_.replace("__PRIMARY_HEADER__", std(a_lines[0])) - template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion('\n'.join(a_lines[1:])))) - - if len(b_lines) == 1 or len(b_lines[0]) > 50: - template_ = template_.replace("__SECONDARY_HEADER__", std(b[:20])) - template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion(b))) - else: - template_ = template_.replace("__SECONDARY_HEADER__", std(b_lines[0])) - template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion('\n'.join(b_lines[1:])))) - self.html_string += template_ - - def save_file(self, file_name): - from toolbox import get_log_folder - with open('crazy_functions/pdf_fns/report_template.html', 'r', encoding='utf8') as f: - html_template = f.read() - html_template = html_template.replace("__TF_ARR__", self.html_string) - with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f: - f.write(html_template.encode('utf-8', 'ignore').decode()) - return os.path.join(get_log_folder(), file_name) diff --git a/crazy_functions/pdf_fns/report_template.html b/crazy_functions/pdf_fns/report_template.html deleted file mode 100644 index 39a1e7ce482949978ff90c4738a9adb8803660e6..0000000000000000000000000000000000000000 --- a/crazy_functions/pdf_fns/report_template.html +++ /dev/null @@ -1,104 +0,0 @@ - - - - - - __TITLE__ - - - - - -
- <!-- report_template.html body (markup stripped during extraction): an HTML shell with a "Table of Contents" (文章目录) pane and the script slot that receives the __TF_ARR__ row array produced by construct_html -->
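The construct_html class above injects each original/translated pair into a JavaScript array of String.raw template literals inside this template, so its std() helper has to neutralize characters that would terminate the literal early. A sketch of that sanitizer and one substitution pair (the backtick replacement target is unreadable in this diff, so the &#96; entity is an assumption, as is the render_row helper name):

```python
def std(s):
    """Sanitize text for embedding in a JS String.raw`...` literal:
    escape backticks (assumed entity), and pad a trailing \\, } or $
    with a space so it cannot clash with the closing backtick or a
    ${...} interpolation in the surrounding template."""
    s = s.replace('`', '&#96;')
    if s.endswith(('\\', '}', '$')):
        s += ' '
    return s

def render_row(template, header, msg):
    """Fill one primary column of the row template, as add_row() does."""
    return (template
            .replace('__PRIMARY_HEADER__', std(header))
            .replace('__PRIMARY_MSG__', std(msg)))
```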
- - - diff --git a/crazy_functions/test_project/cpp/libJPG/JpegLibrary.tps b/crazy_functions/test_project/cpp/libJPG/JpegLibrary.tps new file mode 100644 index 0000000000000000000000000000000000000000..d5fcc9a98e78d3082abbefd0ce290de86159bac6 --- /dev/null +++ b/crazy_functions/test_project/cpp/libJPG/JpegLibrary.tps @@ -0,0 +1,15 @@ + + + Jpeg Library + /Engine/Source/ThirdParty/libJPG/ + 2016-06-10T14:04:17.9005402-04:00 + We need it because it is a 3rd party lib in GFx + + See license in download: http://www.ijg.org/ + + Licensees + Git + P4 + + /Engine/Source/ThirdParty/Licenses/JPEG_License.txt + \ No newline at end of file diff --git a/crazy_functions/test_project/cpp/libJPG/UElibJPG.Build.cs b/crazy_functions/test_project/cpp/libJPG/UElibJPG.Build.cs new file mode 100644 index 0000000000000000000000000000000000000000..01ca25dce97e8e3bf6dd4fba43416a66262bdb12 --- /dev/null +++ b/crazy_functions/test_project/cpp/libJPG/UElibJPG.Build.cs @@ -0,0 +1,17 @@ +// Copyright Epic Games, Inc. All Rights Reserved. + +using UnrealBuildTool; + +public class UElibJPG : ModuleRules +{ + public UElibJPG(ReadOnlyTargetRules Target) : base(Target) + { + Type = ModuleType.External; + + string libJPGPath = Target.UEThirdPartySourceDirectory + "libJPG"; + PublicIncludePaths.Add(libJPGPath); + + ShadowVariableWarningLevel = WarningLevel.Off; + } +} + diff --git a/crazy_functions/test_project/cpp/libJPG/jpeg-compressor.tps b/crazy_functions/test_project/cpp/libJPG/jpeg-compressor.tps new file mode 100644 index 0000000000000000000000000000000000000000..4e89d745ef4e121bb7ba411b49f7bdabbbb820db --- /dev/null +++ b/crazy_functions/test_project/cpp/libJPG/jpeg-compressor.tps @@ -0,0 +1,15 @@ + + + jpeg-compressor + /Engine/Source/ThirdParty/libJPG/ + 2016-06-10T14:07:13.8351319-04:00 + Allows JPEG compression and decompression. + Compressing video frames at runtime for reduced memory usage. Decompression to access the data afterwards. + https://code.google.com/archive/p/jpeg-compressor/ + + Licensees + Git + P4 + + None + \ No newline at end of file diff --git a/crazy_functions/test_project/cpp/longcode/jpgd.cpp b/crazy_functions/test_project/cpp/longcode/jpgd.cpp deleted file mode 100644 index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000 --- a/crazy_functions/test_project/cpp/longcode/jpgd.cpp +++ /dev/null @@ -1,3276 +0,0 @@ -// jpgd.cpp - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -// Last updated Apr. 16, 2011 -// Alex Evans: Linear memory allocator (taken from jpge.h). -// -// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2. -// -// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling. -// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain" -// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html - -#include "jpgd.h" -#include - -#include -// BEGIN EPIC MOD -#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0 -// END EPIC MOD - -#ifdef _MSC_VER -#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable -#endif - -// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling). -// This is slower, but results in higher quality on images with highly saturated colors. 
-#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1 - -#define JPGD_TRUE (1) -#define JPGD_FALSE (0) - -#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b)) -#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b)) - -namespace jpgd { - - static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); } - static inline void jpgd_free(void *p) { FMemory::Free(p); } - -// BEGIN EPIC MOD -//@UE3 - use UE3 BGRA encoding instead of assuming RGBA - // stolen from IImageWrapper.h - enum ERGBFormatJPG - { - Invalid = -1, - RGBA = 0, - BGRA = 1, - Gray = 2, - }; - static ERGBFormatJPG jpg_format; -// END EPIC MOD - - // DCT coefficients are stored in this sequence. - static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; - - enum JPEG_MARKER - { - M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8, - M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC, - M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7, - M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF, - M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0 - }; - - enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 }; - -#define CONST_BITS 13 -#define PASS1_BITS 2 -#define SCALEDONE ((int32)1) - -#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */ -#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */ -#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */ -#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */ -#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */ -#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */ -#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */ -#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */ -#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */ -#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */ -#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */ -#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */ - -#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n)) -#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n)) - -#define MULTIPLY(var, cnst) ((var) * (cnst)) - -#define CLAMP(i) ((static_cast(i) > 255) ? (((~i) >> 31) & 0xFF) : (i)) - - // Compiler creates a fast path 1D IDCT for X non-zero columns - template - struct Row - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - // ACCESS_COL() will be optimized at compile time to either an array access, or 0. -#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? 
(int)pSrc[x] : 0) - - const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS; - const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS); - pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS); - pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS); - pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS); - pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS); - pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS); - pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS); - pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS); - } - }; - - template <> - struct Row<0> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { -#ifdef _MSC_VER - pTemp; pSrc; -#endif - } - }; - - template <> - struct Row<1> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - const int dcval = (pSrc[0] << PASS1_BITS); - - pTemp[0] = dcval; - pTemp[1] = dcval; - pTemp[2] = dcval; - pTemp[3] = dcval; - pTemp[4] = dcval; - pTemp[5] = dcval; - pTemp[6] = dcval; - pTemp[7] = dcval; - } - }; - - // Compiler creates a fast path 1D IDCT for X non-zero rows - template - struct Col - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - // ACCESS_ROW() will be optimized at compile time to either an array access, or 0. -#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? 
pTemp[x * 8] : 0) - - const int z2 = ACCESS_ROW(2); - const int z3 = ACCESS_ROW(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS; - const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*0] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*7] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*1] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*6] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*2] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*5] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*3] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*4] = (uint8)CLAMP(i); - } - }; - - template <> - struct Col<1> - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3); - const uint8 dcval_clamped = (uint8)CLAMP(dcval); - pDst_ptr[0*8] = dcval_clamped; - pDst_ptr[1*8] = dcval_clamped; - pDst_ptr[2*8] = dcval_clamped; - pDst_ptr[3*8] = dcval_clamped; - pDst_ptr[4*8] = dcval_clamped; - pDst_ptr[5*8] = dcval_clamped; - pDst_ptr[6*8] = dcval_clamped; - pDst_ptr[7*8] = dcval_clamped; - } - }; - - static const uint8 s_idct_row_table[] = - { - 1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0, - 4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0, - 6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0, - 6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0, - 8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2, - 8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2, - 8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4, - 
8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8, - }; - - static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; - - void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag) - { - JPGD_ASSERT(block_max_zag >= 1); - JPGD_ASSERT(block_max_zag <= 64); - - if (block_max_zag == 1) - { - int k = ((pSrc_ptr[0] + 4) >> 3) + 128; - k = CLAMP(k); - k = k | (k<<8); - k = k | (k<<16); - - for (int i = 8; i > 0; i--) - { - *(int*)&pDst_ptr[0] = k; - *(int*)&pDst_ptr[4] = k; - pDst_ptr += 8; - } - return; - } - - int temp[64]; - - const jpgd_block_t* pSrc = pSrc_ptr; - int* pTemp = temp; - - const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8]; - int i; - for (i = 8; i > 0; i--, pRow_tab++) - { - switch (*pRow_tab) - { - case 0: Row<0>::idct(pTemp, pSrc); break; - case 1: Row<1>::idct(pTemp, pSrc); break; - case 2: Row<2>::idct(pTemp, pSrc); break; - case 3: Row<3>::idct(pTemp, pSrc); break; - case 4: Row<4>::idct(pTemp, pSrc); break; - case 5: Row<5>::idct(pTemp, pSrc); break; - case 6: Row<6>::idct(pTemp, pSrc); break; - case 7: Row<7>::idct(pTemp, pSrc); break; - case 8: Row<8>::idct(pTemp, pSrc); break; - } - - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - - const int nonzero_rows = s_idct_col_table[block_max_zag - 1]; - for (i = 8; i > 0; i--) - { - switch (nonzero_rows) - { - case 1: Col<1>::idct(pDst_ptr, pTemp); break; - case 2: Col<2>::idct(pDst_ptr, pTemp); break; - case 3: Col<3>::idct(pDst_ptr, pTemp); break; - case 4: Col<4>::idct(pDst_ptr, pTemp); break; - case 5: Col<5>::idct(pDst_ptr, pTemp); break; - case 6: Col<6>::idct(pDst_ptr, pTemp); break; - case 7: Col<7>::idct(pDst_ptr, pTemp); break; - case 8: Col<8>::idct(pDst_ptr, pTemp); break; - } - - pTemp++; - pDst_ptr++; - } - } - - void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr) - { - int temp[64]; - int* pTemp = temp; - const jpgd_block_t* pSrc = pSrc_ptr; - - for (int i = 4; i > 0; i--) - { - Row<4>::idct(pTemp, pSrc); - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - for (int i = 8; i > 0; i--) - { - Col<4>::idct(pDst_ptr, pTemp); - pTemp++; - pDst_ptr++; - } - } - - // Retrieve one character from the input stream. - inline uint jpeg_decoder::get_char() - { - // Any bytes remaining in buffer? - if (!m_in_buf_left) - { - // Try to get more bytes. - prep_in_buffer(); - // Still nothing to get? - if (!m_in_buf_left) - { - // Pad the end of the stream with 0xFF 0xD9 (EOI marker) - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Same as previous method, except can indicate if the character is a pad character or not. - inline uint jpeg_decoder::get_char(bool *pPadding_flag) - { - if (!m_in_buf_left) - { - prep_in_buffer(); - if (!m_in_buf_left) - { - *pPadding_flag = true; - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - *pPadding_flag = false; - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Inserts a previously retrieved character back into the input buffer. 
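One detail worth pulling out of idct() above: when block_max_zag == 1, only the DC coefficient is present, the 8x8 output is flat, and the decoder skips the row/column passes entirely, writing the descaled, level-shifted DC value to all 64 pixels. A NumPy sketch of that fast path (idct_dc_only is a hypothetical name):

```python
import numpy as np

def idct_dc_only(dc):
    """Mirror of the block_max_zag == 1 branch of idct(): round and
    descale the DC coefficient, level-shift by 128, clamp to 0..255,
    and broadcast the flat value across the 8x8 block."""
    k = ((dc + 4) >> 3) + 128      # (pSrc[0] + 4) >> 3, then level shift
    k = max(0, min(255, k))        # CLAMP
    return np.full((8, 8), k, dtype=np.uint8)
```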
- inline void jpeg_decoder::stuff_char(uint8 q) - { - *(--m_pIn_buf_ofs) = q; - m_in_buf_left++; - } - - // Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered. - inline uint8 jpeg_decoder::get_octet() - { - bool padding_flag; - int c = get_char(&padding_flag); - - if (c == 0xFF) - { - if (padding_flag) - return 0xFF; - - c = get_char(&padding_flag); - if (padding_flag) - { - stuff_char(0xFF); - return 0xFF; - } - - if (c == 0x00) - return 0xFF; - else - { - stuff_char(static_cast(c)); - stuff_char(0xFF); - return 0xFF; - } - } - - return static_cast(c); - } - - // Retrieves a variable number of bits from the input stream. Does not recognize markers. - inline uint jpeg_decoder::get_bits(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - uint c1 = get_char(); - uint c2 = get_char(); - m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2; - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered. - inline uint jpeg_decoder::get_bits_no_markers(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF)) - { - uint c1 = get_octet(); - uint c2 = get_octet(); - m_bit_buf |= (c1 << 8) | c2; - } - else - { - m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1]; - m_in_buf_left -= 2; - m_pIn_buf_ofs += 2; - } - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0) - { - // Decode more bits, use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - } - else - get_bits_no_markers(pH->code_size[symbol]); - - return symbol; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0) - { - // Use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - - extra_bits = get_bits_no_markers(symbol & 0xF); - } - else - { - JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? 
(symbol & 15) : 0)); - - if (symbol & 0x8000) - { - get_bits_no_markers((symbol >> 8) & 31); - extra_bits = symbol >> 16; - } - else - { - int code_size = (symbol >> 8) & 31; - int num_extra_bits = symbol & 0xF; - int bits = code_size + num_extra_bits; - if (bits <= (m_bits_left + 16)) - extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1); - else - { - get_bits_no_markers(code_size); - extra_bits = get_bits_no_markers(num_extra_bits); - } - } - - symbol &= 0xFF; - } - - return symbol; - } - - // Tables and macro used to fully decode the DPCM differences. - static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 }; - static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 }; - static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) }; -#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x)) - - // Clamps a value between 0-255. - inline uint8 jpeg_decoder::clamp(int i) - { - if (static_cast(i) > 255) - i = (((~i) >> 31) & 0xFF); - - return static_cast(i); - } - - namespace DCT_Upsample - { - struct Matrix44 - { - typedef int Element_Type; - enum { NUM_ROWS = 4, NUM_COLS = 4 }; - - Element_Type v[NUM_ROWS][NUM_COLS]; - - inline int rows() const { return NUM_ROWS; } - inline int cols() const { return NUM_COLS; } - - inline const Element_Type & at(int r, int c) const { return v[r][c]; } - inline Element_Type & at(int r, int c) { return v[r][c]; } - - inline Matrix44() { } - - inline Matrix44& operator += (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) += a.at(r, 0); - at(r, 1) += a.at(r, 1); - at(r, 2) += a.at(r, 2); - at(r, 3) += a.at(r, 3); - } - return *this; - } - - inline Matrix44& operator -= (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) -= a.at(r, 0); - at(r, 1) -= a.at(r, 1); - at(r, 2) -= a.at(r, 2); - at(r, 3) -= a.at(r, 3); - } - return *this; - } - - friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) + b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) + b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) + b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) + b.at(r, 3); - } - return ret; - } - - friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) - b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) - b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) - b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) - b.at(r, 3); - } - return ret; - } - - static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) + b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) + b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) + b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 3) + b.at(r, 3)); - } - } - - static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) - b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) - b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) - b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 
3) - b.at(r, 3)); - } - } - }; - - const int FRACT_BITS = 10; - const int SCALE = 1 << FRACT_BITS; - - typedef int Temp_Type; -#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS) -#define F(i) ((int)((i) * SCALE + .5f)) - - // Any decent C++ compiler will optimize this at compile time to a 0, or an array access. -#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8]) - - // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix - template - struct P_Q - { - static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X000 = AT(0, 0); - const Temp_Type X001 = AT(0, 1); - const Temp_Type X002 = AT(0, 2); - const Temp_Type X003 = AT(0, 3); - const Temp_Type X004 = AT(0, 4); - const Temp_Type X005 = AT(0, 5); - const Temp_Type X006 = AT(0, 6); - const Temp_Type X007 = AT(0, 7); - const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0)); - const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1)); - const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2)); - const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3)); - const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4)); - const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5)); - const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6)); - const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7)); - const Temp_Type X020 = AT(4, 0); - const Temp_Type X021 = AT(4, 1); - const Temp_Type X022 = AT(4, 2); - const Temp_Type X023 = AT(4, 3); - const Temp_Type X024 = AT(4, 4); - const Temp_Type X025 = AT(4, 5); - const Temp_Type X026 = AT(4, 6); - const Temp_Type X027 = AT(4, 7); - const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0)); - const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1)); - const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2)); - const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3)); - const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4)); - const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5)); - const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6)); - const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7)); - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - P.at(0, 0) = X000; - P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f)); - P.at(0, 2) = X004; - P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * 
F(0.490393f) + X007 * F(0.865723f)); - P.at(1, 0) = X010; - P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f)); - P.at(1, 2) = X014; - P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f)); - P.at(2, 0) = X020; - P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f)); - P.at(2, 2) = X024; - P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f)); - P.at(3, 0) = X030; - P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f)); - P.at(3, 2) = X034; - P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f)); - // 40 muls 24 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f)); - Q.at(0, 1) = X002; - Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f)); - Q.at(0, 3) = X006; - Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f)); - Q.at(1, 1) = X012; - Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f)); - Q.at(1, 3) = X016; - Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f)); - Q.at(2, 1) = X022; - Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f)); - Q.at(2, 3) = X026; - Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f)); - Q.at(3, 1) = X032; - Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f)); - Q.at(3, 3) = X036; - // 40 muls 24 adds - } - }; - - template - struct R_S - { - static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0)); - const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1)); - const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2)); - const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3)); - const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4)); - const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5)); - const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6)); - const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7)); - const Temp_Type X110 = AT(2, 0); - const Temp_Type X111 = AT(2, 1); - const Temp_Type X112 = AT(2, 2); - const Temp_Type X113 = AT(2, 3); - const Temp_Type X114 = AT(2, 4); - const Temp_Type X115 = AT(2, 5); - const Temp_Type X116 = AT(2, 6); - const Temp_Type X117 = AT(2, 7); - const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0)); - 
const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1)); - const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2)); - const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3)); - const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4)); - const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5)); - const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6)); - const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7)); - const Temp_Type X130 = AT(6, 0); - const Temp_Type X131 = AT(6, 1); - const Temp_Type X132 = AT(6, 2); - const Temp_Type X133 = AT(6, 3); - const Temp_Type X134 = AT(6, 4); - const Temp_Type X135 = AT(6, 5); - const Temp_Type X136 = AT(6, 6); - const Temp_Type X137 = AT(6, 7); - // 80 muls 48 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - R.at(0, 0) = X100; - R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f)); - R.at(0, 2) = X104; - R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f)); - R.at(1, 0) = X110; - R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f)); - R.at(1, 2) = X114; - R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f)); - R.at(2, 0) = X120; - R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f)); - R.at(2, 2) = X124; - R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f)); - R.at(3, 0) = X130; - R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f)); - R.at(3, 2) = X134; - R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f)); - // 40 muls 24 adds - // 4x4 = 4x8 times 8x4, matrix 1 is constant - S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f)); - S.at(0, 1) = X102; - S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f)); - S.at(0, 3) = X106; - S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f)); - S.at(1, 1) = X112; - S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f)); - S.at(1, 3) = X116; - S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f)); - S.at(2, 1) = X122; - S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f)); - S.at(2, 3) = X126; - S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f)); - S.at(3, 1) = X132; - S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f)); - S.at(3, 3) = X136; - // 40 muls 24 adds - } - }; - } // end namespace DCT_Upsample - - // Unconditionally frees all allocated m_blocks. 
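The P_Q/R_S blocks above do all of their arithmetic in Q10 fixed point: F() turns a float constant into an integer scaled by SCALE = 1 << FRACT_BITS, MULTIPLY is a plain integer multiply, and D() adds half an LSB before shifting the fractional bits away, which rounds instead of truncating. A worked Python equivalent of the two macros:

```python
FRACT_BITS = 10
SCALE = 1 << FRACT_BITS

def F(x):
    """Float constant -> Q10 fixed point (mirrors the F() macro)."""
    return int(x * SCALE + 0.5)

def D(i):
    """Round and drop the fractional bits (mirrors the D() macro)."""
    return (i + (SCALE >> 1)) >> FRACT_BITS

# One upsampling term, 0.415735 * 100, evaluated in Q10:
print(D(F(0.415735) * 100))  # 42, matching round(41.5735)
```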
- void jpeg_decoder::free_all_blocks() - { - m_pStream = NULL; - for (mem_block *b = m_pMem_blocks; b; ) - { - mem_block *n = b->m_pNext; - jpgd_free(b); - b = n; - } - m_pMem_blocks = NULL; - } - - // This method handles all errors. - // It could easily be changed to use C++ exceptions. - void jpeg_decoder::stop_decoding(jpgd_status status) - { - m_error_code = status; - free_all_blocks(); - longjmp(m_jmp_state, status); - - // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit - // that this function doesn't return, otherwise we get this error: - // - // error : function declared 'noreturn' should not return - exit(1); - } - - void *jpeg_decoder::alloc(size_t nSize, bool zero) - { - nSize = (JPGD_MAX(nSize, 1) + 3) & ~3; - char *rv = NULL; - for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext) - { - if ((b->m_used_count + nSize) <= b->m_size) - { - rv = b->m_data + b->m_used_count; - b->m_used_count += nSize; - break; - } - } - if (!rv) - { - int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047); - mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity); - if (!b) stop_decoding(JPGD_NOTENOUGHMEM); - b->m_pNext = m_pMem_blocks; m_pMem_blocks = b; - b->m_used_count = nSize; - b->m_size = capacity; - rv = b->m_data; - } - if (zero) memset(rv, 0, nSize); - return rv; - } - - void jpeg_decoder::word_clear(void *p, uint16 c, uint n) - { - uint8 *pD = (uint8*)p; - const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF; - while (n) - { - pD[0] = l; pD[1] = h; pD += 2; - n--; - } - } - - // Refill the input buffer. - // This method will sit in a loop until (A) the buffer is full or (B) - // the stream's read() method reports and end of file condition. - void jpeg_decoder::prep_in_buffer() - { - m_in_buf_left = 0; - m_pIn_buf_ofs = m_in_buf; - - if (m_eof_flag) - return; - - do - { - int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag); - if (bytes_read == -1) - stop_decoding(JPGD_STREAM_READ); - - m_in_buf_left += bytes_read; - } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag)); - - m_total_bytes_read += m_in_buf_left; - - // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid). - // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.) - word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64); - } - - // Read a Huffman code table. 
- void jpeg_decoder::read_dht_marker() - { - int i, index, count; - uint8 huff_num[17]; - uint8 huff_val[256]; - - uint num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= 2; - - while (num_left) - { - index = get_bits(8); - - huff_num[0] = 0; - - count = 0; - - for (i = 1; i <= 16; i++) - { - huff_num[i] = static_cast<uint8>(get_bits(8)); - count += huff_num[i]; - } - - if (count > 255) - stop_decoding(JPGD_BAD_DHT_COUNTS); - - for (i = 0; i < count; i++) - huff_val[i] = static_cast<uint8>(get_bits(8)); - - i = 1 + 16 + count; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= i; - - if ((index & 0x10) > 0x10) - stop_decoding(JPGD_BAD_DHT_INDEX); - - index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1); - - if (index >= JPGD_MAX_HUFF_TABLES) - stop_decoding(JPGD_BAD_DHT_INDEX); - - if (!m_huff_num[index]) - m_huff_num[index] = (uint8 *)alloc(17); - - if (!m_huff_val[index]) - m_huff_val[index] = (uint8 *)alloc(256); - - m_huff_ac[index] = (index & 0x10) != 0; - memcpy(m_huff_num[index], huff_num, 17); - memcpy(m_huff_val[index], huff_val, 256); - } - } - - // Read a quantization table. - void jpeg_decoder::read_dqt_marker() - { - int n, i, prec; - uint num_left; - uint temp; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DQT_MARKER); - - num_left -= 2; - - while (num_left) - { - n = get_bits(8); - prec = n >> 4; - n &= 0x0F; - - if (n >= JPGD_MAX_QUANT_TABLES) - stop_decoding(JPGD_BAD_DQT_TABLE); - - if (!m_quant[n]) - m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t)); - - // read quantization entries, in zag order - for (i = 0; i < 64; i++) - { - temp = get_bits(8); - - if (prec) - temp = (temp << 8) + get_bits(8); - - m_quant[n][i] = static_cast<jpgd_quant_t>(temp); - } - - i = 64 + 1; - - if (prec) - i += 64; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DQT_LENGTH); - - num_left -= i; - } - } - - // Read the start of frame (SOF) marker. - void jpeg_decoder::read_sof_marker() - { - int i; - uint num_left; - - num_left = get_bits(16); - - if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */ - stop_decoding(JPGD_BAD_PRECISION); - - m_image_y_size = get_bits(16); - - if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT)) - stop_decoding(JPGD_BAD_HEIGHT); - - m_image_x_size = get_bits(16); - - if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH)) - stop_decoding(JPGD_BAD_WIDTH); - - m_comps_in_frame = get_bits(8); - - if (m_comps_in_frame > JPGD_MAX_COMPONENTS) - stop_decoding(JPGD_TOO_MANY_COMPONENTS); - - if (num_left != (uint)(m_comps_in_frame * 3 + 8)) - stop_decoding(JPGD_BAD_SOF_LENGTH); - - for (i = 0; i < m_comps_in_frame; i++) - { - m_comp_ident[i] = get_bits(8); - m_comp_h_samp[i] = get_bits(4); - m_comp_v_samp[i] = get_bits(4); - m_comp_quant[i] = get_bits(8); - } - } - - // Used to skip unrecognized markers. - void jpeg_decoder::skip_variable_marker() - { - uint num_left; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_VARIABLE_MARKER); - - num_left -= 2; - - while (num_left) - { - get_bits(8); - num_left--; - } - } - - // Read a define restart interval (DRI) marker. - void jpeg_decoder::read_dri_marker() - { - if (get_bits(16) != 4) - stop_decoding(JPGD_BAD_DRI_LENGTH); - - m_restart_interval = get_bits(16); - } - - // Read a start of scan (SOS) marker.
- void jpeg_decoder::read_sos_marker() - { - uint num_left; - int i, ci, n, c, cc; - - num_left = get_bits(16); - - n = get_bits(8); - - m_comps_in_scan = n; - - num_left -= 3; - - if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) ) - stop_decoding(JPGD_BAD_SOS_LENGTH); - - for (i = 0; i < n; i++) - { - cc = get_bits(8); - c = get_bits(8); - num_left -= 2; - - for (ci = 0; ci < m_comps_in_frame; ci++) - if (cc == m_comp_ident[ci]) - break; - - if (ci >= m_comps_in_frame) - stop_decoding(JPGD_BAD_SOS_COMP_ID); - - m_comp_list[i] = ci; - m_comp_dc_tab[ci] = (c >> 4) & 15; - m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1); - } - - m_spectral_start = get_bits(8); - m_spectral_end = get_bits(8); - m_successive_high = get_bits(4); - m_successive_low = get_bits(4); - - if (!m_progressive_flag) - { - m_spectral_start = 0; - m_spectral_end = 63; - } - - num_left -= 3; - - while (num_left) /* read past whatever is num_left */ - { - get_bits(8); - num_left--; - } - } - - // Finds the next marker. - int jpeg_decoder::next_marker() - { - uint c, bytes; - - bytes = 0; - - do - { - do - { - bytes++; - c = get_bits(8); - } while (c != 0xFF); - - do - { - c = get_bits(8); - } while (c == 0xFF); - - } while (c == 0); - - // If bytes > 0 here, there where extra bytes before the marker (not good). - - return c; - } - - // Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is - // encountered. - int jpeg_decoder::process_markers() - { - int c; - - for ( ; ; ) - { - c = next_marker(); - - switch (c) - { - case M_SOF0: - case M_SOF1: - case M_SOF2: - case M_SOF3: - case M_SOF5: - case M_SOF6: - case M_SOF7: - // case M_JPG: - case M_SOF9: - case M_SOF10: - case M_SOF11: - case M_SOF13: - case M_SOF14: - case M_SOF15: - case M_SOI: - case M_EOI: - case M_SOS: - { - return c; - } - case M_DHT: - { - read_dht_marker(); - break; - } - // No arithmitic support - dumb patents! - case M_DAC: - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - case M_DQT: - { - read_dqt_marker(); - break; - } - case M_DRI: - { - read_dri_marker(); - break; - } - //case M_APP0: /* no need to read the JFIF marker */ - - case M_JPG: - case M_RST0: /* no parameters */ - case M_RST1: - case M_RST2: - case M_RST3: - case M_RST4: - case M_RST5: - case M_RST6: - case M_RST7: - case M_TEM: - { - stop_decoding(JPGD_UNEXPECTED_MARKER); - break; - } - default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */ - { - skip_variable_marker(); - break; - } - } - } - } - - // Finds the start of image (SOI) marker. - // This code is rather defensive: it only checks the first 512 bytes to avoid - // false positives. - void jpeg_decoder::locate_soi_marker() - { - uint lastchar, thischar; - uint bytesleft; - - lastchar = get_bits(8); - - thischar = get_bits(8); - - /* ok if it's a normal JPEG file without a special header */ - - if ((lastchar == 0xFF) && (thischar == M_SOI)) - return; - - bytesleft = 4096; //512; - - for ( ; ; ) - { - if (--bytesleft == 0) - stop_decoding(JPGD_NOT_JPEG); - - lastchar = thischar; - - thischar = get_bits(8); - - if (lastchar == 0xFF) - { - if (thischar == M_SOI) - break; - else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end - stop_decoding(JPGD_NOT_JPEG); - } - } - - // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad. 
- thischar = (m_bit_buf >> 24) & 0xFF; - - if (thischar != 0xFF) - stop_decoding(JPGD_NOT_JPEG); - } - - // Find a start of frame (SOF) marker. - void jpeg_decoder::locate_sof_marker() - { - locate_soi_marker(); - - int c = process_markers(); - - switch (c) - { - case M_SOF2: - m_progressive_flag = JPGD_TRUE; - case M_SOF0: /* baseline DCT */ - case M_SOF1: /* extended sequential DCT */ - { - read_sof_marker(); - break; - } - case M_SOF9: /* Arithmitic coding */ - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - default: - { - stop_decoding(JPGD_UNSUPPORTED_MARKER); - break; - } - } - } - - // Find a start of scan (SOS) marker. - int jpeg_decoder::locate_sos_marker() - { - int c; - - c = process_markers(); - - if (c == M_EOI) - return JPGD_FALSE; - else if (c != M_SOS) - stop_decoding(JPGD_UNEXPECTED_MARKER); - - read_sos_marker(); - - return JPGD_TRUE; - } - - // Reset everything to default/uninitialized state. - void jpeg_decoder::init(jpeg_decoder_stream *pStream) - { - m_pMem_blocks = NULL; - m_error_code = JPGD_SUCCESS; - m_ready_flag = false; - m_image_x_size = m_image_y_size = 0; - m_pStream = pStream; - m_progressive_flag = JPGD_FALSE; - - memset(m_huff_ac, 0, sizeof(m_huff_ac)); - memset(m_huff_num, 0, sizeof(m_huff_num)); - memset(m_huff_val, 0, sizeof(m_huff_val)); - memset(m_quant, 0, sizeof(m_quant)); - - m_scan_type = 0; - m_comps_in_frame = 0; - - memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp)); - memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp)); - memset(m_comp_quant, 0, sizeof(m_comp_quant)); - memset(m_comp_ident, 0, sizeof(m_comp_ident)); - memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks)); - memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks)); - - m_comps_in_scan = 0; - memset(m_comp_list, 0, sizeof(m_comp_list)); - memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab)); - memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab)); - - m_spectral_start = 0; - m_spectral_end = 0; - m_successive_low = 0; - m_successive_high = 0; - m_max_mcu_x_size = 0; - m_max_mcu_y_size = 0; - m_blocks_per_mcu = 0; - m_max_blocks_per_row = 0; - m_mcus_per_row = 0; - m_mcus_per_col = 0; - m_expanded_blocks_per_component = 0; - m_expanded_blocks_per_mcu = 0; - m_expanded_blocks_per_row = 0; - m_freq_domain_chroma_upsample = false; - - memset(m_mcu_org, 0, sizeof(m_mcu_org)); - - m_total_lines_left = 0; - m_mcu_lines_left = 0; - m_real_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_pixel = 0; - - memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs)); - - memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs)); - memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs)); - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_eob_run = 0; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_pIn_buf_ofs = m_in_buf; - m_in_buf_left = 0; - m_eof_flag = false; - m_tem_flag = 0; - - memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start)); - memset(m_in_buf, 0, sizeof(m_in_buf)); - memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end)); - - m_restart_interval = 0; - m_restarts_left = 0; - m_next_restart_num = 0; - - m_max_mcus_per_row = 0; - m_max_blocks_per_mcu = 0; - m_max_mcus_per_col = 0; - - memset(m_last_dc_val, 0, sizeof(m_last_dc_val)); - m_pMCU_coefficients = NULL; - m_pSample_buf = NULL; - - m_total_bytes_read = 0; - - m_pScan_line_0 = NULL; - m_pScan_line_1 = NULL; - - // Ready the input buffer. - prep_in_buffer(); - - // Prime the bit buffer. 
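// Priming note: with m_bits_left preset to 16, the two 16-bit fetches below pull
// the first four bytes of the stream into the 32-bit m_bit_buf, so the next
// unconsumed bits always sit at its high-order end.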
- m_bits_left = 16; - m_bit_buf = 0; - - get_bits(16); - get_bits(16); - - for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++) - m_mcu_block_max_zag[i] = 64; - } - -#define SCALEBITS 16 -#define ONE_HALF ((int) 1 << (SCALEBITS-1)) -#define FIX(x) ((int) ((x) * (1L<<SCALEBITS) + 0.5f)) - - // Create a few tables that allow us to quickly convert YCbCr to RGB. - void jpeg_decoder::create_look_ups() - { - for (int i = 0; i <= 255; i++) - { - int k = i - 128; - m_crr[i] = ( FIX(1.40200f) * k + ONE_HALF) >> SCALEBITS; - m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS; - m_crg[i] = (-FIX(0.71414f)) * k; - m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF; - } - } - - // This method throws back into the stream any bytes that were read - // into the bit buffer during initial marker scanning. - void jpeg_decoder::fix_in_buffer() - { - // In case any 0xFF's were pulled into the buffer during marker scanning. - JPGD_ASSERT((m_bits_left & 7) == 0); - - if (m_bits_left == 16) - stuff_char( (uint8)(m_bit_buf & 0xFF)); - - if (m_bits_left >= 8) - stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF)); - - stuff_char((uint8)((m_bit_buf >> 16) & 0xFF)); - stuff_char((uint8)((m_bit_buf >> 24) & 0xFF)); - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - void jpeg_decoder::transform_mcu(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64; - - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - } - - static const uint8 s_max_rc[64] = - { - 17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86, - 102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136 - }; - - void jpeg_decoder::transform_mcu_expand(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64; - - // Y IDCT - int mcu_block; - for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - - // Chroma IDCT, with upsampling - jpgd_block_t temp_block[64]; - - for (int i = 0; i < 2; i++) - { - DCT_Upsample::Matrix44 P, Q, R, S; - - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1); - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64); - - switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1]) - { - case 1*16+1: - DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr); - break; - case 1*16+2: - DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr); - break; - case 2*16+2: - DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+2: - DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+3: - DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr); - break; - case 3*16+4: - DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr); - break; - case 4*16+4: - DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+4: - DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+5: - DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr); - break; -
case 5*16+6: - DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr); - break; - case 6*16+6: - DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+6: - DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+7: - DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr); - break; - case 7*16+8: - DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr); - break; - case 8*16+8: - DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr); - break; - default: - JPGD_ASSERT(false); - } - - DCT_Upsample::Matrix44 a(P + Q); P -= Q; - DCT_Upsample::Matrix44& b = P; - DCT_Upsample::Matrix44 c(R + S); R -= S; - DCT_Upsample::Matrix44& d = R; - - DCT_Upsample::Matrix44::add_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::add_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - pSrc_ptr += 64; - } - } - - // Loads and dequantizes the next row of (already decoded) coefficients. - // Progressive images only. - void jpeg_decoder::load_next_row() - { - int i; - jpgd_block_t *p; - jpgd_quant_t *q; - int mcu_row, mcu_block, row_block = 0; - int component_num, component_id; - int block_x_mcu[JPGD_MAX_COMPONENTS]; - - memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - q = m_quant[m_comp_quant[component_id]]; - - p = m_pMCU_coefficients + 64 * mcu_block; - - jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - p[0] = pDC[0]; - memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t)); - - for (i = 63; i > 0; i--) - if (p[g_ZAG[i]]) - break; - - m_mcu_block_max_zag[mcu_block] = i + 1; - - for ( ; i >= 0; i--) - if (p[g_ZAG[i]]) - p[g_ZAG[i]] = static_cast<jpgd_block_t>(p[g_ZAG[i]] * q[i]); - - row_block++; - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - - // Restart interval processing.
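Background for process_restart() below: restart markers RST0–RST7 (0xFFD0–0xFFD7) carry no payload; encoders emit one every restart-interval MCUs, cycling the marker's low three bits so a decoder can verify it resynchronized on the right one. The expectation check amounts to (illustrative only):

    // Does marker byte m (the byte after 0xFF) encode the RSTn we expect next?
    static bool is_expected_rst(int m, int expected_num) {
      return m >= 0xD0 && m <= 0xD7 && (m & 7) == (expected_num & 7);
    }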
- void jpeg_decoder::process_restart() - { - int i; - int c = 0; - - // Align to a byte boundary - // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers! - //get_bits_no_markers(m_bits_left & 7); - - // Let's scan a little bit to find the marker, but not _too_ far. - // 1536 is a "fudge factor" that determines how much to scan. - for (i = 1536; i > 0; i--) - if (get_char() == 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - for ( ; i > 0; i--) - if ((c = get_char()) != 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Is it the expected marker? If not, something bad happened. - if (c != (m_next_restart_num + M_RST0)) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Reset each component's DC prediction values. - memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - m_restarts_left = m_restart_interval; - - m_next_restart_num = (m_next_restart_num + 1) & 7; - - // Get the bit buffer going again... - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - static inline int dequantize_ac(int c, int q) { c *= q; return c; } - - // Decodes and dequantizes the next row of coefficients. - void jpeg_decoder::decode_next_row() - { - int row_block = 0; - - for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - jpgd_block_t* p = m_pMCU_coefficients; - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64) - { - int component_id = m_mcu_org[mcu_block]; - jpgd_quant_t* q = m_quant[m_comp_quant[component_id]]; - - int r, s; - s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r); - s = HUFF_EXTEND(r, s); - - m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]); - - p[0] = static_cast<jpgd_block_t>(s * q[0]); - - int prev_num_set = m_mcu_block_max_zag[mcu_block]; - - huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]]; - - int k; - for (k = 1; k < 64; k++) - { - int extra_bits; - s = huff_decode(pH, extra_bits); - - r = s >> 4; - s &= 15; - - if (s) - { - if (r) - { - if ((k + r) > 63) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(r, prev_num_set - k); - int kt = k; - while (n--) - p[g_ZAG[kt++]] = 0; - } - - k += r; - } - - s = HUFF_EXTEND(extra_bits, s); - - JPGD_ASSERT(k < 64); - - p[g_ZAG[k]] = static_cast<jpgd_block_t>(dequantize_ac(s, q[k])); //s * q[k]; - } - else - { - if (r == 15) - { - if ((k + 16) > 64) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(16, prev_num_set - k); - int kt = k; - while (n--) - { - JPGD_ASSERT(kt <= 63); - p[g_ZAG[kt++]] = 0; - } - } - - k += 16 - 1; // - 1 because the loop counter is k - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0); - // END EPIC MOD - } - else - break; - } - } - - if (k < prev_num_set) - { - int kt = k; - while (kt < prev_num_set) - p[g_ZAG[kt++]] = 0; - } - - m_mcu_block_max_zag[mcu_block] = k; - - row_block++; - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - - m_restarts_left--; - } - } - - // YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB - void jpeg_decoder::H1V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int y = s[j]; - int cb = s[64+j]; - int cr = s[128+j]; - - if (jpg_format ==
ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - d += 4; - } - - s += 64*3; - } - } - - // YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H2V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *y = m_pSample_buf + row * 8; - uint8 *c = m_pSample_buf + 2*64 + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 4; j++) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j<<1]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - } - - d0 += 8; - - c++; - } - y += 64; - } - - y += 64*4 - 64*2; - c += 64*4 - 8; - } - } - - // YCbCr H2V1 (1x2:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H1V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*1 + (row & 7) * 8; - - c = m_pSample_buf + 64*2 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int cb = c[0+j]; - int cr = c[64+j]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - } - - d0 += 4; - d1 += 4; - } - - y += 64*4; - c += 64*4; - } - } - - // YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB - void jpeg_decoder::H2V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*2 + (row & 7) * 8; - - c = m_pSample_buf + 64*4 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 8; j += 2) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+bc); - d1[5] = 
clamp(yy+gc); - d1[6] = clamp(yy+rc); - d1[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+rc); - d1[5] = clamp(yy+gc); - d1[6] = clamp(yy+bc); - d1[7] = 255; - } - - d0 += 8; - d1 += 8; - - c++; - } - y += 64; - } - - y += 64*6 - 64*2; - c += 64*6 - 8; - } - } - - // Y (1 block per MCU) to 8-bit grayscale - void jpeg_decoder::gray_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - *(uint *)d = *(uint *)s; - *(uint *)(&d[4]) = *(uint *)(&s[4]); - - s += 64; - d += 8; - } - } - - void jpeg_decoder::expanded_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - - uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8; - - uint8* d = m_pScan_line_0; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int k = 0; k < m_max_mcu_x_size; k += 8) - { - const int Y_ofs = k * 8; - const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component; - const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2; - for (int j = 0; j < 8; j++) - { - int y = Py[Y_ofs + j]; - int cb = Py[Cb_ofs + j]; - int cr = Py[Cr_ofs + j]; - - if (jpg_format == ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - - d += 4; - } - } - - Py += 64 * m_expanded_blocks_per_mcu; - } - } - - // Find end of image (EOI) marker, so we can return to the user the exact size of the input stream. - void jpeg_decoder::find_eoi() - { - if (!m_progressive_flag) - { - // Attempt to read the EOI marker. - //get_bits_no_markers(m_bits_left & 7); - - // Prime the bit buffer - m_bits_left = 16; - get_bits(16); - get_bits(16); - - // The next marker _should_ be EOI - process_markers(); - } - - m_total_bytes_read -= m_in_buf_left; - } - - int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len) - { - if ((m_error_code) || (!m_ready_flag)) - return JPGD_FAILED; - - if (m_total_lines_left == 0) - return JPGD_DONE; - - if (m_mcu_lines_left == 0) - { - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - if (m_progressive_flag) - load_next_row(); - else - decode_next_row(); - - // Find the EOI marker if that was the last row. 
- if (m_total_lines_left <= m_max_mcu_y_size) - find_eoi(); - - m_mcu_lines_left = m_max_mcu_y_size; - } - - if (m_freq_domain_chroma_upsample) - { - expanded_convert(); - *pScan_line = m_pScan_line_0; - } - else - { - switch (m_scan_type) - { - case JPGD_YH2V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H2V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH2V1: - { - H2V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_YH1V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H1V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH1V1: - { - H1V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_GRAYSCALE: - { - gray_convert(); - *pScan_line = m_pScan_line_0; - - break; - } - } - } - - *pScan_line_len = m_real_dest_bytes_per_scan_line; - - m_mcu_lines_left--; - m_total_lines_left--; - - return JPGD_SUCCESS; - } - - // Creates the tables needed for efficient Huffman decoding. - void jpeg_decoder::make_huff_table(int index, huff_tables *pH) - { - int p, i, l, si; - uint8 huffsize[257]; - uint huffcode[257]; - uint code; - uint subtree; - int code_size; - int lastp; - int nextfreeentry; - int currententry; - - pH->ac_table = m_huff_ac[index] != 0; - - p = 0; - - for (l = 1; l <= 16; l++) - { - for (i = 1; i <= m_huff_num[index][l]; i++) - huffsize[p++] = static_cast<uint8>(l); - } - - huffsize[p] = 0; - - lastp = p; - - code = 0; - si = huffsize[0]; - p = 0; - - while (huffsize[p]) - { - while (huffsize[p] == si) - { - huffcode[p++] = code; - code++; - } - - code <<= 1; - si++; - } - - memset(pH->look_up, 0, sizeof(pH->look_up)); - memset(pH->look_up2, 0, sizeof(pH->look_up2)); - memset(pH->tree, 0, sizeof(pH->tree)); - memset(pH->code_size, 0, sizeof(pH->code_size)); - - nextfreeentry = -1; - - p = 0; - - while (p < lastp) - { - i = m_huff_val[index][p]; - code = huffcode[p]; - code_size = huffsize[p]; - - pH->code_size[i] = static_cast<uint8>(code_size); - - if (code_size <= 8) - { - code <<= (8 - code_size); - - for (l = 1 << (8 - code_size); l > 0; l--) - { - JPGD_ASSERT(i < 256); - - pH->look_up[code] = i; - - bool has_extrabits = false; - int extra_bits = 0; - int num_extra_bits = i & 15; - - int bits_to_fetch = code_size; - if (num_extra_bits) - { - int total_codesize = code_size + num_extra_bits; - if (total_codesize <= 8) - { - has_extrabits = true; - extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize)); - JPGD_ASSERT(extra_bits <= 0x7FFF); - bits_to_fetch += num_extra_bits; - } - } - - if (!has_extrabits) - pH->look_up2[code] = i | (bits_to_fetch << 8); - else - pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8); - - code++; - } - } - else - { - subtree = (code >> (code_size - 8)) & 0xFF; - - currententry = pH->look_up[subtree]; - - if (currententry == 0) - { - pH->look_up[subtree] = currententry = nextfreeentry; - pH->look_up2[subtree] = currententry = nextfreeentry; - - nextfreeentry -= 2; - } - - code <<= (16 - (code_size - 8)); - - for (l = code_size; l > 9; l--) - { - if ((code & 0x8000) == 0) - currententry--; - - if (pH->tree[-currententry - 1] == 0) - { - pH->tree[-currententry - 1] = nextfreeentry; - - currententry = nextfreeentry; - - nextfreeentry -= 2; - } - else - currententry = pH->tree[-currententry - 1]; - - code <<= 1; - } - - if ((code & 0x8000) == 0) - currententry--; - - pH->tree[-currententry - 1] = i; - } - - p++; - } - } - - // Verifies the quantization tables needed for
this scan are available. - void jpeg_decoder::check_quant_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL) - stop_decoding(JPGD_UNDEFINED_QUANT_TABLE); - } - - // Verifies that all the Huffman tables needed for this scan are available. - void jpeg_decoder::check_huff_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - { - if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - - if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - } - - for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++) - if (m_huff_num[i]) - { - if (!m_pHuff_tabs[i]) - m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables)); - - make_huff_table(i, m_pHuff_tabs[i]); - } - } - - // Determines the component order inside each MCU. - // Also calcs how many MCU's are on each row, etc. - void jpeg_decoder::calc_mcu_block_order() - { - int component_num, component_id; - int max_h_samp = 0, max_v_samp = 0; - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - if (m_comp_h_samp[component_id] > max_h_samp) - max_h_samp = m_comp_h_samp[component_id]; - - if (m_comp_v_samp[component_id] > max_v_samp) - max_v_samp = m_comp_v_samp[component_id]; - } - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8; - m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8; - } - - if (m_comps_in_scan == 1) - { - m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]]; - m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]]; - } - else - { - m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp; - m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp; - } - - if (m_comps_in_scan == 1) - { - m_mcu_org[0] = m_comp_list[0]; - - m_blocks_per_mcu = 1; - } - else - { - m_blocks_per_mcu = 0; - - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - int num_blocks; - - component_id = m_comp_list[component_num]; - - num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id]; - - while (num_blocks--) - m_mcu_org[m_blocks_per_mcu++] = component_id; - } - } - } - - // Starts a new scan. - int jpeg_decoder::init_scan() - { - if (!locate_sos_marker()) - return JPGD_FALSE; - - calc_mcu_block_order(); - - check_huff_tables(); - - check_quant_tables(); - - memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - if (m_restart_interval) - { - m_restarts_left = m_restart_interval; - m_next_restart_num = 0; - } - - fix_in_buffer(); - - return JPGD_TRUE; - } - - // Starts a frame. Determines if the number of components or sampling factors - // are supported. 
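init_frame() below only accepts luma sampling factors (1,1), (2,1), (1,2) and (2,2) with 1x1 chroma, i.e. the common 4:4:4, 4:2:2, 4:4:0 and 4:2:0 layouts; MCU pixel size is just 8 times each luma factor, and an MCU holds h*v luma blocks plus one Cb and one Cr block. In sketch form (hypothetical helper, same numbers as the branches below):

    struct mcu_geom { int blocks_per_mcu, mcu_w, mcu_h; };
    static mcu_geom mcu_geometry(int luma_h, int luma_v) {
      return { luma_h * luma_v + 2, 8 * luma_h, 8 * luma_v };
    }
    // e.g. mcu_geometry(2, 2) == {6, 16, 16}, matching the JPGD_YH2V2 case below.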
- void jpeg_decoder::init_frame() - { - int i; - - if (m_comps_in_frame == 1) - { - if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1)) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - m_scan_type = JPGD_GRAYSCALE; - m_max_blocks_per_mcu = 1; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if (m_comps_in_frame == 3) - { - if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) || - ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) ) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH1V1; - - m_max_blocks_per_mcu = 3; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH2V1; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH1V2; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 16; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH2V2; - m_max_blocks_per_mcu = 6; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 16; - } - else - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - } - else - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size; - m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size; - - // These values are for the *destination* pixels: after conversion. - if (m_scan_type == JPGD_GRAYSCALE) - m_dest_bytes_per_pixel = 1; - else - m_dest_bytes_per_pixel = 4; - - m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel; - - m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel); - - // Initialize two scan line buffers. - m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2)) - m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - - m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu; - - // Should never happen - if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW) - stop_decoding(JPGD_ASSERTION_ERROR); - - // Allocate the coefficient buffer, enough for one MCU - m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t)); - - for (i = 0; i < m_max_blocks_per_mcu; i++) - m_mcu_block_max_zag[i] = 64; - - m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0]; - m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame; - m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu; - // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor. -// BEGIN EPIC MOD -#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING - m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3); -#else - m_freq_domain_chroma_upsample = 0; -#endif -// END EPIC MOD - - if (m_freq_domain_chroma_upsample) - m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64); - else - m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64); - - m_total_lines_left = m_image_y_size; - - m_mcu_lines_left = 0; - - create_look_ups(); - } - - // The coeff_buf series of methods originally stored the coefficients - // into a "virtual" file which was located in EMS, XMS, or a disk file. A cache - // was used to make this process more efficient. Now, we can store the entire - // thing in RAM. 
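The coefficient buffer is a flat, row-major grid of equally sized blocks; coeff_buf_getp() below resolves (block_x, block_y) as pData + block_x * block_size + block_y * (block_size * block_num_x). The same index arithmetic in isolation:

    // Byte offset of block (x, y) in a row-major grid of bpr blocks per row,
    // each block_size bytes — the arithmetic coeff_buf_getp() performs below.
    static size_t block_offset(size_t x, size_t y, size_t bpr, size_t block_size) {
      return (y * bpr + x) * block_size;
    }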
- jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y) - { - coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf)); - - cb->block_num_x = block_num_x; - cb->block_num_y = block_num_y; - cb->block_len_x = block_len_x; - cb->block_len_y = block_len_y; - cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t); - cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true); - return cb; - } - - inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y) - { - JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y)); - return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x)); - } - - // The following methods decode the various types of m_blocks encountered - // in progressively encoded images. - void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, r; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0) - { - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - } - - pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]); - - p[0] = static_cast<jpgd_block_t>(s << pD->m_successive_low); - } - - void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - if (pD->get_bits_no_markers(1)) - { - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - p[0] |= (1 << pD->m_successive_low); - } - } - - void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int k, s, r; - - if (pD->m_eob_run) - { - pD->m_eob_run--; - return; - } - - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if ((k += r) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - - p[g_ZAG[k]] = static_cast<jpgd_block_t>(s << pD->m_successive_low); - } - else - { - if (r == 15) - { - if ((k += 15) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - } - else - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - pD->m_eob_run--; - - break; - } - } - } - } - - void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, k, r; - int p1 = 1 << pD->m_successive_low; - int m1 = (-1) << pD->m_successive_low; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - k = pD->m_spectral_start; - - if (pD->m_eob_run == 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if (s != 1) - pD->stop_decoding(JPGD_DECODE_ERROR); - - if (pD->get_bits_no_markers(1)) - s = p1; - else - s = m1; - } - else - { - if (r != 15) - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - break; - } - } - - do - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if
(*this_coef >= 0) - *this_coef = static_cast<jpgd_block_t>(*this_coef + p1); - else - *this_coef = static_cast<jpgd_block_t>(*this_coef + m1); - } - } - } - else - { - if (--r < 0) - break; - } - - k++; - - } while (k <= pD->m_spectral_end); - - if ((s) && (k < 64)) - { - p[g_ZAG[k]] = static_cast<jpgd_block_t>(s); - } - } - } - - if (pD->m_eob_run > 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if (*this_coef >= 0) - *this_coef = static_cast<jpgd_block_t>(*this_coef + p1); - else - *this_coef = static_cast<jpgd_block_t>(*this_coef + m1); - } - } - } - } - - pD->m_eob_run--; - } - } - - // Decode a scan in a progressively encoded image. - void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func) - { - int mcu_row, mcu_col, mcu_block; - int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS]; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++) - { - int component_num, component_id; - - memset(block_x_mcu, 0, sizeof(block_x_mcu)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - - decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - m_restarts_left--; - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - } - - // Decode a progressively encoded image. - void jpeg_decoder::init_progressive() - { - int i; - - if (m_comps_in_frame == 4) - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - // Allocate the coefficient buffers.
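Unlike the sequential path, a progressive decode must keep every component's full DC and AC coefficient planes alive across scans, since later scans refine earlier ones. The allocation loop below sizes them from the MCU grid; assuming 16-bit coefficients (jpgd_block_t), one component's AC plane comes to:

    // Illustrative sizing for one component's AC coefficient plane,
    // as allocated by coeff_buf_open() below (8x8 coefficients per block).
    static size_t ac_plane_bytes(size_t mcus_per_row, size_t mcus_per_col,
                                 size_t h_samp, size_t v_samp) {
      return (mcus_per_row * h_samp) * (mcus_per_col * v_samp) * 64 * sizeof(short);
    }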
- for (i = 0; i < m_comps_in_frame; i++) - { - m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1); - m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8); - } - - for ( ; ; ) - { - int dc_only_scan, refinement_scan; - pDecode_block_func decode_block_func; - - if (!init_scan()) - break; - - dc_only_scan = (m_spectral_start == 0); - refinement_scan = (m_successive_high != 0); - - if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63)) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if (dc_only_scan) - { - if (m_spectral_end) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - } - else if (m_comps_in_scan != 1) /* AC scans can only contain one component */ - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if ((refinement_scan) && (m_successive_low != m_successive_high - 1)) - stop_decoding(JPGD_BAD_SOS_SUCCESSIVE); - - if (dc_only_scan) - { - if (refinement_scan) - decode_block_func = decode_block_dc_refine; - else - decode_block_func = decode_block_dc_first; - } - else - { - if (refinement_scan) - decode_block_func = decode_block_ac_refine; - else - decode_block_func = decode_block_ac_first; - } - - decode_scan(decode_block_func); - - m_bits_left = 16; - get_bits(16); - get_bits(16); - } - - m_comps_in_scan = m_comps_in_frame; - - for (i = 0; i < m_comps_in_frame; i++) - m_comp_list[i] = i; - - calc_mcu_block_order(); - } - - void jpeg_decoder::init_sequential() - { - if (!init_scan()) - stop_decoding(JPGD_UNEXPECTED_MARKER); - } - - void jpeg_decoder::decode_start() - { - init_frame(); - - if (m_progressive_flag) - init_progressive(); - else - init_sequential(); - } - - void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream) - { - init(pStream); - locate_sof_marker(); - } - - jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream) - { - if (setjmp(m_jmp_state)) - return; - decode_init(pStream); - } - - int jpeg_decoder::begin_decoding() - { - if (m_ready_flag) - return JPGD_SUCCESS; - - if (m_error_code) - return JPGD_FAILED; - - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - decode_start(); - - m_ready_flag = true; - - return JPGD_SUCCESS; - } - - jpeg_decoder::~jpeg_decoder() - { - free_all_blocks(); - } - - jpeg_decoder_file_stream::jpeg_decoder_file_stream() - { - m_pFile = NULL; - m_eof_flag = false; - m_error_flag = false; - } - - void jpeg_decoder_file_stream::close() - { - if (m_pFile) - { - fclose(m_pFile); - m_pFile = NULL; - } - - m_eof_flag = false; - m_error_flag = false; - } - - jpeg_decoder_file_stream::~jpeg_decoder_file_stream() - { - close(); - } - - bool jpeg_decoder_file_stream::open(const char *Pfilename) - { - close(); - - m_eof_flag = false; - m_error_flag = false; - -#if defined(_MSC_VER) - m_pFile = NULL; - fopen_s(&m_pFile, Pfilename, "rb"); -#else - m_pFile = fopen(Pfilename, "rb"); -#endif - return m_pFile != NULL; - } - - int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - if (!m_pFile) - return -1; - - if (m_eof_flag) - { - *pEOF_flag = true; - return 0; - } - - if (m_error_flag) - return -1; - - int bytes_read = static_cast<int>(fread(pBuf, 1, max_bytes_to_read, m_pFile)); - if (bytes_read < max_bytes_to_read) - { - if (ferror(m_pFile)) - { - m_error_flag = true; - return -1; - } - - m_eof_flag = true; - *pEOF_flag = true; - } - - return bytes_read; - } - - bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size) - { - close(); - m_pSrc_data = pSrc_data; - m_ofs = 0; - m_size = size; -
return true; - } - - int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - *pEOF_flag = false; - - if (!m_pSrc_data) - return -1; - - uint bytes_remaining = m_size - m_ofs; - if ((uint)max_bytes_to_read > bytes_remaining) - { - max_bytes_to_read = bytes_remaining; - *pEOF_flag = true; - } - - memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read); - m_ofs += max_bytes_to_read; - - return max_bytes_to_read; - } - - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps) - { - if (!actual_comps) - return NULL; - *actual_comps = 0; - - if ((!pStream) || (!width) || (!height) || (!req_comps)) - return NULL; - - if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4)) - return NULL; - - jpeg_decoder decoder(pStream); - if (decoder.get_error_code() != JPGD_SUCCESS) - return NULL; - - const int image_width = decoder.get_width(), image_height = decoder.get_height(); - *width = image_width; - *height = image_height; - *actual_comps = decoder.get_num_components(); - - if (decoder.begin_decoding() != JPGD_SUCCESS) - return NULL; - - const int dst_bpl = image_width * req_comps; - - uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height); - if (!pImage_data) - return NULL; - - for (int y = 0; y < image_height; y++) - { - const uint8* pScan_line = 0; - uint scan_line_len; - if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS) - { - jpgd_free(pImage_data); - return NULL; - } - - uint8 *pDst = pImage_data + y * dst_bpl; - - if (((req_comps == 4) && (decoder.get_num_components() == 3)) || - ((req_comps == 1) && (decoder.get_num_components() == 1))) - { - memcpy(pDst, pScan_line, dst_bpl); - } - else if (decoder.get_num_components() == 1) - { - if (req_comps == 3) - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst += 3; - } - } - else - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst[3] = 255; - pDst += 4; - } - } - } - else if (decoder.get_num_components() == 3) - { - if (req_comps == 1) - { - const int YR = 19595, YG = 38470, YB = 7471; - for (int x = 0; x < image_width; x++) - { - int r = pScan_line[x*4+0]; - int g = pScan_line[x*4+1]; - int b = pScan_line[x*4+2]; - *pDst++ = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16); - } - } - else - { - for (int x = 0; x < image_width; x++) - { - pDst[0] = pScan_line[x*4+0]; - pDst[1] = pScan_line[x*4+1]; - pDst[2] = pScan_line[x*4+2]; - pDst += 3; - } - } - } - } - - return pImage_data; - } - -// BEGIN EPIC MOD - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format) - { - jpg_format = (ERGBFormatJPG)format; -// END EPIC MOD - jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size); - return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps); - } - - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps) - { - jpgd::jpeg_decoder_file_stream file_stream; - if (!file_stream.open(pSrc_filename)) - return NULL; - return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps); - } - -} // namespace jpgd diff --git
a/crazy_functions/test_project/cpp/longcode/jpge.cpp b/crazy_functions/test_project/cpp/longcode/jpge.cpp deleted file mode 100644 index 2e26b71ed5aad0d46478fdbcd3a880be1401f946..0000000000000000000000000000000000000000 --- a/crazy_functions/test_project/cpp/longcode/jpge.cpp +++ /dev/null @@ -1,1049 +0,0 @@ -// jpge.cpp - C++ class for JPEG compression. -// Public domain, Rich Geldreich <richgel99@gmail.com> -// v1.01, Dec. 18, 2010 - Initial release -// v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.) -// v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc. -// Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03). -// v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug. -// Code tweaks to fix VS2008 static code analysis warnings (all looked harmless). -// Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02. - -#include "jpge.h" - -#include <stdlib.h> -#include <string.h> -#if PLATFORM_WINDOWS -#include <malloc.h> -#endif - -#define JPGE_MAX(a,b) (((a)>(b))?(a):(b)) -#define JPGE_MIN(a,b) (((a)<(b))?(a):(b)) - -namespace jpge { - -static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); } -static inline void jpge_free(void *p) { FMemory::Free(p); } - -// Various JPEG enums and tables. -enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 }; -enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 }; - -static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; -static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 }; -static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 }; -static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 }; -static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; -static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d }; -static uint8 s_ac_lum_val[AC_LUM_CODES] = -{ - 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0, - 0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49, - 0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, - 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5, -
0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8, - 0xf9,0xfa -}; -static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 }; -static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; -static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 }; -static uint8 s_ac_chroma_val[AC_CHROMA_CODES] = -{ - 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0, - 0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48, - 0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, - 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3, - 0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8, - 0xf9,0xfa -}; - -// Low-level helper functions. -template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); } - -const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329; -static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); } - -static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels) -{ - for ( ; num_pixels; pDst += 3, pSrc += 3, num_pixels--) - { - const int r = pSrc[0], g = pSrc[1], b = pSrc[2]; - pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16); - pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16)); - pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16)); - } -} - -static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels) -{ - for ( ; num_pixels; pDst++, pSrc += 3, num_pixels--) - pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16); -} - -static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels) -{ - for ( ; num_pixels; pDst += 3, pSrc += 4, num_pixels--) - { - const int r = pSrc[0], g = pSrc[1], b = pSrc[2]; - pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16); - pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16)); - pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16)); - } -} - -static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels) -{ - for ( ; num_pixels; pDst++, pSrc += 4, num_pixels--) - pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16); -} - -static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels) -{ - for( ; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; } -} - -// Forward DCT - DCT derived from jfdctint.
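The fixed-point forward DCT below keeps CONST_BITS = 13 fraction bits; DCT_DESCALE(x, n) is a rounding right shift, adding half of 2^n before shifting so truncation rounds to nearest. In isolation:

    // Rounding right-shift used throughout the fixed-point DCT below:
    // descale(x, n) == round(x / 2^n) for non-negative x (half rounds up).
    static int descale(int x, int n) { return (x + (1 << (n - 1))) >> n; }
    // e.g. descale(5, 1) == 3, descale(4, 1) == 2, descale(100, 4) == 6.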
-#define CONST_BITS 13 -#define ROW_BITS 2 -#define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n)) -#define DCT_MUL(var, c) (static_cast(var) * static_cast(c)) -#define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \ - int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \ - int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \ - int32 u1 = DCT_MUL(t12 + t13, 4433); \ - s2 = u1 + DCT_MUL(t13, 6270); \ - s6 = u1 + DCT_MUL(t12, -15137); \ - u1 = t4 + t7; \ - int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \ - int32 z5 = DCT_MUL(u3 + u4, 9633); \ - t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \ - t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \ - u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \ - u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \ - u3 += z5; u4 += z5; \ - s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3; - -static void DCT2D(int32 *p) -{ - int32 c, *q = p; - for (c = 7; c >= 0; c--, q += 8) - { - int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7]; - DCT1D(s0, s1, s2, s3, s4, s5, s6, s7); - q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS); - q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS); - } - for (q = p, c = 7; c >= 0; c--, q++) - { - int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8]; - DCT1D(s0, s1, s2, s3, s4, s5, s6, s7); - q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3); - q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3); - } -} - -struct sym_freq { uint m_key, m_sym_index; }; - -// Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values. -static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1) -{ - const uint cMaxPasses = 4; - uint32 hist[256 * cMaxPasses]; clear_obj(hist); - for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; } - sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1; - uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; - for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) - { - const uint32* pHist = &hist[pass << 8]; - uint offsets[256], cur_ofs = 0; - for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } - for (uint i = 0; i < num_syms; i++) - pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; - sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; - } - return pCur_syms; -} - -// calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. 
-static void calculate_minimum_redundancy(sym_freq *A, int n)
-{
-  int root, leaf, next, avbl, used, dpth;
-  if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
-  A[0].m_key += A[1].m_key; root = 0; leaf = 2;
-  for (next=1; next < n-1; next++)
-  {
-    if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
-    if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
-  }
-  A[n-2].m_key = 0;
-  for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
-  avbl = 1; used = dpth = 0; root = n-2; next = n-1;
-  while (avbl>0)
-  {
-    while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
-    while (avbl>used) { A[next--].m_key = dpth; avbl--; }
-    avbl = 2*used; dpth++; used = 0;
-  }
-}
-
-// Limits canonical Huffman code table's max code size to max_code_size.
-static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
-{
-  if (code_list_len <= 1) return;
-
-  for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
-
-  uint32 total = 0;
-  for (int i = max_code_size; i > 0; i--)
-    total += (((uint32)pNum_codes[i]) << (max_code_size - i));
-
-  while (total != (1UL << max_code_size))
-  {
-    pNum_codes[max_code_size]--;
-    for (int i = max_code_size - 1; i > 0; i--)
-    {
-      if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
-    }
-    total--;
-  }
-}
-
-// Generates an optimized Huffman table.
-void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
-{
-  sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
-  syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's
-  int num_used_syms = 1;
-  const uint32 *pSym_count = &m_huff_count[table_num][0];
-  for (int i = 0; i < table_len; i++)
-    if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
-  sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
-  calculate_minimum_redundancy(pSyms, num_used_syms);
-
-  // Count the # of symbols of each code size.
-  int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
-  for (int i = 0; i < num_used_syms; i++)
-    num_codes[pSyms[i].m_key]++;
-
-  const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
-  huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);
-
-  // Compute m_huff_bits array, which contains the # of symbols per code size.
-  clear_obj(m_huff_bits[table_num]);
-  for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
-    m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);
-
-  // Remove the dummy symbol added above, which must be in largest bucket.
-  for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
-  {
-    if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
-  }
-
-  // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
-  for (int i = num_used_syms - 1; i >= 1; i--)
-    m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
-}
-
-// JPEG marker generation.
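The marker-emission routines that follow write the header segments in a fixed order: SOI, the JFIF APP0, one or two DQTs, SOF0, the DHTs, then SOS. Each segment starts with an 0xFF byte plus a marker ID, and every segment except SOI/EOI carries a 16-bit big-endian length that counts the length field itself. A hedged Python sketch of walking such a stream, an editor's illustration that assumes a well-formed baseline file with no fill bytes between segments:

import struct

def list_markers(data: bytes):
    # Editor's sketch: yield (marker, segment_length) pairs up to SOS.
    i = 0
    while i + 1 < len(data):
        assert data[i] == 0xFF, "expected 0xFF marker prefix"
        marker = data[i + 1]
        if marker in (0xD8, 0xD9):           # SOI / EOI carry no length field
            yield marker, 0
            i += 2
            continue
        (length,) = struct.unpack(">H", data[i + 2:i + 4])
        yield marker, length
        if marker == 0xDA:                   # SOS: entropy-coded data follows
            return
        i += 2 + length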
-void jpeg_encoder::emit_byte(uint8 i) -{ - m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i); -} - -void jpeg_encoder::emit_word(uint i) -{ - emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF)); -} - -void jpeg_encoder::emit_marker(int marker) -{ - emit_byte(uint8(0xFF)); emit_byte(uint8(marker)); -} - -// Emit JFIF marker -void jpeg_encoder::emit_jfif_app0() -{ - emit_marker(M_APP0); - emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1); - emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */ - emit_byte(0); - emit_byte(1); /* Major version */ - emit_byte(1); /* Minor version */ - emit_byte(0); /* Density unit */ - emit_word(1); - emit_word(1); - emit_byte(0); /* No thumbnail image */ - emit_byte(0); -} - -// Emit quantization tables -void jpeg_encoder::emit_dqt() -{ - for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++) - { - emit_marker(M_DQT); - emit_word(64 + 1 + 2); - emit_byte(static_cast(i)); - for (int j = 0; j < 64; j++) - emit_byte(static_cast(m_quantization_tables[i][j])); - } -} - -// Emit start of frame marker -void jpeg_encoder::emit_sof() -{ - emit_marker(M_SOF0); /* baseline */ - emit_word(3 * m_num_components + 2 + 5 + 1); - emit_byte(8); /* precision */ - emit_word(m_image_y); - emit_word(m_image_x); - emit_byte(m_num_components); - for (int i = 0; i < m_num_components; i++) - { - emit_byte(static_cast(i + 1)); /* component ID */ - emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */ - emit_byte(i > 0); /* quant. table num */ - } -} - -// Emit Huffman table. -void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag) -{ - emit_marker(M_DHT); - - int length = 0; - for (int i = 1; i <= 16; i++) - length += bits[i]; - - emit_word(length + 2 + 1 + 16); - emit_byte(static_cast(index + (ac_flag << 4))); - - for (int i = 1; i <= 16; i++) - emit_byte(bits[i]); - - for (int i = 0; i < length; i++) - emit_byte(val[i]); -} - -// Emit all Huffman tables. -void jpeg_encoder::emit_dhts() -{ - emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false); - emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true); - if (m_num_components == 3) - { - emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false); - emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true); - } -} - -// emit start of scan -void jpeg_encoder::emit_sos() -{ - emit_marker(M_SOS); - emit_word(2 * m_num_components + 2 + 1 + 3); - emit_byte(m_num_components); - for (int i = 0; i < m_num_components; i++) - { - emit_byte(static_cast(i + 1)); - if (i == 0) - emit_byte((0 << 4) + 0); - else - emit_byte((1 << 4) + 1); - } - emit_byte(0); /* spectral selection */ - emit_byte(63); - emit_byte(0); -} - -// Emit all markers at beginning of image file. -void jpeg_encoder::emit_markers() -{ - emit_marker(M_SOI); - emit_jfif_app0(); - emit_dqt(); - emit_sof(); - emit_dhts(); - emit_sos(); -} - -// Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays. 
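The canonical assignment that compute_huffman_table implements below has a compact statement: within one code length, codes are consecutive integers; moving to the next length appends a zero bit, i.e. a left shift. A small Python sketch of the same rule, an editor's illustration using the DHT-style layout where bits[l] is the number of codes of length l:

def canonical_codes(bits):
    # Editor's sketch: derive (code, length) pairs from a JPEG-style bits[]
    # array, mirroring the code/si loop in compute_huffman_table below.
    code, out = 0, []
    for length in range(1, 17):
        for _ in range(bits[length]):
            out.append((code, length))
            code += 1
        code <<= 1                      # next length: shift in a zero bit
    return out

# Three 2-bit codes and two 3-bit codes -> 00, 01, 10, 110, 111.
bits = [0] * 17; bits[2] = 3; bits[3] = 2
assert canonical_codes(bits) == [(0, 2), (1, 2), (2, 2), (6, 3), (7, 3)]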
-void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val) -{ - int i, l, last_p, si; - uint8 huff_size[257]; - uint huff_code[257]; - uint code; - - int p = 0; - for (l = 1; l <= 16; l++) - for (i = 1; i <= bits[l]; i++) - huff_size[p++] = (char)l; - - huff_size[p] = 0; last_p = p; // write sentinel - - code = 0; si = huff_size[0]; p = 0; - - while (huff_size[p]) - { - while (huff_size[p] == si) - huff_code[p++] = code++; - code <<= 1; - si++; - } - - memset(codes, 0, sizeof(codes[0])*256); - memset(code_sizes, 0, sizeof(code_sizes[0])*256); - for (p = 0; p < last_p; p++) - { - codes[val[p]] = huff_code[p]; - code_sizes[val[p]] = huff_size[p]; - } -} - -// Quantization table generation. -void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc) -{ - int32 q; - if (m_params.m_quality < 50) - q = 5000 / m_params.m_quality; - else - q = 200 - m_params.m_quality * 2; - for (int i = 0; i < 64; i++) - { - int32 j = *pSrc++; j = (j * q + 50L) / 100L; - *pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255); - } -} - -// Higher-level methods. -void jpeg_encoder::first_pass_init() -{ - m_bit_buffer = 0; m_bits_in = 0; - memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0])); - m_mcu_y_ofs = 0; - m_pass_num = 1; -} - -bool jpeg_encoder::second_pass_init() -{ - compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]); - compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]); - if (m_num_components > 1) - { - compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]); - compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]); - } - first_pass_init(); - emit_markers(); - m_pass_num = 2; - return true; -} - -bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels) -{ - m_num_components = 3; - switch (m_params.m_subsampling) - { - case Y_ONLY: - { - m_num_components = 1; - m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1; - m_mcu_x = 8; m_mcu_y = 8; - break; - } - case H1V1: - { - m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1; - m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1; - m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1; - m_mcu_x = 8; m_mcu_y = 8; - break; - } - case H2V1: - { - m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1; - m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1; - m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1; - m_mcu_x = 16; m_mcu_y = 8; - break; - } - case H2V2: - { - m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2; - m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1; - m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1; - m_mcu_x = 16; m_mcu_y = 16; - } - } - - m_image_x = p_x_res; m_image_y = p_y_res; - m_image_bpp = src_channels; - m_image_bpl = m_image_x * src_channels; - m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1)); - m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1)); - m_image_bpl_xlt = m_image_x * m_num_components; - m_image_bpl_mcu = m_image_x_mcu * m_num_components; - m_mcus_per_row = m_image_x_mcu / m_mcu_x; - - if ((m_mcu_lines[0] = static_cast(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false; - for (int i = 1; i < m_mcu_y; i++) - m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu; - - compute_quant_table(m_quantization_tables[0], s_std_lum_quant); - compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? 
s_std_lum_quant : s_std_croma_quant); - - m_out_buf_left = JPGE_OUT_BUF_SIZE; - m_pOut_buf = m_out_buf; - - if (m_params.m_two_pass_flag) - { - clear_obj(m_huff_count); - first_pass_init(); - } - else - { - memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17); memcpy(m_huff_val [0+0], s_dc_lum_val, DC_LUM_CODES); - memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17); memcpy(m_huff_val [2+0], s_ac_lum_val, AC_LUM_CODES); - memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val [0+1], s_dc_chroma_val, DC_CHROMA_CODES); - memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val [2+1], s_ac_chroma_val, AC_CHROMA_CODES); - if (!second_pass_init()) return false; // in effect, skip over the first pass - } - return m_all_stream_writes_succeeded; -} - -void jpeg_encoder::load_block_8_8_grey(int x) -{ - uint8 *pSrc; - sample_array_t *pDst = m_sample_array; - x <<= 3; - for (int i = 0; i < 8; i++, pDst += 8) - { - pSrc = m_mcu_lines[i] + x; - pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128; - pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128; - } -} - -void jpeg_encoder::load_block_8_8(int x, int y, int c) -{ - uint8 *pSrc; - sample_array_t *pDst = m_sample_array; - x = (x * (8 * 3)) + c; - y <<= 3; - for (int i = 0; i < 8; i++, pDst += 8) - { - pSrc = m_mcu_lines[y + i] + x; - pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128; - pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128; - } -} - -void jpeg_encoder::load_block_16_8(int x, int c) -{ - uint8 *pSrc1, *pSrc2; - sample_array_t *pDst = m_sample_array; - x = (x * (16 * 3)) + c; - int a = 0, b = 2; - for (int i = 0; i < 16; i += 2, pDst += 8) - { - pSrc1 = m_mcu_lines[i + 0] + x; - pSrc2 = m_mcu_lines[i + 1] + x; - pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128; - pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128; - pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128; - pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128; - int temp = a; a = b; b = temp; - } -} - -void jpeg_encoder::load_block_16_8_8(int x, int c) -{ - uint8 *pSrc1; - sample_array_t *pDst = m_sample_array; - x = (x * (16 * 3)) + c; - for (int i = 0; i < 8; i++, pDst += 8) - { - pSrc1 = m_mcu_lines[i + 0] + x; - pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128; - pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128; - pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128; - pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128; - } -} - -void jpeg_encoder::load_quantized_coefficients(int component_num) -{ - int32 *q = m_quantization_tables[component_num > 0]; - int16 *pDst = m_coefficient_array; - for 
(int i = 0; i < 64; i++) - { - sample_array_t j = m_sample_array[s_zag[i]]; - if (j < 0) - { - if ((j = -j + (*q >> 1)) < *q) - *pDst++ = 0; - else - *pDst++ = static_cast(-(j / *q)); - } - else - { - if ((j = j + (*q >> 1)) < *q) - *pDst++ = 0; - else - *pDst++ = static_cast((j / *q)); - } - q++; - } -} - -void jpeg_encoder::flush_output_buffer() -{ - if (m_out_buf_left != JPGE_OUT_BUF_SIZE) - m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left); - m_pOut_buf = m_out_buf; - m_out_buf_left = JPGE_OUT_BUF_SIZE; -} - -void jpeg_encoder::put_bits(uint bits, uint len) -{ - m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len))); - while (m_bits_in >= 8) - { - uint8 c; - #define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); } - JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF)); - if (c == 0xFF) JPGE_PUT_BYTE(0); - m_bit_buffer <<= 8; - m_bits_in -= 8; - } -} - -void jpeg_encoder::code_coefficients_pass_one(int component_num) -{ - if (component_num >= 3) return; // just to shut up static analysis - int i, run_len, nbits, temp1; - int16 *src = m_coefficient_array; - uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0]; - - temp1 = src[0] - m_last_dc_val[component_num]; - m_last_dc_val[component_num] = src[0]; - if (temp1 < 0) temp1 = -temp1; - - nbits = 0; - while (temp1) - { - nbits++; temp1 >>= 1; - } - - dc_count[nbits]++; - for (run_len = 0, i = 1; i < 64; i++) - { - if ((temp1 = m_coefficient_array[i]) == 0) - run_len++; - else - { - while (run_len >= 16) - { - ac_count[0xF0]++; - run_len -= 16; - } - if (temp1 < 0) temp1 = -temp1; - nbits = 1; - while (temp1 >>= 1) nbits++; - ac_count[(run_len << 4) + nbits]++; - run_len = 0; - } - } - if (run_len) ac_count[0]++; -} - -void jpeg_encoder::code_coefficients_pass_two(int component_num) -{ - int i, j, run_len, nbits, temp1, temp2; - int16 *pSrc = m_coefficient_array; - uint *codes[2]; - uint8 *code_sizes[2]; - - if (component_num == 0) - { - codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0]; - code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0]; - } - else - { - codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1]; - code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1]; - } - - temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num]; - m_last_dc_val[component_num] = pSrc[0]; - - if (temp1 < 0) - { - temp1 = -temp1; temp2--; - } - - nbits = 0; - while (temp1) - { - nbits++; temp1 >>= 1; - } - - put_bits(codes[0][nbits], code_sizes[0][nbits]); - if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits); - - for (run_len = 0, i = 1; i < 64; i++) - { - if ((temp1 = m_coefficient_array[i]) == 0) - run_len++; - else - { - while (run_len >= 16) - { - put_bits(codes[1][0xF0], code_sizes[1][0xF0]); - run_len -= 16; - } - if ((temp2 = temp1) < 0) - { - temp1 = -temp1; - temp2--; - } - nbits = 1; - while (temp1 >>= 1) - nbits++; - j = (run_len << 4) + nbits; - put_bits(codes[1][j], code_sizes[1][j]); - put_bits(temp2 & ((1 << nbits) - 1), nbits); - run_len = 0; - } - } - if (run_len) - put_bits(codes[1][0], code_sizes[1][0]); -} - -void jpeg_encoder::code_block(int component_num) -{ - DCT2D(m_sample_array); - load_quantized_coefficients(component_num); - if (m_pass_num == 1) - code_coefficients_pass_one(component_num); - else - 
code_coefficients_pass_two(component_num); -} - -void jpeg_encoder::process_mcu_row() -{ - if (m_num_components == 1) - { - for (int i = 0; i < m_mcus_per_row; i++) - { - load_block_8_8_grey(i); code_block(0); - } - } - else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) - { - for (int i = 0; i < m_mcus_per_row; i++) - { - load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2); - } - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) - { - for (int i = 0; i < m_mcus_per_row; i++) - { - load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0); - load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2); - } - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) - { - for (int i = 0; i < m_mcus_per_row; i++) - { - load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0); - load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0); - load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2); - } - } -} - -bool jpeg_encoder::terminate_pass_one() -{ - optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES); - if (m_num_components > 1) - { - optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES); - } - return second_pass_init(); -} - -bool jpeg_encoder::terminate_pass_two() -{ - put_bits(0x7F, 7); - flush_output_buffer(); - emit_marker(M_EOI); - m_pass_num++; // purposely bump up m_pass_num, for debugging - return true; -} - -bool jpeg_encoder::process_end_of_image() -{ - if (m_mcu_y_ofs) - { - if (m_mcu_y_ofs < 16) // check here just to shut up static analysis - { - for (int i = m_mcu_y_ofs; i < m_mcu_y; i++) - memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu); - } - - process_mcu_row(); - } - - if (m_pass_num == 1) - return terminate_pass_one(); - else - return terminate_pass_two(); -} - -void jpeg_encoder::load_mcu(const void *pSrc) -{ - const uint8* Psrc = reinterpret_cast(pSrc); - - uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst - - if (m_num_components == 1) - { - if (m_image_bpp == 4) - RGBA_to_Y(pDst, Psrc, m_image_x); - else if (m_image_bpp == 3) - RGB_to_Y(pDst, Psrc, m_image_x); - else - memcpy(pDst, Psrc, m_image_x); - } - else - { - if (m_image_bpp == 4) - RGBA_to_YCC(pDst, Psrc, m_image_x); - else if (m_image_bpp == 3) - RGB_to_YCC(pDst, Psrc, m_image_x); - else - Y_to_YCC(pDst, Psrc, m_image_x); - } - - // Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16 - if (m_num_components == 1) - memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x); - else - { - const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2]; - uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt; - for (int i = m_image_x; i < m_image_x_mcu; i++) - { - *q++ = y; *q++ = cb; *q++ = cr; - } - } - - if (++m_mcu_y_ofs == m_mcu_y) - { - process_mcu_row(); - m_mcu_y_ofs = 0; - } -} - -void jpeg_encoder::clear() -{ - m_mcu_lines[0] = NULL; - m_pass_num = 0; - m_all_stream_writes_succeeded = true; -} - -jpeg_encoder::jpeg_encoder() -{ - clear(); -} - -jpeg_encoder::~jpeg_encoder() -{ - deinit(); -} - -bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params 
&comp_params) -{ - deinit(); - if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false; - m_pStream = pStream; - m_params = comp_params; - return jpg_open(width, height, src_channels); -} - -void jpeg_encoder::deinit() -{ - jpge_free(m_mcu_lines[0]); - clear(); -} - -bool jpeg_encoder::process_scanline(const void* pScanline) -{ - if ((m_pass_num < 1) || (m_pass_num > 2)) return false; - if (m_all_stream_writes_succeeded) - { - if (!pScanline) - { - if (!process_end_of_image()) return false; - } - else - { - load_mcu(pScanline); - } - } - return m_all_stream_writes_succeeded; -} - -// Higher level wrappers/examples (optional). -#include - -class cfile_stream : public output_stream -{ - cfile_stream(const cfile_stream &); - cfile_stream &operator= (const cfile_stream &); - - FILE* m_pFile; - bool m_bStatus; - -public: - cfile_stream() : m_pFile(NULL), m_bStatus(false) { } - - virtual ~cfile_stream() - { - close(); - } - - bool open(const char *pFilename) - { - close(); -#if defined(_MSC_VER) - if (fopen_s(&m_pFile, pFilename, "wb") != 0) - { - return false; - } -#else - m_pFile = fopen(pFilename, "wb"); -#endif - m_bStatus = (m_pFile != NULL); - return m_bStatus; - } - - bool close() - { - if (m_pFile) - { - if (fclose(m_pFile) == EOF) - { - m_bStatus = false; - } - m_pFile = NULL; - } - return m_bStatus; - } - - virtual bool put_buf(const void* pBuf, int64_t len) - { - m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1); - return m_bStatus; - } - - uint get_size() const - { - return m_pFile ? ftell(m_pFile) : 0; - } -}; - -// Writes JPEG image to file. -bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params) -{ - cfile_stream dst_stream; - if (!dst_stream.open(pFilename)) - return false; - - jpge::jpeg_encoder dst_image; - if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params)) - return false; - - for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++) - { - for (int64_t i = 0; i < height; i++) - { - // i, width, and num_channels are all 64bit - const uint8* pBuf = pImage_data + i * width * num_channels; - if (!dst_image.process_scanline(pBuf)) - return false; - } - if (!dst_image.process_scanline(NULL)) - return false; - } - - dst_image.deinit(); - - return dst_stream.close(); -} - -class memory_stream : public output_stream -{ - memory_stream(const memory_stream &); - memory_stream &operator= (const memory_stream &); - - uint8 *m_pBuf; - uint64_t m_buf_size, m_buf_ofs; - -public: - memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { } - - virtual ~memory_stream() { } - - virtual bool put_buf(const void* pBuf, int64_t len) - { - uint64_t buf_remaining = m_buf_size - m_buf_ofs; - if ((uint64_t)len > buf_remaining) - return false; - memcpy(m_pBuf + m_buf_ofs, pBuf, len); - m_buf_ofs += len; - return true; - } - - uint64_t get_size() const - { - return m_buf_ofs; - } -}; - -bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params) -{ - if ((!pDstBuf) || (!buf_size)) - return false; - - memory_stream dst_stream(pDstBuf, buf_size); - - buf_size = 0; - - jpge::jpeg_encoder dst_image; - if (!dst_image.init(&dst_stream, width, height, num_channels, 
comp_params)) - return false; - - for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++) - { - for (int64_t i = 0; i < height; i++) - { - const uint8* pScanline = pImage_data + i * width * num_channels; - if (!dst_image.process_scanline(pScanline)) - return false; - } - if (!dst_image.process_scanline(NULL)) - return false; - } - - dst_image.deinit(); - - buf_size = dst_stream.get_size(); - return true; -} - -} // namespace jpge \ No newline at end of file diff --git a/crazy_functions/test_project/cpp/longcode/prod_cons.h b/crazy_functions/test_project/cpp/longcode/prod_cons.h deleted file mode 100644 index 28d99bdaab941a54a81a41846c14d7dcc6f385af..0000000000000000000000000000000000000000 --- a/crazy_functions/test_project/cpp/longcode/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/crazy_functions/vector_fns/__init__.py b/crazy_functions/vector_fns/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/crazy_functions/vector_fns/general_file_loader.py b/crazy_functions/vector_fns/general_file_loader.py deleted file mode 100644 index a512c483c8d1048fbb9e517fac7bcc0f83bfdb88..0000000000000000000000000000000000000000 --- a/crazy_functions/vector_fns/general_file_loader.py +++ /dev/null @@ -1,70 +0,0 @@ -# From project chatglm-langchain - - -from langchain.document_loaders import UnstructuredFileLoader -from langchain.text_splitter import CharacterTextSplitter -import re -from typing import List - -class ChineseTextSplitter(CharacterTextSplitter): - def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs): - super().__init__(**kwargs) - self.pdf = pdf - self.sentence_size = sentence_size - - def split_text1(self, text: str) -> List[str]: - if self.pdf: - text = re.sub(r"\n{3,}", "\n", text) - text = re.sub('\s', ' ', text) - text = text.replace("\n\n", "") - sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))') # del :; - sent_list = [] - for ele in sent_sep_pattern.split(text): - if sent_sep_pattern.match(ele) and sent_list: - sent_list[-1] += ele - elif ele: - sent_list.append(ele) - return sent_list - - def split_text(self, text: str) -> List[str]: ##此处需要进一步优化逻辑 - if self.pdf: - text = re.sub(r"\n{3,}", r"\n", text) - text = re.sub('\s', " ", text) - text = re.sub("\n\n", "", text) - - text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text) # 单字符断句符 - text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text) # 英文省略号 - text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text) # 中文省略号 - text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text) - # 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\n放到双引号后,注意前面的几句都小心保留了双引号 - text = text.rstrip() # 段尾如果有多余的\n就去掉它 - # 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。 - ls = [i for i in text.split("\n") if i] - for ele in ls: - if len(ele) > self.sentence_size: - ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele) - ele1_ls = ele1.split("\n") - for ele_ele1 in ele1_ls: - if len(ele_ele1) > self.sentence_size: - ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1) - ele2_ls = ele_ele2.split("\n") - for ele_ele2 in ele2_ls: - if len(ele_ele2) > self.sentence_size: - ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2) - ele2_id = ele2_ls.index(ele_ele2) - ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[ - ele2_id + 1:] - ele_id = ele1_ls.index(ele_ele1) - ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:] - - id = ls.index(ele) - ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:] - return ls - -def load_file(filepath, sentence_size): - loader = UnstructuredFileLoader(filepath, mode="elements") - textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size) - docs = loader.load_and_split(text_splitter=textsplitter) - # write_check_file(filepath, docs) - 
return docs - diff --git a/crazy_functions/vector_fns/vector_database.py b/crazy_functions/vector_fns/vector_database.py deleted file mode 100644 index cffa22cfeff3ccf9d3071b5cf534f5b6be22870f..0000000000000000000000000000000000000000 --- a/crazy_functions/vector_fns/vector_database.py +++ /dev/null @@ -1,338 +0,0 @@ -# From project chatglm-langchain - -import threading -from toolbox import Singleton -import os -import shutil -import os -import uuid -import tqdm -from langchain.vectorstores import FAISS -from langchain.docstore.document import Document -from typing import List, Tuple -import numpy as np -from crazy_functions.vector_fns.general_file_loader import load_file - -embedding_model_dict = { - "ernie-tiny": "nghuyong/ernie-3.0-nano-zh", - "ernie-base": "nghuyong/ernie-3.0-base-zh", - "text2vec-base": "shibing624/text2vec-base-chinese", - "text2vec": "GanymedeNil/text2vec-large-chinese", -} - -# Embedding model name -EMBEDDING_MODEL = "text2vec" - -# Embedding running device -EMBEDDING_DEVICE = "cpu" - -# 基于上下文的prompt模版,请务必保留"{question}"和"{context}" -PROMPT_TEMPLATE = """已知信息: -{context} - -根据上述已知信息,简洁和专业的来回答用户的问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题” 或 “没有提供足够的相关信息”,不允许在答案中添加编造成分,答案请使用中文。 问题是:{question}""" - -# 文本分句长度 -SENTENCE_SIZE = 100 - -# 匹配后单段上下文长度 -CHUNK_SIZE = 250 - -# LLM input history length -LLM_HISTORY_LEN = 3 - -# return top-k text chunk from vector store -VECTOR_SEARCH_TOP_K = 5 - -# 知识检索内容相关度 Score, 数值范围约为0-1100,如果为0,则不生效,经测试设置为小于500时,匹配结果更精准 -VECTOR_SEARCH_SCORE_THRESHOLD = 0 - -NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data") - -FLAG_USER_NAME = uuid.uuid4().hex - -# 是否开启跨域,默认为False,如果需要开启,请设置为True -# is open cross domain -OPEN_CROSS_DOMAIN = False - -def similarity_search_with_score_by_vector( - self, embedding: List[float], k: int = 4 -) -> List[Tuple[Document, float]]: - - def seperate_list(ls: List[int]) -> List[List[int]]: - lists = [] - ls1 = [ls[0]] - for i in range(1, len(ls)): - if ls[i - 1] + 1 == ls[i]: - ls1.append(ls[i]) - else: - lists.append(ls1) - ls1 = [ls[i]] - lists.append(ls1) - return lists - - scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k) - docs = [] - id_set = set() - store_len = len(self.index_to_docstore_id) - for j, i in enumerate(indices[0]): - if i == -1 or 0 < self.score_threshold < scores[0][j]: - # This happens when not enough docs are returned. 
- continue - _id = self.index_to_docstore_id[i] - doc = self.docstore.search(_id) - if not self.chunk_conent: - if not isinstance(doc, Document): - raise ValueError(f"Could not find document for id {_id}, got {doc}") - doc.metadata["score"] = int(scores[0][j]) - docs.append(doc) - continue - id_set.add(i) - docs_len = len(doc.page_content) - for k in range(1, max(i, store_len - i)): - break_flag = False - for l in [i + k, i - k]: - if 0 <= l < len(self.index_to_docstore_id): - _id0 = self.index_to_docstore_id[l] - doc0 = self.docstore.search(_id0) - if docs_len + len(doc0.page_content) > self.chunk_size: - break_flag = True - break - elif doc0.metadata["source"] == doc.metadata["source"]: - docs_len += len(doc0.page_content) - id_set.add(l) - if break_flag: - break - if not self.chunk_conent: - return docs - if len(id_set) == 0 and self.score_threshold > 0: - return [] - id_list = sorted(list(id_set)) - id_lists = seperate_list(id_list) - for id_seq in id_lists: - for id in id_seq: - if id == id_seq[0]: - _id = self.index_to_docstore_id[id] - doc = self.docstore.search(_id) - else: - _id0 = self.index_to_docstore_id[id] - doc0 = self.docstore.search(_id0) - doc.page_content += " " + doc0.page_content - if not isinstance(doc, Document): - raise ValueError(f"Could not find document for id {_id}, got {doc}") - doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]]) - doc.metadata["score"] = int(doc_score) - docs.append(doc) - return docs - - -class LocalDocQA: - llm: object = None - embeddings: object = None - top_k: int = VECTOR_SEARCH_TOP_K - chunk_size: int = CHUNK_SIZE - chunk_conent: bool = True - score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD - - def init_cfg(self, - top_k=VECTOR_SEARCH_TOP_K, - ): - - self.llm = None - self.top_k = top_k - - def init_knowledge_vector_store(self, - filepath, - vs_path: str or os.PathLike = None, - sentence_size=SENTENCE_SIZE, - text2vec=None): - loaded_files = [] - failed_files = [] - if isinstance(filepath, str): - if not os.path.exists(filepath): - print("路径不存在") - return None - elif os.path.isfile(filepath): - file = os.path.split(filepath)[-1] - try: - docs = load_file(filepath, SENTENCE_SIZE) - print(f"{file} 已成功加载") - loaded_files.append(filepath) - except Exception as e: - print(e) - print(f"{file} 未能成功加载") - return None - elif os.path.isdir(filepath): - docs = [] - for file in tqdm(os.listdir(filepath), desc="加载文件"): - fullfilepath = os.path.join(filepath, file) - try: - docs += load_file(fullfilepath, SENTENCE_SIZE) - loaded_files.append(fullfilepath) - except Exception as e: - print(e) - failed_files.append(file) - - if len(failed_files) > 0: - print("以下文件未能成功加载:") - for file in failed_files: - print(f"{file}\n") - - else: - docs = [] - for file in filepath: - docs += load_file(file, SENTENCE_SIZE) - print(f"{file} 已成功加载") - loaded_files.append(file) - - if len(docs) > 0: - print("文件加载完毕,正在生成向量库") - if vs_path and os.path.isdir(vs_path): - try: - self.vector_store = FAISS.load_local(vs_path, text2vec) - self.vector_store.add_documents(docs) - except: - self.vector_store = FAISS.from_documents(docs, text2vec) - else: - self.vector_store = FAISS.from_documents(docs, text2vec) # docs 为Document列表 - - self.vector_store.save_local(vs_path) - return vs_path, loaded_files - else: - raise RuntimeError("文件加载失败,请检查文件格式是否正确") - - def get_loaded_file(self, vs_path): - ds = self.vector_store.docstore - return set([ds._dict[k].metadata['source'].split(vs_path)[-1] for k in ds._dict]) - - - # query 
查询内容 - # vs_path 知识库路径 - # chunk_conent 是否启用上下文关联 - # score_threshold 搜索匹配score阈值 - # vector_search_top_k 搜索知识库内容条数,默认搜索5条结果 - # chunk_sizes 匹配单段内容的连接上下文长度 - def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent, - score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD, - vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE, - text2vec=None): - self.vector_store = FAISS.load_local(vs_path, text2vec) - self.vector_store.chunk_conent = chunk_conent - self.vector_store.score_threshold = score_threshold - self.vector_store.chunk_size = chunk_size - - embedding = self.vector_store.embedding_function.embed_query(query) - related_docs_with_score = similarity_search_with_score_by_vector(self.vector_store, embedding, k=vector_search_top_k) - - if not related_docs_with_score: - response = {"query": query, - "source_documents": []} - return response, "" - # prompt = f"{query}. You should answer this question using information from following documents: \n\n" - prompt = f"{query}. 你必须利用以下文档中包含的信息回答这个问题: \n\n---\n\n" - prompt += "\n\n".join([f"({k}): " + doc.page_content for k, doc in enumerate(related_docs_with_score)]) - prompt += "\n\n---\n\n" - prompt = prompt.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - # print(prompt) - response = {"query": query, "source_documents": related_docs_with_score} - return response, prompt - - - - -def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_conent, one_content_segmentation, text2vec): - for file in files: - assert os.path.exists(file), "输入文件不存在:" + file - import nltk - if NLTK_DATA_PATH not in nltk.data.path: nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path - local_doc_qa = LocalDocQA() - local_doc_qa.init_cfg() - filelist = [] - if not os.path.exists(os.path.join(vs_path, vs_id)): - os.makedirs(os.path.join(vs_path, vs_id)) - for file in files: - file_name = file.name if not isinstance(file, str) else file - filename = os.path.split(file_name)[-1] - shutil.copyfile(file_name, os.path.join(vs_path, vs_id, filename)) - filelist.append(os.path.join(vs_path, vs_id, filename)) - vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store(filelist, os.path.join(vs_path, vs_id), sentence_size, text2vec) - - if len(loaded_files): - file_status = f"已添加 {'、'.join([os.path.split(i)[-1] for i in loaded_files if i])} 内容至知识库,并已加载知识库,请开始提问" - else: - pass - # file_status = "文件未成功加载,请重新上传文件" - # print(file_status) - return local_doc_qa, vs_path - -@Singleton -class knowledge_archive_interface(): - def __init__(self) -> None: - self.threadLock = threading.Lock() - self.current_id = "" - self.kai_path = None - self.qa_handle = None - self.text2vec_large_chinese = None - - def get_chinese_text2vec(self): - if self.text2vec_large_chinese is None: - # < -------------------预热文本向量化模组--------------- > - from toolbox import ProxyNetworkActivate - print('Checking Text2vec ...') - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 - self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese") - - return self.text2vec_large_chinese - - - def feed_archive(self, file_manifest, vs_path, id="default"): - self.threadLock.acquire() - # import uuid - self.current_id = id - self.qa_handle, self.kai_path = construct_vector_store( - vs_id=self.current_id, - vs_path=vs_path, - files=file_manifest, - sentence_size=100, - history=[], - one_conent="", - one_content_segmentation="", - text2vec = 
self.get_chinese_text2vec(), - ) - self.threadLock.release() - - def get_current_archive_id(self): - return self.current_id - - def get_loaded_file(self, vs_path): - return self.qa_handle.get_loaded_file(vs_path) - - def answer_with_archive_by_id(self, txt, id, vs_path): - self.threadLock.acquire() - if not self.current_id == id: - self.current_id = id - self.qa_handle, self.kai_path = construct_vector_store( - vs_id=self.current_id, - vs_path=vs_path, - files=[], - sentence_size=100, - history=[], - one_conent="", - one_content_segmentation="", - text2vec = self.get_chinese_text2vec(), - ) - VECTOR_SEARCH_SCORE_THRESHOLD = 0 - VECTOR_SEARCH_TOP_K = 4 - CHUNK_SIZE = 512 - resp, prompt = self.qa_handle.get_knowledge_based_conent_test( - query = txt, - vs_path = self.kai_path, - score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD, - vector_search_top_k=VECTOR_SEARCH_TOP_K, - chunk_conent=True, - chunk_size=CHUNK_SIZE, - text2vec = self.get_chinese_text2vec(), - ) - self.threadLock.release() - return resp, prompt \ No newline at end of file diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py deleted file mode 100644 index f33644d9ad61c29c6809198a4b0d7466a9d98e48..0000000000000000000000000000000000000000 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ /dev/null @@ -1,114 +0,0 @@ -from pydantic import BaseModel, Field -from typing import List -from toolbox import update_ui_lastest_msg, disable_auto_promotion -from request_llms.bridge_all import predict_no_ui_long_connection -from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError -import copy, json, pickle, os, sys, time - - -def read_avail_plugin_enum(): - from crazy_functional import get_crazy_functions - plugin_arr = get_crazy_functions() - # remove plugins with out explaination - plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v} - plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)} - plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)} - plugin_arr_dict_parse = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)} - plugin_arr_dict_parse.update({f"F_{i}":v for i, v in enumerate(plugin_arr.values(), start=1)}) - prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2) - prompt = "\n\nThe defination of PluginEnum:\nPluginEnum=" + prompt - return prompt, plugin_arr_dict, plugin_arr_dict_parse - -def wrap_code(txt): - txt = txt.replace('```','') - return f"\n```\n{txt}\n```\n" - -def have_any_recent_upload_files(chatbot): - _5min = 5 * 60 - if not chatbot: return False # chatbot is None - most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) - if not most_recent_uploaded: return False # most_recent_uploaded is None - if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new - else: return False # most_recent_uploaded is too old - -def get_recent_file_prompt_support(chatbot): - most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) - path = most_recent_uploaded['path'] - prompt = "\nAdditional Information:\n" - prompt = "In case that this plugin requires a path or a file as argument," - prompt += f"it is important for you to know that the user has recently uploaded a file, located at: `{path}`" - prompt += f"Only use it when necessary, otherwise, you can ignore this file." 
- return prompt - -def get_inputs_show_user(inputs, plugin_arr_enum_prompt): - # remove plugin_arr_enum_prompt from inputs string - inputs_show_user = inputs.replace(plugin_arr_enum_prompt, "") - inputs_show_user += plugin_arr_enum_prompt[:200] + '...' - inputs_show_user += '\n...\n' - inputs_show_user += '...\n' - inputs_show_user += '...}' - return inputs_show_user - -def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): - plugin_arr_enum_prompt, plugin_arr_dict, plugin_arr_dict_parse = read_avail_plugin_enum() - class Plugin(BaseModel): - plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000") - reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfy user requirement most") - # ⭐ ⭐ ⭐ 选择插件 - yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0) - gpt_json_io = GptJsonIO(Plugin) - gpt_json_io.format_instructions = "The format of your output should be a json that can be parsed by json.loads.\n" - gpt_json_io.format_instructions += """Output example: {"plugin_selection":"F_1234", "reason_of_selection":"F_1234 plugin satisfy user requirement most"}\n""" - gpt_json_io.format_instructions += "The plugins you are authorized to use are listed below:\n" - gpt_json_io.format_instructions += plugin_arr_enum_prompt - inputs = "Choose the correct plugin according to user requirements, the user requirement is: \n\n" + \ - ">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions - - run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) - try: - gpt_reply = run_gpt_fn(inputs, "") - plugin_sel = gpt_json_io.generate_output_auto_repair(gpt_reply, run_gpt_fn) - except JsonStringError: - msg = f"抱歉, {llm_kwargs['llm_model']}无法理解您的需求。" - msg += "请求的Prompt为:\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt)) - msg += "语言模型回复为:\n" + wrap_code(gpt_reply) - msg += "\n但您可以尝试再试一次\n" - yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) - return - if plugin_sel.plugin_selection not in plugin_arr_dict_parse: - msg = f"抱歉, 找不到合适插件执行该任务, 或者{llm_kwargs['llm_model']}无法理解您的需求。" - msg += f"语言模型{llm_kwargs['llm_model']}选择了不存在的插件:\n" + wrap_code(gpt_reply) - msg += "\n但您可以尝试再试一次\n" - yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) - return - - # ⭐ ⭐ ⭐ 确认插件参数 - if not have_any_recent_upload_files(chatbot): - appendix_info = "" - else: - appendix_info = get_recent_file_prompt_support(chatbot) - - plugin = plugin_arr_dict_parse[plugin_sel.plugin_selection] - yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0) - class PluginExplicit(BaseModel): - plugin_selection: str = plugin_sel.plugin_selection - plugin_arg: str = Field(description="The argument of the plugin.", default="") - gpt_json_io = GptJsonIO(PluginExplicit) - gpt_json_io.format_instructions += "The information about this plugin is:" + plugin["Info"] - inputs = f"A plugin named {plugin_sel.plugin_selection} is selected, " + \ - "you should extract plugin_arg from the user requirement, the user requirement is: \n\n" + \ - ">> " + (txt + appendix_info).rstrip('\n').replace('\n','\n>> ') + '\n\n' + \ - gpt_json_io.format_instructions - 
run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) - plugin_sel = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) - - - # ⭐ ⭐ ⭐ 执行插件 - fn = plugin['Function'] - fn_name = fn.__name__ - msg = f'{llm_kwargs["llm_model"]}为您选择了插件: `{fn_name}`\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}\n\n假如偏离了您的要求,按停止键终止。' - yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2) - yield from fn(plugin_sel.plugin_arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, -1) - return \ No newline at end of file diff --git a/crazy_functions/vt_fns/vt_modify_config.py b/crazy_functions/vt_fns/vt_modify_config.py deleted file mode 100644 index 58a8531e8ef8fff970ecc002ae2a0c71ec313a1d..0000000000000000000000000000000000000000 --- a/crazy_functions/vt_fns/vt_modify_config.py +++ /dev/null @@ -1,81 +0,0 @@ -from pydantic import BaseModel, Field -from typing import List -from toolbox import update_ui_lastest_msg, get_conf -from request_llms.bridge_all import predict_no_ui_long_connection -from crazy_functions.json_fns.pydantic_io import GptJsonIO -import copy, json, pickle, os, sys - - -def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): - ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG') - if not ALLOW_RESET_CONFIG: - yield from update_ui_lastest_msg( - lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", - chatbot=chatbot, history=history, delay=2 - ) - return - - # ⭐ ⭐ ⭐ 读取可配置项目条目 - names = {} - from enum import Enum - import config - for k, v in config.__dict__.items(): - if k.startswith('__'): continue - names.update({k:k}) - # if len(names) > 20: break # 限制最多前10个配置项,如果太多了会导致gpt无法理解 - - ConfigOptions = Enum('ConfigOptions', names) - class ModifyConfigurationIntention(BaseModel): - which_config_to_modify: ConfigOptions = Field(description="the name of the configuration to modify, you must choose from one of the ConfigOptions enum.", default=None) - new_option_value: str = Field(description="the new value of the option", default=None) - - # ⭐ ⭐ ⭐ 分析用户意图 - yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n读取新配置中", chatbot=chatbot, history=history, delay=0) - gpt_json_io = GptJsonIO(ModifyConfigurationIntention) - inputs = "Analyze how to change configuration according to following user input, answer me with json: \n\n" + \ - ">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \ - gpt_json_io.format_instructions - - run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) - user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn) - - explicit_conf = user_intention.which_config_to_modify.value - - ok = (explicit_conf in txt) - if ok: - yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}", - chatbot=chatbot, history=history, delay=1 - ) - yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}\n\n正在修改配置中", - chatbot=chatbot, history=history, delay=2 - ) - - # ⭐ ⭐ ⭐ 立即应用配置 - from toolbox import set_conf - set_conf(explicit_conf, user_intention.new_option_value) - - yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n配置修改完成,重新页面即可生效。", chatbot=chatbot, 
history=history, delay=1 - ) - else: - yield from update_ui_lastest_msg( - lastmsg=f"失败,如果需要配置{explicit_conf},您需要明确说明并在指令中提到它。", chatbot=chatbot, history=history, delay=5 - ) - -def modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): - ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG') - if not ALLOW_RESET_CONFIG: - yield from update_ui_lastest_msg( - lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", - chatbot=chatbot, history=history, delay=2 - ) - return - - yield from modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention) - yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n配置修改完成,五秒后即将重启!若出现报错请无视即可。", chatbot=chatbot, history=history, delay=5 - ) - os.execl(sys.executable, sys.executable, *sys.argv) diff --git a/crazy_functions/vt_fns/vt_state.py b/crazy_functions/vt_fns/vt_state.py deleted file mode 100644 index 18187286383ce2f3e881510852cf3aba7e6c43d1..0000000000000000000000000000000000000000 --- a/crazy_functions/vt_fns/vt_state.py +++ /dev/null @@ -1,28 +0,0 @@ -import pickle - -class VoidTerminalState(): - def __init__(self): - self.reset_state() - - def reset_state(self): - self.has_provided_explaination = False - - def lock_plugin(self, chatbot): - chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端' - chatbot._cookies['plugin_state'] = pickle.dumps(self) - - def unlock_plugin(self, chatbot): - self.reset_state() - chatbot._cookies['lock_plugin'] = None - chatbot._cookies['plugin_state'] = pickle.dumps(self) - - def set_state(self, chatbot, key, value): - setattr(self, key, value) - chatbot._cookies['plugin_state'] = pickle.dumps(self) - - def get_state(chatbot): - state = chatbot._cookies.get('plugin_state', None) - if state is not None: state = pickle.loads(state) - else: state = VoidTerminalState() - state.chatbot = chatbot - return state \ No newline at end of file diff --git "a/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" "b/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" deleted file mode 100644 index c368b7d66bced9c4ffde805f1f87d367d4c301ee..0000000000000000000000000000000000000000 --- "a/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" +++ /dev/null @@ -1,191 +0,0 @@ -from toolbox import update_ui, get_log_folder -from toolbox import write_history_to_file, promote_file_to_downloadzone -from toolbox import CatchException, report_exception, get_conf -import re, requests, unicodedata, os -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -def download_arxiv_(url_pdf): - if 'arxiv.org' not in url_pdf: - if ('.' 
in url_pdf) and ('/' not in url_pdf): - new_url = 'https://arxiv.org/abs/'+url_pdf - print('下载编号:', url_pdf, '自动定位:', new_url) - # download_arxiv_(new_url) - return download_arxiv_(new_url) - else: - print('不能识别的URL!') - return None - if 'abs' in url_pdf: - url_pdf = url_pdf.replace('abs', 'pdf') - url_pdf = url_pdf + '.pdf' - - url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs') - title, other_info = get_name(_url_=url_abs) - - paper_id = title.split()[0] # '[1712.00559]' - if '2' in other_info['year']: - title = other_info['year'] + ' ' + title - - known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI'] - for k in known_conf: - if k in other_info['comment']: - title = k + ' ' + title - - download_dir = get_log_folder(plugin_name='arxiv') - os.makedirs(download_dir, exist_ok=True) - - title_str = title.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - - requests_pdf_url = url_pdf - file_path = download_dir+title_str - - print('下载中') - proxies = get_conf('proxies') - r = requests.get(requests_pdf_url, proxies=proxies) - with open(file_path, 'wb+') as f: - f.write(r.content) - print('下载完成') - - # print('输出下载命令:','aria2c -o \"%s\" %s'%(title_str,url_pdf)) - # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str,url_pdf), shell=True) - - x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors']) - x = x.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - return file_path, other_info - - -def get_name(_url_): - import os - from bs4 import BeautifulSoup - print('正在获取文献名!') - print(_url_) - - # arxiv_recall = {} - # if os.path.exists('./arxiv_recall.pkl'): - # with open('./arxiv_recall.pkl', 'rb') as f: - # arxiv_recall = pickle.load(f) - - # if _url_ in arxiv_recall: - # print('在缓存中') - # return arxiv_recall[_url_] - - proxies = get_conf('proxies') - res = requests.get(_url_, proxies=proxies) - - bs = BeautifulSoup(res.text, 'html.parser') - other_details = {} - - # get year - try: - year = bs.find_all(class_='dateline')[0].text - year = re.search(r'(\d{4})', year, re.M | re.I).group(1) - other_details['year'] = year - abstract = bs.find_all(class_='abstract mathjax')[0].text - other_details['abstract'] = abstract - except: - other_details['year'] = '' - print('年份获取失败') - - # get author - try: - authors = bs.find_all(class_='authors')[0].text - authors = authors.split('Authors:')[1] - other_details['authors'] = authors - except: - other_details['authors'] = '' - print('authors获取失败') - - # get comment - try: - comment = bs.find_all(class_='metatable')[0].text - real_comment = None - for item in comment.replace('\n', ' ').split(' '): - if 'Comments' in item: - real_comment = item - if real_comment is not None: - other_details['comment'] = real_comment - else: - other_details['comment'] = '' - except: - other_details['comment'] = '' - print('年份获取失败') - - title_str = BeautifulSoup( - res.text, 'html.parser').find('title').contents[0] - print('获取成功:', title_str) - # arxiv_recall[_url_] = (title_str+'.pdf', other_details) - # with open('./arxiv_recall.pkl', 'wb') as f: - # pickle.dump(arxiv_recall, f) - - return title_str+'.pdf', other_details - - - -@CatchException -def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - - CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……" - import glob - 
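The URL branching at the top of `download_arxiv_` above condenses to one normalization rule. A minimal sketch, with an illustrative helper name (`normalize_arxiv_pdf_url` is not part of the original file) and the original's blunt `'abs'` to `'pdf'` substring replacement kept intact:

```python
def normalize_arxiv_pdf_url(ref: str) -> str:
    # Condenses the branching at the top of download_arxiv_ above:
    # a bare ID such as "1712.00559" is promoted to an /abs/ page URL,
    # and an abs URL is then rewritten into the direct .pdf link.
    if 'arxiv.org' not in ref:
        if ('.' in ref) and ('/' not in ref):
            ref = 'https://arxiv.org/abs/' + ref  # bare arXiv ID
        else:
            raise ValueError(f'unrecognized arXiv reference: {ref}')
    if 'abs' in ref:
        ref = ref.replace('abs', 'pdf') + '.pdf'
    return ref

assert normalize_arxiv_pdf_url('1712.00559') == 'https://arxiv.org/pdf/1712.00559.pdf'
```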
import os - - # 基本信息:功能、贡献者 - chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import bs4 - except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 提取摘要,下载PDF文档 - try: - pdf_path, info = download_arxiv_(txt) - except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"下载pdf文件未成功") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 翻译摘要等 - i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}" - i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - msg = '正常' - # ** gpt request ** - # 单线,获取文章meta信息 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="Your job is to collect information from materials and translate to Chinese。", - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - promote_file_to_downloadzone(pdf_path, chatbot=chatbot) - - chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载")) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - diff --git "a/crazy_functions/\344\272\222\345\212\250\345\260\217\346\270\270\346\210\217.py" "b/crazy_functions/\344\272\222\345\212\250\345\260\217\346\270\270\346\210\217.py" deleted file mode 100644 index 131e9c91954d164f96b1826869eac6477fe4de5f..0000000000000000000000000000000000000000 --- "a/crazy_functions/\344\272\222\345\212\250\345\260\217\346\270\270\346\210\217.py" +++ /dev/null @@ -1,40 +0,0 @@ -from toolbox import CatchException, update_ui, update_ui_lastest_msg -from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from request_llms.bridge_all import predict_no_ui_long_connection -from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing - -@CatchException -def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory - # 清空历史 - history = [] - # 选择游戏 - cls = MiniGame_ResumeStory - # 如果之前已经初始化了游戏实例,则继续该实例;否则重新初始化 - state = cls.sync_state(chatbot, - llm_kwargs, - cls, - plugin_name='MiniGame_ResumeStory', - callback_fn='crazy_functions.互动小游戏->随机小游戏', - lock_plugin=True - ) - yield from state.continue_game(prompt, chatbot, history) - - -@CatchException -def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art - # 清空历史 - history = [] - # 选择游戏 - cls = MiniGame_ASCII_Art - # 如果之前已经初始化了游戏实例,则继续该实例;否则重新初始化 - state = cls.sync_state(chatbot, - llm_kwargs, - cls, - plugin_name='MiniGame_ASCII_Art', - callback_fn='crazy_functions.互动小游戏->随机小游戏1', - lock_plugin=True - ) - yield from state.continue_game(prompt, chatbot, history) diff 
--git "a/crazy_functions/\344\272\244\344\272\222\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" "b/crazy_functions/\344\272\244\344\272\222\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" deleted file mode 100644 index 811267a321e34896257b612f6797f095625bf962..0000000000000000000000000000000000000000 --- "a/crazy_functions/\344\272\244\344\272\222\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" +++ /dev/null @@ -1,63 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - - -@CatchException -def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行 - plugin_kwargs 插件模型的参数, 如温度和top_p等, 一般原样传递下去就行 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - state = chatbot._cookies.get('plugin_state_0001', None) # 初始化插件状态 - - if state is None: - chatbot._cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数' # 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数 - chatbot._cookies['plugin_state_0001'] = 'wait_user_keyword' # 赋予插件状态 - - chatbot.append(("第一次调用:", "请输入关键词, 我将为您查找相关壁纸, 建议使用英文单词, 插件锁定中,请直接提交即可。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if state == 'wait_user_keyword': - chatbot._cookies['lock_plugin'] = None # 解除插件锁定,避免遗忘导致死锁 - chatbot._cookies['plugin_state_0001'] = None # 解除插件状态,避免遗忘导致死锁 - - # 解除插件锁定 - chatbot.append((f"获取关键词:{txt}", "")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - page_return = get_image_page_by_keyword(txt) - inputs=inputs_show_user=f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=inputs, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). 
If there are no image url provided, answer 'no image url provided'" - ) - chatbot[-1] = [chatbot[-1][0], gpt_say] - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - -# --------------------------------------------------------------------------------- - -def get_image_page_by_keyword(keyword): - import requests - from bs4 import BeautifulSoup - response = requests.get(f'https://wallhaven.cc/search?q={keyword}', timeout=2) - res = "image urls: \n" - for image_element in BeautifulSoup(response.content, 'html.parser').findAll("img"): - try: - res += image_element["data-src"] - res += "\n" - except: - pass - return res diff --git "a/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" "b/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" deleted file mode 100644 index e57f80f1d45bd3ec23837253848f7b32a5ccd751..0000000000000000000000000000000000000000 --- "a/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" +++ /dev/null @@ -1,138 +0,0 @@ -import threading -from request_llm.bridge_all import predict_no_ui_long_connection -from toolbox import update_ui -from toolbox import CatchException, write_results_to_file, report_execption -from .crazy_utils import breakdown_txt_to_satisfy_token_limit - -def extract_code_block_carefully(txt): - splitted = txt.split('```') - n_code_block_seg = len(splitted) - 1 - if n_code_block_seg <= 1: return txt - # 剩下的情况都开头除去 ``` 结尾除去一次 ``` - txt_out = '```'.join(splitted[1:-1]) - return txt_out - - - -def break_txt_into_half_at_some_linebreak(txt): - lines = txt.split('\n') - n_lines = len(lines) - pre = lines[:(n_lines//2)] - post = lines[(n_lines//2):] - return "\n".join(pre), "\n".join(post) - - -@CatchException -def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port): - # 第1步:清空历史,以免输入溢出 - history = [] - - # 第2步:尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 第3步:集合文件 - import time, glob, os, shutil, re - os.makedirs('gpt_log/generated_english_version', exist_ok=True) - os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True) - file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \ - [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] - # file_manifest = ['./toolbox.py'] - i_say_show_user_buffer = [] - - # 第4步:随便显示点什么防止卡顿的感觉 - for index, fp in enumerate(file_manifest): - # if 'test_project' in fp: continue - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}' - i_say_show_user_buffer.append(i_say_show_user) - chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - - # 第5步:Token限制下的截断与处理 - MAX_TOKEN = 3000 - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_fn(txt): return 
len(enc.encode(txt, disallowed_special=())) - - - # 第6步:任务函数 - mutable_return = [None for _ in file_manifest] - observe_window = [[""] for _ in file_manifest] - def thread_worker(fp,index): - if index > 10: - time.sleep(60) - print('Openai 限制免费用户每分钟20次请求,降低请求频率中。') - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```' - try: - gpt_say = "" - # 分解代码文件 - file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN) - for file_content_partial in file_content_breakdown: - i_say = i_say_template(fp, file_content_partial) - # # ** gpt request ** - gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index]) - gpt_say_partial = extract_code_block_carefully(gpt_say_partial) - gpt_say += gpt_say_partial - mutable_return[index] = gpt_say - except ConnectionAbortedError as token_exceed_err: - print('至少一个线程任务Token溢出而失败', e) - except Exception as e: - print('至少一个线程任务意外失败', e) - - # 第7步:所有线程同时开始执行任务函数 - handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)] - for h in handles: - h.daemon = True - h.start() - chatbot.append(('开始了吗?', f'多线程操作已经开始')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 第8步:循环轮询各个线程是否执行完毕 - cnt = 0 - while True: - cnt += 1 - time.sleep(0.2) - th_alive = [h.is_alive() for h in handles] - if not any(th_alive): break - # 更好的UI视觉效果 - observe_win = [] - for thread_index, alive in enumerate(th_alive): - observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('
','.....').replace('$','.')+"... ]") - stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)] - stat_str = ''.join(stat) - chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1))) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 第9步:把结果写入文件 - for index, h in enumerate(handles): - h.join() # 这里其实不需要join了,肯定已经都结束了 - fp = file_manifest[index] - gpt_say = mutable_return[index] - i_say_show_user = i_say_show_user_buffer[index] - - where_to_relocate = f'gpt_log/generated_english_version/{fp}' - if gpt_say is not None: - with open(where_to_relocate, 'w+', encoding='utf-8') as f: - f.write(gpt_say) - else: # 失败 - shutil.copyfile(file_manifest[index], where_to_relocate) - chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}')) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - time.sleep(1) - - # 第10步:备份一个文件 - res = write_results_to_file(history) - chatbot.append(("生成一份任务执行报告", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git "a/crazy_functions/\345\207\275\346\225\260\345\212\250\346\200\201\347\224\237\346\210\220.py" "b/crazy_functions/\345\207\275\346\225\260\345\212\250\346\200\201\347\224\237\346\210\220.py" deleted file mode 100644 index d20d0cf579d2357306e040570f708d4f26a8912a..0000000000000000000000000000000000000000 --- "a/crazy_functions/\345\207\275\346\225\260\345\212\250\346\200\201\347\224\237\346\210\220.py" +++ /dev/null @@ -1,252 +0,0 @@ -# 本源代码中, ⭐ = 关键步骤 -""" -测试: - - 裁剪图像,保留下半部分 - - 交换图像的蓝色通道和红色通道 - - 将图像转为灰度图像 - - 将csv文件转excel表格 - -Testing: - - Crop the image, keeping the bottom half. - - Swap the blue channel and red channel of the image. - - Convert the image to grayscale. - - Convert the CSV file to an Excel spreadsheet. -""" - - -from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder -from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg -from .crazy_utils import input_clipping, try_install_deps -from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated -from crazy_functions.gen_fns.gen_fns_shared import get_class_name -from crazy_functions.gen_fns.gen_fns_shared import subprocess_worker -from crazy_functions.gen_fns.gen_fns_shared import try_make_module -import os -import time -import glob -import multiprocessing - -templete = """ -```python -import ... # Put dependencies here, e.g. import numpy as np. - -class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction` - - def run(self, path): # The name of the function must be `run`, it takes only a positional argument. - # rewrite the function you have just written here - ... 
- return generated_file_path -``` -""" - -def inspect_dependency(chatbot, history): - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return True - -def get_code_block(reply): - import re - pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks - matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) == 1: - return matches[0].strip('python') # code block - for match in matches: - if 'class TerminalFunction' in match: - return match.strip('python') # code block - raise RuntimeError("GPT is not generating proper code.") - -def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history): - # 输入 - prompt_compose = [ - f'Your job:\n' - f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n', - f"2. You should write this function to perform following task: " + txt + "\n", - f"3. Wrap the output python function with markdown codeblock." - ] - i_say = "".join(prompt_compose) - demo = [] - - # 第一步 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, - sys_prompt= r"You are a world-class programmer." - ) - history.extend([i_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - # 第二步 - prompt_compose = [ - "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n", - templete - ] - i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. " - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt= r"You are a programmer. You need to replace `...` with valid packages, do not give `...` in your answer!" - ) - code_to_return = gpt_say - history.extend([i_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - # # 第三步 - # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them." - # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`' - # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=i_say, inputs_show_user=inputs_show_user, - # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - # sys_prompt= r"You are a programmer." - # ) - - # # # 第三步 - # i_say = "Show me how to use `pip` to install packages to run the code above. " - # i_say += 'For instance. `pip install -r opencv-python scipy numpy`' - # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=i_say, inputs_show_user=i_say, - # llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - # sys_prompt= r"You are a programmer." - # ) - installation_advance = "" - - return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history - - - - -def for_immediate_show_off_when_possible(file_type, fp, chatbot): - if file_type in ['png', 'jpg']: - image_path = os.path.abspath(fp) - chatbot.append(['这是一张图片, 展示如下:', - f'本地文件地址:
<br/>`{image_path}`<br/>'+ - f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' - ]) - return chatbot - - - -def have_any_recent_upload_files(chatbot): - _5min = 5 * 60 - if not chatbot: return False # chatbot is None - most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) - if not most_recent_uploaded: return False # most_recent_uploaded is None - if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new - else: return False # most_recent_uploaded is too old - -def get_recent_file_prompt_support(chatbot): - most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) - path = most_recent_uploaded['path'] - return path - -@CatchException -def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - - # 清空历史 - history = [] - - # 基本信息:功能、贡献者 - chatbot.append(["正在启动: 插件动态生成插件", "插件动态生成, 执行开始, 作者Binary-Husky."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # ⭐ 文件上传区是否有东西 - # 1. 如果有文件: 作为函数参数 - # 2. 如果没有文件:需要用GPT提取参数 (太懒了,以后再写,虚空终端已经实现了类似的代码) - file_list = [] - if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False): - file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None) - file_list.append(file_path) - yield from update_ui_lastest_msg(f"当前文件: {file_path}", chatbot, history, 1) - elif have_any_recent_upload_files(chatbot): - file_dir = get_recent_file_prompt_support(chatbot) - file_list = glob.glob(os.path.join(file_dir, '**/*'), recursive=True) - yield from update_ui_lastest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1) - else: - chatbot.append(["文件检索", "没有发现任何近期上传的文件。"]) - yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1) - return # 2. 如果没有文件 - if len(file_list) == 0: - chatbot.append(["文件检索", "没有发现任何近期上传的文件。"]) - yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1) - return # 2. 如果没有文件 - - # 读取文件 - file_type = file_list[0].split('.')[-1] - - # 粗心检查 - if is_the_upload_folder(txt): - yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1) - return - - # 开始干正事 - MAX_TRY = 3 - for j in range(MAX_TRY): # 最多重试5次 - traceback = "" - try: - # ⭐ 开始啦 ! - code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \ - yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history) - chatbot.append(["代码生成阶段结束", ""]) - yield from update_ui_lastest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1) - # ⭐ 分离代码块 - code = get_code_block(code) - # ⭐ 检查模块 - ok, traceback = try_make_module(code, chatbot) - # 搞定代码生成 - if ok: break - except Exception as e: - if not traceback: traceback = trimmed_format_exc() - # 处理异常 - if not traceback: traceback = trimmed_format_exc() - yield from update_ui_lastest_msg(f"第 {j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5) - - # 代码生成结束, 开始执行 - TIME_LIMIT = 15 - yield from update_ui_lastest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... 
", chatbot, history, 1) - manager = multiprocessing.Manager() - return_dict = manager.dict() - - # ⭐ 到最后一步了,开始逐个文件进行处理 - for file_path in file_list: - if os.path.exists(file_path): - chatbot.append([f"正在处理文件: {file_path}", f"请稍等..."]) - chatbot = for_immediate_show_off_when_possible(file_type, file_path, chatbot) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - else: - continue - - # ⭐⭐⭐ subprocess_worker ⭐⭐⭐ - p = multiprocessing.Process(target=subprocess_worker, args=(code, file_path, return_dict)) - # ⭐ 开始执行,时间限制TIME_LIMIT - p.start(); p.join(timeout=TIME_LIMIT) - if p.is_alive(): p.terminate(); p.join() - p.close() - res = return_dict['result'] - success = return_dict['success'] - traceback = return_dict['traceback'] - if not success: - if not traceback: traceback = trimmed_format_exc() - chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"]) - # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 顺利完成,收尾 - res = str(res) - if os.path.exists(res): - chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res]) - new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - else: - chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - diff --git "a/crazy_functions/\345\221\275\344\273\244\350\241\214\345\212\251\346\211\213.py" "b/crazy_functions/\345\221\275\344\273\244\350\241\214\345\212\251\346\211\213.py" deleted file mode 100644 index 286952445a1f4f262e47fec82a3f61243f85b5e5..0000000000000000000000000000000000000000 --- "a/crazy_functions/\345\221\275\344\273\244\350\241\214\345\212\251\346\211\213.py" +++ /dev/null @@ -1,31 +0,0 @@ -from toolbox import CatchException, update_ui, gen_time_str -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import input_clipping -import copy, json - -@CatchException -def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行 - plugin_kwargs 插件模型的参数, 暂时没有用武之地 - chatbot 聊天显示框的句柄, 用于显示给用户 - history 聊天历史, 前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - # 清空历史, 以免输入溢出 - history = [] - - # 输入 - i_say = "请写bash命令实现以下功能:" + txt - # 开始 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="你是一个Linux大师级用户。注意,当我要求你写bash命令时,尽可能地仅用一行命令解决我的要求。" - ) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - - diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" deleted file mode 100644 index 62f36626143c29a6c31f20a0067e2775ad870ce6..0000000000000000000000000000000000000000 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ /dev/null @@ -1,276 +0,0 @@ -from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder -from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState - - -def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", quality=None, style=None): - import requests, json, time, os - from 
request_llms.bridge_all import model_info - - proxies = get_conf('proxies') - # Set up OpenAI API key and model - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - # 'https://api.openai.com/v1/chat/completions' - img_endpoint = chat_endpoint.replace('chat/completions','images/generations') - # # Generate the image - url = img_endpoint - headers = { - 'Authorization': f"Bearer {api_key}", - 'Content-Type': 'application/json' - } - data = { - 'prompt': prompt, - 'n': 1, - 'size': resolution, - 'model': model, - 'response_format': 'url' - } - if quality is not None: - data['quality'] = quality - if style is not None: - data['style'] = style - response = requests.post(url, headers=headers, json=data, proxies=proxies) - print(response.content) - try: - image_url = json.loads(response.content.decode('utf8'))['data'][0]['url'] - except: - raise RuntimeError(response.content.decode()) - # 文件保存到本地 - r = requests.get(image_url, proxies=proxies) - file_path = f'{get_log_folder()}/image_gen/' - os.makedirs(file_path, exist_ok=True) - file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png' - with open(file_path+file_name, 'wb+') as f: f.write(r.content) - - - return image_url, file_path+file_name - - -def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="dall-e-2"): - import requests, json, time, os - from request_llms.bridge_all import model_info - - proxies = get_conf('proxies') - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - # 'https://api.openai.com/v1/chat/completions' - img_endpoint = chat_endpoint.replace('chat/completions','images/edits') - # # Generate the image - url = img_endpoint - n = 1 - headers = { - 'Authorization': f"Bearer {api_key}", - } - make_transparent(image_path, image_path+'.tsp.png') - make_square_image(image_path+'.tsp.png', image_path+'.tspsq.png') - resize_image(image_path+'.tspsq.png', image_path+'.ready.png', max_size=1024) - image_path = image_path+'.ready.png' - with open(image_path, 'rb') as f: - file_content = f.read() - files = { - 'image': (os.path.basename(image_path), file_content), - # 'mask': ('mask.png', open('mask.png', 'rb')) - 'prompt': (None, prompt), - "n": (None, str(n)), - 'size': (None, resolution), - } - - response = requests.post(url, headers=headers, files=files, proxies=proxies) - print(response.content) - try: - image_url = json.loads(response.content.decode('utf8'))['data'][0]['url'] - except: - raise RuntimeError(response.content.decode()) - # 文件保存到本地 - r = requests.get(image_url, proxies=proxies) - file_path = f'{get_log_folder()}/image_gen/' - os.makedirs(file_path, exist_ok=True) - file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png' - with open(file_path+file_name, 'wb+') as f: f.write(r.content) - - - return image_url, file_path+file_name - - -@CatchException -def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - if prompt.strip() == "": - chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。")) - yield from update_ui(chatbot=chatbot, history=history) # 
刷新界面 界面更新 - return - chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 .....")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - resolution = plugin_kwargs.get("advanced_arg", '1024x1024') - image_url, image_path = gen_image(llm_kwargs, prompt, resolution) - chatbot.append([prompt, - f'图像中转网址:
<br/>`{image_url}`<br/>'+ - f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>' - f'本地文件地址: <br/>`{image_path}`<br/>'+ - f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' - ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新 - - -@CatchException -def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - if prompt.strip() == "": - chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新 - return - chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 .....")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - resolution_arg = plugin_kwargs.get("advanced_arg", '1024x1024-standard-vivid').lower() - parts = resolution_arg.split('-') - resolution = parts[0] # 解析分辨率 - quality = 'standard' # 质量与风格默认值 - style = 'vivid' - # 遍历检查是否有额外参数 - for part in parts[1:]: - if part in ['hd', 'standard']: - quality = part - elif part in ['vivid', 'natural']: - style = part - image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality, style=style) - chatbot.append([prompt, - f'图像中转网址:
<br/>`{image_url}`<br/>'+ - f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>' - f'本地文件地址: <br/>`{image_path}`<br/>'+ - f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' - ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新 - - -class ImageEditState(GptAcademicState): - # 尚未完成 - def get_image_file(self, x): - import os, glob - if len(x) == 0: return False, None - if not os.path.exists(x): return False, None - if x.endswith('.png'): return True, x - file_manifest = [f for f in glob.glob(f'{x}/**/*.png', recursive=True)] - confirm = (len(file_manifest) >= 1 and file_manifest[0].endswith('.png') and os.path.exists(file_manifest[0])) - file = None if not confirm else file_manifest[0] - return confirm, file - - def lock_plugin(self, chatbot): - chatbot._cookies['lock_plugin'] = 'crazy_functions.图片生成->图片修改_DALLE2' - self.dump_state(chatbot) - - def unlock_plugin(self, chatbot): - self.reset() - chatbot._cookies['lock_plugin'] = None - self.dump_state(chatbot) - - def get_resolution(self, x): - return (x in ['256x256', '512x512', '1024x1024']), x - - def get_prompt(self, x): - confirm = (len(x)>=5) and (not self.get_resolution(x)[0]) and (not self.get_image_file(x)[0]) - return confirm, x - - def reset(self): - self.req = [ - {'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file}, - {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024, 然后再次点击本插件', 'verify_fn': self.get_resolution}, - {'value':None, 'description': '请输入修改需求,建议您使用英文提示词, 然后再次点击本插件', 'verify_fn': self.get_prompt}, - ] - self.info = "" - - def feed(self, prompt, chatbot): - for r in self.req: - if r['value'] is None: - confirm, res = r['verify_fn'](prompt) - if confirm: - r['value'] = res - self.dump_state(chatbot) - break - return self - - def next_req(self): - for r in self.req: - if r['value'] is None: - return r['description'] - return "已经收集到所有信息" - - def already_obtained_all_materials(self): - return all([x['value'] is not None for x in self.req]) - -@CatchException -def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 尚未完成 - history = [] # 清空历史 - state = ImageEditState.get_state(chatbot, ImageEditState) - state = state.feed(prompt, chatbot) - state.lock_plugin(chatbot) - if not state.already_obtained_all_materials(): - chatbot.append(["图片修改\n\n1. 上传图片(图片中需要修改的位置用橡皮擦擦除为纯白色,即RGB=255,255,255)\n2. 输入分辨率 \n3. 输入修改需求", state.next_req()]) - yield from update_ui(chatbot=chatbot, history=history) - return - - image_path = state.req[0]['value'] - resolution = state.req[1]['value'] - prompt = state.req[2]['value'] - chatbot.append(["图片修改, 执行中", f"图片:`{image_path}`
<br/>分辨率:`{resolution}`<br/>
修改需求:`{prompt}`"]) - yield from update_ui(chatbot=chatbot, history=history) - image_url, image_path = edit_image(llm_kwargs, prompt, image_path, resolution) - chatbot.append([prompt, - f'图像中转网址:
<br/>`{image_url}`<br/>'+ - f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>' - f'本地文件地址: <br/>`{image_path}`<br/>'+ - f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' - ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新 - state.unlock_plugin(chatbot) - -def make_transparent(input_image_path, output_image_path): - from PIL import Image - image = Image.open(input_image_path) - image = image.convert("RGBA") - data = image.getdata() - new_data = [] - for item in data: - if item[0] == 255 and item[1] == 255 and item[2] == 255: - new_data.append((255, 255, 255, 0)) - else: - new_data.append(item) - image.putdata(new_data) - image.save(output_image_path, "PNG") - -def resize_image(input_path, output_path, max_size=1024): - from PIL import Image - with Image.open(input_path) as img: - width, height = img.size - if width > max_size or height > max_size: - if width >= height: - new_width = max_size - new_height = int((max_size / width) * height) - else: - new_height = max_size - new_width = int((max_size / height) * width) - - resized_img = img.resize(size=(new_width, new_height)) - resized_img.save(output_path) - else: - img.save(output_path) - -def make_square_image(input_path, output_path): - from PIL import Image - with Image.open(input_path) as img: - width, height = img.size - size = max(width, height) - new_img = Image.new("RGBA", (size, size), color="black") - new_img.paste(img, ((size - width) // 2, (size - height) // 2)) - new_img.save(output_path) diff --git "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" deleted file mode 100644 index 4b16b8846d46c9589a001c159ef70d5dc475c8ad..0000000000000000000000000000000000000000 --- "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" +++ /dev/null @@ -1,101 +0,0 @@ -# 本源代码中, ⭐ = 关键步骤 -""" -测试: - - show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg - -""" - - -from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import get_conf, select_api_key, update_ui_lastest_msg, Singleton -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg -from crazy_functions.crazy_utils import input_clipping, try_install_deps -from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses -from crazy_functions.agent_fns.auto_agent import AutoGenMath -import time - -def remove_model_prefix(llm): - if llm.startswith('api2d-'): llm = llm.replace('api2d-', '') - if llm.startswith('azure-'): llm = llm.replace('azure-', '') - return llm - - -@CatchException -def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - # 检查当前的模型是否符合要求 - supported_llms = [ - "gpt-3.5-turbo-16k", - 'gpt-3.5-turbo-1106', - "gpt-4", - "gpt-4-32k", - 'gpt-4-1106-preview', - "azure-gpt-3.5-turbo-16k", - "azure-gpt-3.5-16k", - "azure-gpt-4", - "azure-gpt-4-32k", - ] - from request_llms.bridge_all import model_info - if model_info[llm_kwargs['llm_model']]["max_token"] < 8000: # 至少是8k上下文的模型 - chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}的最大上下文长度太短, 不能支撑AutoGen运行。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型,加载API_KEY - 
llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import autogen - if get_conf("AUTOGEN_USE_DOCKER"): - import docker - except: - chatbot.append([ f"处理任务: {txt}", - f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen docker```。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import autogen - import glob, os, time, subprocess - if get_conf("AUTOGEN_USE_DOCKER"): - subprocess.Popen(["docker", "--version"]) - except: - chatbot.append([f"处理任务: {txt}", f"缺少docker运行环境!"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 解锁插件 - chatbot.get_cookies()['lock_plugin'] = None - persistent_class_multi_user_manager = GradioMultiuserManagerForPersistentClasses() - user_uuid = chatbot.get_cookies().get('uuid') - persistent_key = f"{user_uuid}->多智能体终端" - if persistent_class_multi_user_manager.already_alive(persistent_key): - # 当已经存在一个正在运行的多智能体终端时,直接将用户输入传递给它,而不是再次启动一个新的多智能体终端 - print('[debug] feed new user input') - executor = persistent_class_multi_user_manager.get(persistent_key) - exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="resume") - else: - # 运行多智能体终端 (首次) - print('[debug] create new executor instance') - history = [] - chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request) - persistent_class_multi_user_manager.set(persistent_key, executor) - exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create") - - if exit_reason == "wait_feedback": - # 当用户点击了“等待反馈”按钮时,将executor存储到cookie中,等待用户的再次调用 - executor.chatbot.get_cookies()['lock_plugin'] = 'crazy_functions.多智能体->多智能体终端' - else: - executor.chatbot.get_cookies()['lock_plugin'] = None - yield from update_ui(chatbot=executor.chatbot, history=executor.history) # 更新状态 diff --git "a/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" "b/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" deleted file mode 100644 index 6ffc072f634e7c786238963929011229c352d46b..0000000000000000000000000000000000000000 --- "a/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" +++ /dev/null @@ -1,152 +0,0 @@ -from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user -import re - -f_prefix = 'GPT-Academic对话存档' - -def write_chat_to_file(chatbot, history=None, file_name=None): - """ - 将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。 - """ - import os - import time - if file_name is None: - file_name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html' - fp = os.path.join(get_log_folder(get_user(chatbot), plugin_name='chat_history'), file_name) - with open(fp, 'w', encoding='utf8') as f: - from themes.theme import advanced_css - f.write(f'对话历史') - for i, contents in enumerate(chatbot): - for j, content in enumerate(contents): - try: # 这个bug没找到触发条件,暂时先这样顶一下 - if type(content) != str: content = str(content) - except: - continue - f.write(content) - if j == 0: - f.write('
') - f.write('
\n\n') - f.write('
\n\n raw chat context:\n') - f.write('') - for h in history: - f.write("\n>>>" + h) - f.write('') - promote_file_to_downloadzone(fp, rename_file=file_name, chatbot=chatbot) - return '对话历史写入:' + fp - -def gen_file_preview(file_name): - try: - with open(file_name, 'r', encoding='utf8') as f: - file_content = f.read() - # pattern to match the text between and - pattern = re.compile(r'.*?', flags=re.DOTALL) - file_content = re.sub(pattern, '', file_content) - html, history = file_content.split('
\n\n raw chat context:\n') - history = history.strip('') - history = history.strip('') - history = history.split("\n>>>") - return list(filter(lambda x:x!="", history))[0][:100] - except: - return "" - -def read_file_to_chat(chatbot, history, file_name): - with open(file_name, 'r', encoding='utf8') as f: - file_content = f.read() - # pattern to match the text between and - pattern = re.compile(r'.*?', flags=re.DOTALL) - file_content = re.sub(pattern, '', file_content) - html, history = file_content.split('
\n\n raw chat context:\n') - history = history.strip('') - history = history.strip('') - history = history.split("\n>>>") - history = list(filter(lambda x:x!="", history)) - html = html.split('
\n\n') - html = list(filter(lambda x:x!="", html)) - chatbot.clear() - for i, h in enumerate(html): - i_say, gpt_say = h.split('
') - chatbot.append([i_say, gpt_say]) - chatbot.append([f"存档文件详情?", f"[Local Message] 载入对话{len(html)}条,上下文{len(history)}条。"]) - return chatbot, history - -@CatchException -def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - - chatbot.append(("保存当前对话", - f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - -def hide_cwd(str): - import os - current_path = os.getcwd() - replace_path = "." - return str.replace(current_path, replace_path) - -@CatchException -def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - from .crazy_utils import get_files_from_everything - success, file_manifest, _ = get_files_from_everything(txt, type='.html') - - if not success: - if txt == "": txt = '空空如也的输入栏' - import glob - local_history = "
".join([ - "`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" - for f in glob.glob( - f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', - recursive=True - )]) - chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:
{local_history}"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - try: - chatbot, history = read_file_to_chat(chatbot, history, file_manifest[0]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - except: - chatbot.append([f"载入对话历史文件", f"对话历史文件损坏!"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - -@CatchException -def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - - import glob, os - local_history = "
".join([ - "`"+hide_cwd(f)+"`" - for f in glob.glob( - f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True - )]) - for f in glob.glob(f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True): - os.remove(f) - chatbot.append([f"删除所有历史对话文件", f"已删除
{local_history}"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - diff --git "a/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" "b/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" deleted file mode 100644 index 8793ea4490c07c36688fed0ae95bbbfcbb6f073b..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" +++ /dev/null @@ -1,127 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_exception -from toolbox import write_history_to_file, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, os - # pip install python-docx 用于docx格式,跨平台 - # pip install pywin32 用于doc格式,仅支持Win平台 - for index, fp in enumerate(file_manifest): - if fp.split(".")[-1] == "docx": - from docx import Document - doc = Document(fp) - file_content = "\n".join([para.text for para in doc.paragraphs]) - else: - try: - import win32com.client - word = win32com.client.Dispatch("Word.Application") - word.visible = False - # 打开文件 - doc = word.Documents.Open(os.getcwd() + '/' + fp) - # file_content = doc.Content.Text - doc = word.ActiveDocument - file_content = doc.Range().Text - doc.Close() - word.Quit() - except: - raise RuntimeError('请先将.doc文档转换为.docx文档。') - - # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名 - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - from request_llms.bridge_all import model_info - max_token = model_info[llm_kwargs['llm_model']]['max_token'] - TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4 - paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) - this_paper_history = [] - for i, paper_frag in enumerate(paper_fragments): - i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```' - i_say_show_user = f'请对下面的文章片段做概述: {os.path.abspath(fp)}的第{i+1}/{len(paper_fragments)}个片段。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.extend([i_say_show_user,gpt_say]) - this_paper_history.extend([i_say_show_user,gpt_say]) - - # 已经对该文章的所有片段总结完毕,如果文章被切分了, - if len(paper_fragments) > 1: - i_say = f"根据以上的对话,总结文章{os.path.abspath(fp)}的主要内容。" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=this_paper_history, - sys_prompt="总结文章。" - ) - - history.extend([i_say,gpt_say]) - this_paper_history.extend([i_say,gpt_say]) - - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("所有文件都总结完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -@CatchException -def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", 
- "批量总结Word文档。函数插件贡献者: JasonGuo1。注意, 如果是.doc文件, 请先转化为.docx格式。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - from docx import Document - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - if txt.endswith('.docx') or txt.endswith('.doc'): - file_manifest = [txt] - else: - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)] - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" "b/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" deleted file mode 100644 index b27bcce06c4c83d491dd5bae445be957436204f9..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" +++ /dev/null @@ -1,186 +0,0 @@ -from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder - -def split_audio_file(filename, split_duration=1000): - """ - 根据给定的切割时长将音频文件切割成多个片段。 - - Args: - filename (str): 需要被切割的音频文件名。 - split_duration (int, optional): 每个切割音频片段的时长(以秒为单位)。默认值为1000。 - - Returns: - filelist (list): 一个包含所有切割音频片段文件路径的列表。 - - """ - from moviepy.editor import AudioFileClip - import os - os.makedirs(f"{get_log_folder(plugin_name='audio')}/mp3/cut/", exist_ok=True) # 创建存储切割音频的文件夹 - - # 读取音频文件 - audio = AudioFileClip(filename) - - # 计算文件总时长和切割点 - total_duration = audio.duration - split_points = list(range(0, int(total_duration), split_duration)) - split_points.append(int(total_duration)) - filelist = [] - - # 切割音频文件 - for i in range(len(split_points) - 1): - start_time = split_points[i] - end_time = split_points[i + 1] - split_audio = audio.subclip(start_time, end_time) - split_audio.write_audiofile(f"{get_log_folder(plugin_name='audio')}/mp3/cut/{filename[0]}_{i}.mp3") - filelist.append(f"{get_log_folder(plugin_name='audio')}/mp3/cut/{filename[0]}_{i}.mp3") - - audio.close() - return filelist - -def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history): - import os, requests - from moviepy.editor import AudioFileClip - from request_llms.bridge_all import model_info - - # 设置OpenAI密钥和模型 - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - - whisper_endpoint = chat_endpoint.replace('chat/completions', 'audio/transcriptions') - url = whisper_endpoint - headers = { - 'Authorization': f"Bearer {api_key}" - } - - 
os.makedirs(f"{get_log_folder(plugin_name='audio')}/mp3/", exist_ok=True) - for index, fp in enumerate(file_manifest): - audio_history = [] - # 提取文件扩展名 - ext = os.path.splitext(fp)[1] - # 提取视频中的音频 - if ext not in [".mp3", ".wav", ".m4a", ".mpga"]: - audio_clip = AudioFileClip(fp) - audio_clip.write_audiofile(f"{get_log_folder(plugin_name='audio')}/mp3/output{index}.mp3") - fp = f"{get_log_folder(plugin_name='audio')}/mp3/output{index}.mp3" - # 调用whisper模型音频转文字 - voice = split_audio_file(fp) - for j, i in enumerate(voice): - with open(i, 'rb') as f: - file_content = f.read() # 读取文件内容到内存 - files = { - 'file': (os.path.basename(i), file_content), - } - data = { - "model": "whisper-1", - "prompt": parse_prompt, - 'response_format': "text" - } - - chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - proxies = get_conf('proxies') - response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text - - chatbot.append(["音频解析结果", response]) - history.extend(["音频解析结果", response]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```' - i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt=f"总结音频。音频文件名{fp}" - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.extend([i_say_show_user, gpt_say]) - audio_history.extend([i_say_show_user, gpt_say]) - - # 已经对该文章的所有片段总结完毕,如果文章被切分了 - result = "".join(audio_history) - if len(audio_history) > 1: - i_say = f"根据以上的对话,使用中文总结音频“{result}”的主要内容。" - i_say_show_user = f'第{index + 1}段音频的主要内容:' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=audio_history, - sys_prompt="总结文章。" - ) - history.extend([i_say, gpt_say]) - audio_history.extend([i_say, gpt_say]) - - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append((f"第{index + 1}段音频完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 删除中间文件夹 - import shutil - shutil.rmtree(f"{get_log_folder(plugin_name='audio')}/mp3") - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("所有音频都总结完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) - - -@CatchException -def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - try: - from moviepy.editor import AudioFileClip - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - extensions = ['.mp4', '.m4a', '.wav', '.mpga', '.mpeg', '.mp3', '.avi', 
'.mkv', '.flac', '.aac'] - - if txt.endswith(tuple(extensions)): - file_manifest = [txt] - else: - file_manifest = [] - for extension in extensions: - file_manifest.extend(glob.glob(f'{project_folder}/**/*{extension}', recursive=True)) - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - parse_prompt = plugin_kwargs.get("advanced_arg", '将音频解析为简体中文') - yield from AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history) - - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" deleted file mode 100644 index 1d876d080a7272cc1a9108416d619afd1af11d86..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" +++ /dev/null @@ -1,261 +0,0 @@ -import glob, time, os, re, logging -from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion -from toolbox import CatchException, report_exception, get_log_folder -from toolbox import write_history_to_file, promote_file_to_downloadzone -fast_debug = False - -class PaperFileGroup(): - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llms.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - 将长文本分离开来 - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md") - logging.info('Segmentation: done') - - def merge_result(self): - self.file_result = ["" for _ in range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, self.sp_file_index): - self.file_result[k] += r - - def write_result(self, language): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - dst_file = os.path.join(get_log_folder(), f'{gen_time_str()}.md') - with open(dst_file, 'w', encoding='utf8') as f: - manifest.append(dst_file) - f.write(res) - return manifest - -def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'): - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - # <-------- 读取Markdown文件,删除其中的所有注释 ----------> - pfg = PaperFileGroup() - - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - # 记录删除注释后的文本 - pfg.file_paths.append(fp) - 
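The splitting step invoked just below (`run_file_split`) can be approximated directly with tiktoken. A minimal sketch, assuming tiktoken is installed and a `README.md` exists in the working directory; note that the real helper, `breakdown_text_to_satisfy_token_limit`, splits on text structure rather than the fixed token stride used here:

```python
# Simplified stand-in for PaperFileGroup.run_file_split: count tokens with the
# gpt-3.5-turbo tokenizer and cut oversized texts into fixed-size windows.
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def split_by_token_limit(text: str, max_tokens: int = 1500) -> list[str]:
    tokens = enc.encode(text, disallowed_special=())
    if len(tokens) <= max_tokens:
        return [text]
    # Decode each max_tokens-sized token window back into a text fragment.
    return [enc.decode(tokens[i:i + max_tokens])
            for i in range(0, len(tokens), max_tokens)]

with open("README.md", encoding="utf-8") as f:
    fragments = split_by_token_limit(f.read())
print(f"{len(fragments)} fragment(s)")
```

Splitting on token windows rather than characters is what keeps each fragment within the model's context budget regardless of language mix.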
pfg.file_contents.append(file_content) - - # <-------- 拆分过长的Markdown文件 ----------> - pfg.run_file_split(max_token_limit=1500) - n_split = len(pfg.sp_file_contents) - - # <-------- 多线程翻译开始 ----------> - if language == 'en->zh': - inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - elif language == 'zh->en': - inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - else: - inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # OpenAI所允许的最大并行过载 - scroller_max_len = 80 - ) - try: - pfg.sp_file_result = [] - for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - pfg.write_result(language) - except: - logging.error(trimmed_format_exc()) - - # <-------- 整理结果,退出 ----------> - create_report_file_name = gen_time_str() + f"-chatgpt.md" - res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name) - promote_file_to_downloadzone(res, chatbot=chatbot) - history = gpt_response_collection - chatbot.append((f"{fp}完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -def get_files_from_everything(txt, preference=''): - if txt == "": return False, None, None - success = True - if txt.startswith('http'): - import requests - from toolbox import get_conf - proxies = get_conf('proxies') - # 网络的远程文件 - if preference == 'Github': - logging.info('正在从github下载资源 ...') - if not txt.endswith('.md'): - # Make a request to the GitHub API to retrieve the repository information - url = txt.replace("https://github.com/", "https://api.github.com/repos/") + '/readme' - response = requests.get(url, proxies=proxies) - txt = response.json()['download_url'] - else: - txt = txt.replace("https://github.com/", "https://raw.githubusercontent.com/") - txt = txt.replace("/blob/", "/") - - r = requests.get(txt, proxies=proxies) - download_local = f'{get_log_folder(plugin_name="批量Markdown翻译")}/raw-readme-{gen_time_str()}.md' - project_folder = f'{get_log_folder(plugin_name="批量Markdown翻译")}' - with open(download_local, 'wb+') as f: f.write(r.content) - file_manifest = [download_local] - elif txt.endswith('.md'): - # 直接给定文件 - file_manifest = [txt] - project_folder = os.path.dirname(txt) - elif os.path.exists(txt): - # 本地路径,递归搜索 - project_folder = txt - file_manifest = [f for f in 
glob.glob(f'{project_folder}/**/*.md', recursive=True)] - else: - project_folder = None - file_manifest = [] - success = False - - return success, file_manifest, project_folder - - -@CatchException -def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - disable_auto_promotion(chatbot) - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - - success, file_manifest, project_folder = get_files_from_everything(txt, preference="Github") - - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') - - - - - -@CatchException -def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - disable_auto_promotion(chatbot) - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - success, file_manifest, project_folder = get_files_from_everything(txt) - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') - - -@CatchException -def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - disable_auto_promotion(chatbot) - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - success, file_manifest, project_folder = get_files_from_everything(txt) - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if 
len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - language = plugin_kwargs.get("advanced_arg", 'Chinese') - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language=language) \ No newline at end of file diff --git "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" deleted file mode 100644 index 54270ab982b55d59e5b8f7cb1e0f27209cc6c83d..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" +++ /dev/null @@ -1,145 +0,0 @@ -from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str -from toolbox import CatchException, report_exception -from toolbox import write_history_to_file, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import read_and_clean_pdf_text -from .crazy_utils import input_clipping - - - -def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - file_write_buffer = [] - for file_name in file_manifest: - print('begin analysis on:', file_name) - ############################## <第 0 步,切割PDF> ################################## - # 递归地切割PDF文件,每一块(尽量是完整的一个section,比如introduction,experiment等,必要时再进行切割) - # 的长度必须小于 2500 个 Token - file_content, page_one = read_and_clean_pdf_text(file_name) # (尝试)按照章节切割PDF - file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - - TOKEN_LIMIT_PER_FRAGMENT = 2500 - - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) - page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model']) - # 为了更好的效果,我们剥离Introduction之后的部分(如果有) - paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0] - - ############################## <第 1 步,从摘要中提取高价值信息,放到history中> ################################## - final_results = [] - final_results.append(paper_meta) - - ############################## <第 2 步,迭代地历遍整个文章,提取精炼信息> ################################## - i_say_show_user = f'首先你在中文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI - - iteration_results = [] - last_iteration_result = paper_meta # 初始值是摘要 - MAX_WORD_TOTAL = 4096 * 0.7 - n_fragment = len(paper_fragments) - if n_fragment >= 20: print('文章极长,不能达到预期效果') - for i in range(n_fragment): - NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment - i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i]}" - i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: 
{paper_fragments[i][:200]}" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 - llm_kwargs, chatbot, - history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果 - sys_prompt="Extract the main idea of this section with Chinese." # 提示 - ) - iteration_results.append(gpt_say) - last_iteration_result = gpt_say - - ############################## <第 3 步,整理history,提取总结> ################################## - final_results.extend(iteration_results) - final_results.append(f'Please conclude this paper discussed above。') - # This prompt is from https://github.com/kaixindelele/ChatPaper/blob/main/chat_paper.py - NUM_OF_WORD = 1000 - i_say = """ -1. Mark the title of the paper (with Chinese translation) -2. list all the authors' names (use English) -3. mark the first author's affiliation (output Chinese translation only) -4. mark the keywords of this article (use English) -5. link to the paper, Github code link (if available, fill in Github:None if not) -6. summarize according to the following four points.Be sure to use Chinese answers (proper nouns need to be marked in English) - - (1):What is the research background of this article? - - (2):What are the past methods? What are the problems with them? Is the approach well motivated? - - (3):What is the research methodology proposed in this paper? - - (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals? -Follow the format of the output that follows: -1. Title: xxx\n\n -2. Authors: xxx\n\n -3. Affiliation: xxx\n\n -4. Keywords: xxx\n\n -5. Urls: xxx or xxx , xxx \n\n -6. Summary: \n\n - - (1):xxx;\n - - (2):xxx;\n - - (3):xxx;\n - - (4):xxx.\n\n -Be sure to use Chinese answers (proper nouns need to be marked in English), statements as concise and academic as possible, -do not have too much repetitive information, numerical values using the original numbers. 
- """ - # This prompt is from https://github.com/kaixindelele/ChatPaper/blob/main/chat_paper.py - file_write_buffer.extend(final_results) - i_say, final_results = input_clipping(i_say, final_results, max_token_limit=2000) - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user='开始最终总结', - llm_kwargs=llm_kwargs, chatbot=chatbot, history=final_results, - sys_prompt= f"Extract the main idea of this paper with less than {NUM_OF_WORD} Chinese characters" - ) - final_results.append(gpt_say) - file_write_buffer.extend([i_say, gpt_say]) - ############################## <第 4 步,设置一个token上限> ################################## - _, final_results = input_clipping("", final_results, max_token_limit=3200) - yield from update_ui(chatbot=chatbot, history=final_results) # 注意这里的历史记录被替代了 - - res = write_history_to_file(file_write_buffer) - promote_file_to_downloadzone(res, chatbot=chatbot) - yield from update_ui(chatbot=chatbot, history=final_results) # 刷新界面 - - -@CatchException -def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import fitz - except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" deleted file mode 100644 index 181d51ce1b40ae0f25429fa71a056160202296c9..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" +++ /dev/null @@ -1,162 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_exception -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import write_history_to_file, promote_file_to_downloadzone - -fast_debug = False - -def readPdf(pdfPath): - """ - 读取pdf文件,返回文本内容 - """ - import pdfminer - from pdfminer.pdfparser import PDFParser - from pdfminer.pdfdocument import PDFDocument - from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed - from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter - from pdfminer.pdfdevice import PDFDevice - from pdfminer.layout import LAParams - from pdfminer.converter import PDFPageAggregator - - fp = open(pdfPath, 'rb') - - # Create a PDF parser object associated with the 
file object - parser = PDFParser(fp) - - # Create a PDF document object that stores the document structure. - # Password for initialization as 2nd parameter - document = PDFDocument(parser) - # Check if the document allows text extraction. If not, abort. - if not document.is_extractable: - raise PDFTextExtractionNotAllowed - - # Create a PDF resource manager object that stores shared resources. - rsrcmgr = PDFResourceManager() - - # Create a PDF device object. - # device = PDFDevice(rsrcmgr) - - # BEGIN LAYOUT ANALYSIS. - # Set parameters for analysis. - laparams = LAParams( - char_margin=10.0, - line_margin=0.2, - boxes_flow=0.2, - all_texts=False, - ) - # Create a PDF page aggregator object. - device = PDFPageAggregator(rsrcmgr, laparams=laparams) - # Create a PDF interpreter object. - interpreter = PDFPageInterpreter(rsrcmgr, device) - - # loop over all pages in the document - outTextList = [] - for page in PDFPage.create_pages(document): - # read the page into a layout object - interpreter.process_page(page) - layout = device.get_result() - for obj in layout._objs: - if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal): - # print(obj.get_text()) - outTextList.append(obj.get_text()) - - return outTextList - - -def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, glob, os - from bs4 import BeautifulSoup - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - if ".tex" in fp: - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - if ".pdf" in fp.lower(): - file_content = readPdf(fp) - file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk') - - prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" - i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("完成了吗?", res)) - yield from 
update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - - -@CatchException -def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import pdfminer, bs4 - except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - diff --git "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" deleted file mode 100644 index 7a18277778df2c1044eeeb5524ee6f3dff78f982..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" +++ /dev/null @@ -1,125 +0,0 @@ -from toolbox import CatchException, report_exception, get_log_folder, gen_time_str -from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion -from toolbox import write_history_to_file, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency -from .crazy_utils import read_and_clean_pdf_text -from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf -from colorful import * -import copy -import os -import math -import logging - -def markdown_to_dict(article_content): - import markdown - from bs4 import BeautifulSoup - cur_t = "" - cur_c = "" - results = {} - for line in article_content: - if line.startswith('#'): - if cur_t!="": - if cur_t not in results: - results.update({cur_t:cur_c.lstrip('\n')}) - else: - # 处理重名的章节 - results.update({cur_t + " " + gen_time_str():cur_c.lstrip('\n')}) - cur_t = line.rstrip('\n') - cur_c = "" - else: - cur_c += line - results_final = {} - for k in list(results.keys()): - if k.startswith('# '): - results_final['title'] = k.split('# ')[-1] - results_final['authors'] = results.pop(k).lstrip('\n') - if k.startswith('###### Abstract'): - results_final['abstract'] = results.pop(k).lstrip('\n') - - results_final_sections = [] - for k,v in results.items(): - results_final_sections.append({ - 'heading':k.lstrip("# "), - 'text':v if len(v) > 0 else 
f"The beginning of {k.lstrip('# ')} section." - }) - results_final['sections'] = results_final_sections - return results_final - - -@CatchException -def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - - disable_auto_promotion(chatbot) - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量翻译PDF文档。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 清空历史,以免输入溢出 - history = [] - - from .crazy_utils import get_files_from_everything - success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf') - if len(file_manifest) > 0: - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import nougat - import tiktoken - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - success_mmd, file_manifest_mmd, _ = get_files_from_everything(txt, type='.mmd') - success = success or success_mmd - file_manifest += file_manifest_mmd - chatbot.append(["文件列表:", ", ".join([e.split('/')[-1] for e in file_manifest])]); - yield from update_ui( chatbot=chatbot, history=history) - # 检测输入参数,如没有给定输入参数,直接退出 - if not success: - if txt == "": txt = '空空如也的输入栏' - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_exception(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - - - -def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import copy - import tiktoken - TOKEN_LIMIT_PER_FRAGMENT = 1024 - generated_conclusion_files = [] - generated_html_files = [] - DST_LANG = "中文" - from crazy_functions.crazy_utils import nougat_interface - from crazy_functions.pdf_fns.report_gen_html import construct_html - nougat_handle = nougat_interface() - for index, fp in enumerate(file_manifest): - if fp.endswith('pdf'): - chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history) - promote_file_to_downloadzone(fpp, rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot) - else: - chatbot.append(["当前论文无需解析:", fp]); yield from update_ui( chatbot=chatbot, history=history) - fpp = fp - with open(fpp, 'r', encoding='utf8') as f: - article_content = f.readlines() - article_dict = markdown_to_dict(article_content) - logging.info(article_dict) - yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG) - - chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files))) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - diff --git "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" deleted file mode 100644 index 3d111629ae58ba1f1d8887e3dabbe18c45d746c4..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" +++ /dev/null @@ 
-1,177 +0,0 @@ -from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages -from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion -from toolbox import write_history_to_file, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency -from .crazy_utils import read_and_clean_pdf_text -from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf -from colorful import * -import os - - -@CatchException -def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - - disable_auto_promotion(chatbot) - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量翻译PDF文档。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - check_packages(["fitz", "tiktoken", "scipdf"]) - except: - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - from .crazy_utils import get_files_from_everything - success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf') - # 检测输入参数,如没有给定输入参数,直接退出 - if not success: - if txt == "": txt = '空空如也的输入栏' - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_exception(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - grobid_url = get_avail_grobid_url() - if grobid_url is not None: - yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url) - else: - yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3) - yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url): - import copy, json - TOKEN_LIMIT_PER_FRAGMENT = 1024 - generated_conclusion_files = [] - generated_html_files = [] - DST_LANG = "中文" - from crazy_functions.pdf_fns.report_gen_html import construct_html - for index, fp in enumerate(file_manifest): - chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - article_dict = parse_pdf(fp, grobid_url) - grobid_json_res = os.path.join(get_log_folder(), gen_time_str() + "grobid.json") - with open(grobid_json_res, 'w+', encoding='utf8') as f: - f.write(json.dumps(article_dict, indent=4, ensure_ascii=False)) - promote_file_to_downloadzone(grobid_json_res, chatbot=chatbot) - - if article_dict is None: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。") - yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG) - chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files))) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - """ - 此函数已经弃用 - """ - import copy - TOKEN_LIMIT_PER_FRAGMENT = 1024 - 
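The deprecated fallback below reads each PDF through `read_and_clean_pdf_text`, whose raw extraction layer is PyMuPDF (the `pymupdf` dependency these plugins check for). A minimal sketch with a hypothetical `paper.pdf`; the real helper additionally cleans line breaks and tries to cut the text at section boundaries:

```python
# Raw-text extraction with PyMuPDF, the layer under read_and_clean_pdf_text.
# paper.pdf is a placeholder input.
import fitz  # provided by the pymupdf package

with fitz.open("paper.pdf") as doc:
    page_one = doc[0].get_text()                      # first page: title/abstract
    full_text = "\n".join(p.get_text() for p in doc)  # whole document

# Mirror the plugin's defensive re-encoding to drop non-UTF-8 artifacts.
full_text = full_text.encode("utf-8", "ignore").decode()
print(page_one[:500])
```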
generated_conclusion_files = [] - generated_html_files = [] - from crazy_functions.pdf_fns.report_gen_html import construct_html - for index, fp in enumerate(file_manifest): - # 读取PDF文件 - file_content, page_one = read_and_clean_pdf_text(fp) - file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - - # 递归地切割PDF文件 - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) - page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=page_one, limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model']) - - # 为了更好的效果,我们剥离Introduction之后的部分(如果有) - paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0] - - # 单线,获取文章meta信息 - paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=f"以下是一篇学术论文的基础信息,请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分。请用markdown格式输出,最后用中文翻译摘要部分。请提取:{paper_meta}", - inputs_show_user=f"请从{fp}中提取出“标题”、“收录会议或期刊”等基本信息。", - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="Your job is to collect information from materials。", - ) - - # 多线,翻译 - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=[ - f"你需要翻译以下内容:\n{frag}" for frag in paper_fragments], - inputs_show_user_array=[f"\n---\n 原文: \n\n {frag.replace('#', '')} \n---\n 翻译:\n " for frag in paper_fragments], - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[paper_meta] for _ in paper_fragments], - sys_prompt_array=[ - "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in paper_fragments], - # max_workers=5 # OpenAI所允许的最大并行过载 - ) - gpt_response_collection_md = copy.deepcopy(gpt_response_collection) - # 整理报告的格式 - for i,k in enumerate(gpt_response_collection_md): - if i%2==0: - gpt_response_collection_md[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection_md)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection_md)//2}]:\n " - else: - gpt_response_collection_md[i] = gpt_response_collection_md[i] - final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""] - final.extend(gpt_response_collection_md) - create_report_file_name = f"{os.path.basename(fp)}.trans.md" - res = write_history_to_file(final, create_report_file_name) - promote_file_to_downloadzone(res, chatbot=chatbot) - - # 更新UI - generated_conclusion_files.append(f'{get_log_folder()}/{create_report_file_name}') - chatbot.append((f"{fp}完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # write html - try: - ch = construct_html() - orig = "" - trans = "" - gpt_response_collection_html = copy.deepcopy(gpt_response_collection) - for i,k in enumerate(gpt_response_collection_html): - if i%2==0: - gpt_response_collection_html[i] = paper_fragments[i//2].replace('#', '') - else: - gpt_response_collection_html[i] = gpt_response_collection_html[i] - final = ["论文概况", paper_meta_info.replace('# ', '### '), "二、论文翻译", ""] - final.extend(gpt_response_collection_html) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{os.path.basename(fp)}.trans.html" - 
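The report assembly here leans on a convention worth spelling out: the multi-threaded request helper returns a flat list alternating prompt and reply, so even indices hold the original fragments and odd indices the translations. A minimal sketch with hypothetical placeholder data:

```python
# The flat [prompt, reply, prompt, reply, ...] layout produced by
# request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency;
# the strings below are placeholders.
gpt_response_collection = [
    "original fragment 1", "译文片段 1",
    "original fragment 2", "译文片段 2",
]

pairs = zip(gpt_response_collection[0::2],   # originals (even indices)
            gpt_response_collection[1::2])   # translations (odd indices)

for orig, trans in pairs:
    # The plugin feeds each pair to construct_html().add_row(a=..., b=...).
    print(f"{orig} -> {trans}")
```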
generated_html_files.append(ch.save_file(create_report_file_name)) - except: - from toolbox import trimmed_format_exc - print('writing html result failed:', trimmed_format_exc()) - - # 准备文件的下载 - for pdf_path in generated_conclusion_files: - # 重命名文件 - rename_file = f'翻译-{os.path.basename(pdf_path)}' - promote_file_to_downloadzone(pdf_path, rename_file=rename_file, chatbot=chatbot) - for html_path in generated_html_files: - # 重命名文件 - rename_file = f'翻译-{os.path.basename(html_path)}' - promote_file_to_downloadzone(html_path, rename_file=rename_file, chatbot=chatbot) - chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files))) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - diff --git "a/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" "b/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" deleted file mode 100644 index 9465cccd10580fb551e0866867262143ab1b2d4e..0000000000000000000000000000000000000000 --- "a/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" +++ /dev/null @@ -1,191 +0,0 @@ -import os -from toolbox import CatchException, update_ui, gen_time_str, promote_file_to_downloadzone -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from crazy_functions.crazy_utils import input_clipping - -def inspect_dependency(chatbot, history): - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import manim - return True - except: - chatbot.append(["导入依赖失败", "使用该模块需要额外依赖,安装方法:```pip install manim manimgl```"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return False - -def eval_manim(code): - import subprocess, sys, os, shutil - - with open('gpt_log/MyAnimation.py', 'w', encoding='utf8') as f: - f.write(code) - - def get_class_name(class_string): - import re - # Use regex to extract the class name - class_name = re.search(r'class (\w+)\(', class_string).group(1) - return class_name - - class_name = get_class_name(code) - - try: - time_str = gen_time_str() - subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"]) - shutil.move(f'media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{time_str}.mp4') - return f'gpt_log/{time_str}.mp4' - except subprocess.CalledProcessError as e: - output = e.output.decode() - print(f"Command returned non-zero exit status {e.returncode}: {output}.") - return f"Evaluating python script failed: {e.output}." - except: - print('generating mp4 failed') - return "Generating mp4 failed." - - -def get_code_block(reply): - import re - pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks - matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) != 1: - raise RuntimeError("GPT is not generating proper code.") - return matches[0].strip('python') # code block - -@CatchException -def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - # 清空历史,以免输入溢出 - history = [] - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "生成数学动画, 此插件处于开发阶段, 建议暂时不要使用, 作者: binary-husky, 插件初始化中 ..." 
- ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖, 如果缺少依赖, 则给出安装建议 - dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # 刷新界面 - if not dep_ok: return - - # 输入 - i_say = f'Generate a animation to show: ' + txt - demo = ["Here is some examples of manim", examples_of_manim()] - _, demo = input_clipping(inputs="", history=demo, max_token_limit=2560) - # 开始 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, - sys_prompt= - r"Write a animation script with 3blue1brown's manim. "+ - r"Please begin with `from manim import *`. " + - r"Answer me with a code block wrapped by ```." - ) - chatbot.append(["开始生成动画", "..."]) - history.extend([i_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - # 将代码转为动画 - code = get_code_block(gpt_say) - res = eval_manim(code) - - chatbot.append(("生成的视频文件路径", res)) - if os.path.exists(res): - promote_file_to_downloadzone(res, chatbot=chatbot) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - -# 在这里放一些网上搜集的demo,辅助gpt生成代码 -def examples_of_manim(): - return r""" - - -``` - -class MovingGroupToDestination(Scene): - def construct(self): - group = VGroup(Dot(LEFT), Dot(ORIGIN), Dot(RIGHT, color=RED), Dot(2 * RIGHT)).scale(1.4) - dest = Dot([4, 3, 0], color=YELLOW) - self.add(group, dest) - self.play(group.animate.shift(dest.get_center() - group[2].get_center())) - self.wait(0.5) - -``` - - -``` - -class LatexWithMovingFramebox(Scene): - def construct(self): - text=MathTex( - "\\frac{d}{dx}f(x)g(x)=","f(x)\\frac{d}{dx}g(x)","+", - "g(x)\\frac{d}{dx}f(x)" - ) - self.play(Write(text)) - framebox1 = SurroundingRectangle(text[1], buff = .1) - framebox2 = SurroundingRectangle(text[3], buff = .1) - self.play( - Create(framebox1), - ) - self.wait() - self.play( - ReplacementTransform(framebox1,framebox2), - ) - self.wait() - -``` - - - -``` - -class PointWithTrace(Scene): - def construct(self): - path = VMobject() - dot = Dot() - path.set_points_as_corners([dot.get_center(), dot.get_center()]) - def update_path(path): - previous_path = path.copy() - previous_path.add_points_as_corners([dot.get_center()]) - path.become(previous_path) - path.add_updater(update_path) - self.add(path, dot) - self.play(Rotating(dot, radians=PI, about_point=RIGHT, run_time=2)) - self.wait() - self.play(dot.animate.shift(UP)) - self.play(dot.animate.shift(LEFT)) - self.wait() - -``` - -``` - -# do not use get_graph, this funciton is deprecated - -class ExampleFunctionGraph(Scene): - def construct(self): - cos_func = FunctionGraph( - lambda t: np.cos(t) + 0.5 * np.cos(7 * t) + (1 / 7) * np.cos(14 * t), - color=RED, - ) - - sin_func_1 = FunctionGraph( - lambda t: np.sin(t) + 0.5 * np.sin(7 * t) + (1 / 7) * np.sin(14 * t), - color=BLUE, - ) - - sin_func_2 = FunctionGraph( - lambda t: np.sin(t) + 0.5 * np.sin(7 * t) + (1 / 7) * np.sin(14 * t), - x_range=[-4, 4], - color=GREEN, - ).move_to([0, 1, 0]) - - self.add(cos_func, sin_func_1, sin_func_2) - -``` -""" \ No newline at end of file diff --git "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" deleted file mode 100644 index 732c82c08db5f33b43452a33d26347814e4be5db..0000000000000000000000000000000000000000 --- 
"a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" +++ /dev/null @@ -1,109 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_exception -from .crazy_utils import read_and_clean_pdf_text -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import tiktoken - print('begin analysis on:', file_name) - - ############################## <第 0 步,切割PDF> ################################## - # 递归地切割PDF文件,每一块(尽量是完整的一个section,比如introduction,experiment等,必要时再进行切割) - # 的长度必须小于 2500 个 Token - file_content, page_one = read_and_clean_pdf_text(file_name) # (尝试)按照章节切割PDF - file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - - TOKEN_LIMIT_PER_FRAGMENT = 2500 - - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) - page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model']) - # 为了更好的效果,我们剥离Introduction之后的部分(如果有) - paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0] - - ############################## <第 1 步,从摘要中提取高价值信息,放到history中> ################################## - final_results = [] - final_results.append(paper_meta) - - ############################## <第 2 步,迭代地历遍整个文章,提取精炼信息> ################################## - i_say_show_user = f'首先你在英文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI - - iteration_results = [] - last_iteration_result = paper_meta # 初始值是摘要 - MAX_WORD_TOTAL = 4096 - n_fragment = len(paper_fragments) - if n_fragment >= 20: print('文章极长,不能达到预期效果') - for i in range(n_fragment): - NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment - i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}" - i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]} ...." - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 - llm_kwargs, chatbot, - history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果 - sys_prompt="Extract the main idea of this section, answer me with Chinese." 
# 提示 - ) - iteration_results.append(gpt_say) - last_iteration_result = gpt_say - - ############################## <第 3 步,整理history> ################################## - final_results.extend(iteration_results) - final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。') - # 接下来两句话只显示在界面上,不起实际作用 - i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。" - chatbot.append([i_say_show_user, gpt_say]) - - ############################## <第 4 步,设置一个token上限,防止回答时Token溢出> ################################## - from .crazy_utils import input_clipping - _, final_results = input_clipping("", final_results, max_token_limit=3200) - yield from update_ui(chatbot=chatbot, history=final_results) # 注意这里的历史记录被替代了 - - -@CatchException -def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "理解PDF论文内容,并且将结合上下文内容,进行学术解答。函数插件贡献者: Hanzoe, binary-husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import fitz - except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": - txt = '空空如也的输入栏' - report_exception(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_exception(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - txt = file_manifest[0] - # 开始正式执行任务 - yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git "a/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" "b/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" index 78aa45355616c2f51570a71f719ede0eff18b549..6fd2ee6d0d03426105886f2d6b503ae064babad3 100644 --- "a/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" +++ "b/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" @@ -1,56 +1,57 @@ -from toolbox import update_ui -from toolbox import CatchException, report_exception -from toolbox import write_history_to_file, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +from predict import predict_no_ui +from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down fast_debug = False -def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, os + +def 生成函数注释(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): + import time, glob, os print('begin analysis on:', file_manifest) for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: + with open(fp, 'r', encoding='utf-8') as f: file_content = f.read() i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 
```{file_content}```' i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}' chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + print('[1] yield chatbot, history') + yield chatbot, history, '正常' if not fast_debug: msg = '正常' # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # 带超时倒计时 + gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 + print('[2] end gpt req') chatbot[-1] = (i_say_show_user, gpt_say) history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 + print('[3] yield chatbot, history') + yield chatbot, history, msg + print('[4] next') if not fast_debug: time.sleep(2) if not fast_debug: - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) + res = write_results_to_file(history) chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 + yield chatbot, history, msg @CatchException -def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): +def 批量生成函数注释(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + yield chatbot, history, '正常' return - yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) + yield from 生成函数注释(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) diff --git "a/crazy_functions/\347\224\237\346\210\220\345\244\232\347\247\215Mermaid\345\233\276\350\241\250.py" "b/crazy_functions/\347\224\237\346\210\220\345\244\232\347\247\215Mermaid\345\233\276\350\241\250.py" deleted file mode 100644 index dc01e9405343f39ee2e19648a0045b230a73f163..0000000000000000000000000000000000000000 --- "a/crazy_functions/\347\224\237\346\210\220\345\244\232\347\247\215Mermaid\345\233\276\350\241\250.py" +++ /dev/null @@ -1,296 +0,0 @@ -from toolbox import CatchException, update_ui, report_exception -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime - -#以下是每类图表的PROMPT -SELECT_PROMPT = """ -“{subject}” -============= -以上是从文章中提取的摘要,将会使用这些摘要绘制图表。请你选择一个合适的图表类型: -1 流程图 -2 序列图 -3 类图 -4 饼图 -5 甘特图 -6 状态图 -7 实体关系图 -8 象限提示图 -不需要解释原因,仅需要输出单个不带任何标点符号的数字。 -""" -#没有思维导图!!!测试发现模型始终会优先选择思维导图 -#流程图 -PROMPT_1 = """ -请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例: -```mermaid -graph TD - P(编程) --> L1(Python) - P(编程) --> L2(C) - 
P(编程) --> L3(C++) - P(编程) --> L4(Javascipt) - P(编程) --> L5(PHP) -``` -""" -#序列图 -PROMPT_2 = """ -请你给出围绕“{subject}”的序列图,使用mermaid语法,mermaid语法举例: -```mermaid -sequenceDiagram - participant A as 用户 - participant B as 系统 - A->>B: 登录请求 - B->>A: 登录成功 - A->>B: 获取数据 - B->>A: 返回数据 -``` -""" -#类图 -PROMPT_3 = """ -请你给出围绕“{subject}”的类图,使用mermaid语法,mermaid语法举例: -```mermaid -classDiagram - Class01 <|-- AveryLongClass : Cool - Class03 *-- Class04 - Class05 o-- Class06 - Class07 .. Class08 - Class09 --> C2 : Where am i? - Class09 --* C3 - Class09 --|> Class07 - Class07 : equals() - Class07 : Object[] elementData - Class01 : size() - Class01 : int chimp - Class01 : int gorilla - Class08 <--> C2: Cool label -``` -""" -#饼图 -PROMPT_4 = """ -请你给出围绕“{subject}”的饼图,使用mermaid语法,mermaid语法举例: -```mermaid -pie title Pets adopted by volunteers - "狗" : 386 - "猫" : 85 - "兔子" : 15 -``` -""" -#甘特图 -PROMPT_5 = """ -请你给出围绕“{subject}”的甘特图,使用mermaid语法,mermaid语法举例: -```mermaid -gantt - title 项目开发流程 - dateFormat YYYY-MM-DD - section 设计 - 需求分析 :done, des1, 2024-01-06,2024-01-08 - 原型设计 :active, des2, 2024-01-09, 3d - UI设计 : des3, after des2, 5d - section 开发 - 前端开发 :2024-01-20, 10d - 后端开发 :2024-01-20, 10d -``` -""" -#状态图 -PROMPT_6 = """ -请你给出围绕“{subject}”的状态图,使用mermaid语法,mermaid语法举例: -```mermaid -stateDiagram-v2 - [*] --> Still - Still --> [*] - Still --> Moving - Moving --> Still - Moving --> Crash - Crash --> [*] -``` -""" -#实体关系图 -PROMPT_7 = """ -请你给出围绕“{subject}”的实体关系图,使用mermaid语法,mermaid语法举例: -```mermaid -erDiagram - CUSTOMER ||--o{ ORDER : places - ORDER ||--|{ LINE-ITEM : contains - CUSTOMER { - string name - string id - } - ORDER { - string orderNumber - date orderDate - string customerID - } - LINE-ITEM { - number quantity - string productID - } -``` -""" -#象限提示图 -PROMPT_8 = """ -请你给出围绕“{subject}”的象限图,使用mermaid语法,mermaid语法举例: -```mermaid -graph LR - A[Hard skill] --> B(Programming) - A[Hard skill] --> C(Design) - D[Soft skill] --> E(Coordination) - D[Soft skill] --> F(Communication) -``` -""" -#思维导图 -PROMPT_9 = """ -{subject} -========== -请给出上方内容的思维导图,充分考虑其之间的逻辑,使用mermaid语法,mermaid语法举例: -```mermaid -mindmap - root((mindmap)) - Origins - Long history - ::icon(fa fa-book) - Popularisation - British popular psychology author Tony Buzan - Research - On effectiveness
<br/>
and features - On Automatic creation - Uses - Creative techniques - Strategic planning - Argument mapping - Tools - Pen and paper - Mermaid -``` -""" - -def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs): - ############################## <第 0 步,切割输入> ################################## - # 借用PDF切割中的函数对文本进行切割 - TOKEN_LIMIT_PER_FRAGMENT = 2500 - txt = str(history).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) - ############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ################################## - results = [] - MAX_WORD_TOTAL = 4096 - n_txt = len(txt) - last_iteration_result = "从以下文本中提取摘要。" - if n_txt >= 20: print('文章极长,不能达到预期效果') - for i in range(n_txt): - NUM_OF_WORD = MAX_WORD_TOTAL // n_txt - i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}" - i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...." - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 - llm_kwargs, chatbot, - history=["The main content of the previous section is?", last_iteration_result], # 迭代上一次的结果 - sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese." # 提示 - ) - results.append(gpt_say) - last_iteration_result = gpt_say - ############################## <第 2 步,根据整理的摘要选择图表类型> ################################## - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - gpt_say = plugin_kwargs.get("advanced_arg", "") #将图表类型参数赋值为插件参数 - results_txt = '\n'.join(results) #合并摘要 - if gpt_say not in ['1','2','3','4','5','6','7','8','9']: #如插件参数不正确则使用对话模型判断 - i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI - i_say = SELECT_PROMPT.format(subject=results_txt) - i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图。由于不管提供文本是什么,模型大概率认为"思维导图"最合适,因此思维导图仅能通过参数调用。' - for i in range(3): - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="" - ) - if gpt_say in ['1','2','3','4','5','6','7','8','9']: #判断返回是否正确 - break - if gpt_say not in ['1','2','3','4','5','6','7','8','9']: - gpt_say = '1' - ############################## <第 3 步,根据选择的图表类型绘制图表> ################################## - if gpt_say == '1': - i_say = PROMPT_1.format(subject=results_txt) - elif gpt_say == '2': - i_say = PROMPT_2.format(subject=results_txt) - elif gpt_say == '3': - i_say = PROMPT_3.format(subject=results_txt) - elif gpt_say == '4': - i_say = PROMPT_4.format(subject=results_txt) - elif gpt_say == '5': - i_say = PROMPT_5.format(subject=results_txt) - elif gpt_say == '6': - i_say = PROMPT_6.format(subject=results_txt) - elif gpt_say == '7': - i_say = PROMPT_7.replace("{subject}", results_txt) #由于实体关系图用到了{}符号 - elif gpt_say == '8': - i_say = PROMPT_8.format(subject=results_txt) - elif gpt_say == '9': - i_say = 
PROMPT_9.format(subject=results_txt) - i_say_show_user = f'请根据判断结果绘制相应的图表。如需绘制思维导图请使用参数调用,同时过大的图表可能需要复制到在线编辑器中进行渲染。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="" - ) - history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - -@CatchException -def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - import os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\ - \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if os.path.exists(txt): #如输入区无内容则直接解析历史记录 - from crazy_functions.pdf_fns.parse_word import extract_text_from_files - file_exist, final_result, page_one, file_manifest, excption = extract_text_from_files(txt, chatbot, history) - else: - file_exist = False - excption = "" - file_manifest = [] - - if excption != "": - if excption == "word": - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。") - - elif excption == "pdf": - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - - elif excption == "word_pip": - report_exception(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") - - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - else: - if not file_exist: - history.append(txt) #如输入区不是文件则将输入区内容加入历史记录 - i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI - yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs) - else: - file_num = len(file_manifest) - for i in range(file_num): #依次处理文件 - i_say_show_user = f"[{i+1}/{file_num}]处理文件{file_manifest[i]}"; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI - history = [] #如输入区内容为文件则清空历史记录 - history.append(final_result[i]) - yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs) \ No newline at end of file diff --git "a/crazy_functions/\347\237\245\350\257\206\345\272\223\351\227\256\347\255\224.py" "b/crazy_functions/\347\237\245\350\257\206\345\272\223\351\227\256\347\255\224.py" deleted file mode 100644 index f3c7c9e3fc95305bc470d122c89a12f1786c94ee..0000000000000000000000000000000000000000 --- "a/crazy_functions/\347\237\245\350\257\206\345\272\223\351\227\256\347\255\224.py" +++ /dev/null @@ -1,117 +0,0 @@ -from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg, get_log_folder, get_user -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything - -install_msg =""" - -1. python -m pip install torch --index-url https://download.pytorch.org/whl/cpu - -2. python -m pip install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade - -3. 
python -m pip install unstructured[all-docs] --upgrade - -4. python -c 'import nltk; nltk.download("punkt")' -""" - -@CatchException -def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - - # < --------------------读取参数--------------- > - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - kai_id = plugin_kwargs.get("advanced_arg", 'default') - - chatbot.append((f"向`{kai_id}`知识库中添加文件。", "[Local Message] 从一批文件(txt, md, tex)中读取数据构建知识库, 然后进行问答。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # resolve deps - try: - # from zh_langchain import construct_vector_store - # from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from crazy_functions.vector_fns.vector_database import knowledge_archive_interface - except Exception as e: - chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # from .crazy_utils import try_install_deps - # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain']) - # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history) - return - - # < --------------------读取文件--------------- > - file_manifest = [] - spl = ["txt", "doc", "docx", "email", "epub", "html", "json", "md", "msg", "pdf", "ppt", "pptx", "rtf"] - for sp in spl: - _, file_manifest_tmp, _ = get_files_from_everything(txt, type=f'.{sp}') - file_manifest += file_manifest_tmp - - if len(file_manifest) == 0: - chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # < -------------------预热文本向量化模组--------------- > - chatbot.append(['
'.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - print('Checking Text2vec ...') - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 - HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese") - - # < -------------------构建知识库--------------- > - chatbot.append(['
'.join(file_manifest), "正在构建知识库..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - print('Establishing knowledge archive ...') - with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络 - kai = knowledge_archive_interface() - vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store') - kai.feed_archive(file_manifest=file_manifest, vs_path=vs_path, id=kai_id) - kai_files = kai.get_loaded_file(vs_path=vs_path) - kai_files = '
'.join(kai_files) - # chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"]) - # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id() - # chatbot._cookies['lock_plugin'] = 'crazy_functions.知识库文件注入->读取知识库作答' - # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"]) - chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - -@CatchException -def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request=-1): - # resolve deps - try: - # from zh_langchain import construct_vector_store - # from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from crazy_functions.vector_fns.vector_database import knowledge_archive_interface - except Exception as e: - chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # from .crazy_utils import try_install_deps - # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain']) - # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history) - return - - # < ------------------- --------------- > - kai = knowledge_archive_interface() - - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - kai_id = plugin_kwargs.get("advanced_arg", 'default') - vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store') - resp, prompt = kai.answer_with_archive_by_id(txt, kai_id, vs_path) - - chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt=system_prompt - ) - history.extend((prompt, gpt_say)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 diff --git "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" deleted file mode 100644 index 346492dbf5e07669856898b09b241f9c247c0cc6..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" +++ /dev/null @@ -1,106 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping -import requests -from bs4 import BeautifulSoup -from request_llms.bridge_all import model_info - -def google(query, proxies): - query = query # 在此处替换您要搜索的关键词 - url = f"https://www.google.com/search?q={query}" - headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'} - response = requests.get(url, headers=headers, proxies=proxies) - soup = BeautifulSoup(response.content, 'html.parser') - results = [] - for g in soup.find_all('div', class_='g'): - anchors = g.find_all('a') - if anchors: - link = anchors[0]['href'] - if link.startswith('/url?q='): - link = link[7:] - if not link.startswith('http'): - continue - title = g.find('h3').text - item = {'title': title, 'link': link} - results.append(item) - - for r in results: - print(r['link']) - return 
results - -def scrape_text(url, proxies) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', - 'Content-Type': 'text/plain', - } - try: - response = requests.get(url, headers=headers, proxies=proxies, timeout=8) - if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding - except: - return "无法连接到该网页" - soup = BeautifulSoup(response.text, "html.parser") - for script in soup(["script", "style"]): - script.extract() - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - return text - -@CatchException -def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append((f"请结合互联网信息回答以下问题:{txt}", - "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第1步:爬取搜索引擎的结果 > ------------- - from toolbox import get_conf - proxies = get_conf('proxies') - urls = google(txt, proxies) - history = [] - if len(urls) == 0: - chatbot.append((f"结论:{txt}", - "[Local Message] 受到google限制,无法从google获取信息!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - return - # ------------- < 第2步:依次访问网页 > ------------- - max_search_result = 5 # 最多收纳多少个网页的结果 - for index, url in enumerate(urls[:max_search_result]): - res = scrape_text(url['link'], proxies) - history.extend([f"第{index}份搜索结果:", res]) - chatbot.append([f"第{index}份搜索结果:", res[:500]+"......"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第3步:ChatGPT综合 > ------------- - i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}" - i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token - inputs=i_say, - history=history, - max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4 - ) - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - diff --git "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" deleted file mode 100644 index eff6f8f9a97cd24abf504ba994c62d03b51c3346..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" +++ /dev/null @@ -1,106 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping -import requests -from bs4 import BeautifulSoup -from 
request_llms.bridge_all import model_info - - -def bing_search(query, proxies=None): - query = query - url = f"https://cn.bing.com/search?q={query}" - headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'} - response = requests.get(url, headers=headers, proxies=proxies) - soup = BeautifulSoup(response.content, 'html.parser') - results = [] - for g in soup.find_all('li', class_='b_algo'): - anchors = g.find_all('a') - if anchors: - link = anchors[0]['href'] - if not link.startswith('http'): - continue - title = g.find('h2').text - item = {'title': title, 'link': link} - results.append(item) - - for r in results: - print(r['link']) - return results - - -def scrape_text(url, proxies) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', - 'Content-Type': 'text/plain', - } - try: - response = requests.get(url, headers=headers, proxies=proxies, timeout=8) - if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding - except: - return "无法连接到该网页" - soup = BeautifulSoup(response.text, "html.parser") - for script in soup(["script", "style"]): - script.extract() - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - return text - -@CatchException -def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append((f"请结合互联网信息回答以下问题:{txt}", - "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第1步:爬取搜索引擎的结果 > ------------- - from toolbox import get_conf - proxies = get_conf('proxies') - urls = bing_search(txt, proxies) - history = [] - if len(urls) == 0: - chatbot.append((f"结论:{txt}", - "[Local Message] 受到bing限制,无法从bing获取信息!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - return - # ------------- < 第2步:依次访问网页 > ------------- - max_search_result = 8 # 最多收纳多少个网页的结果 - for index, url in enumerate(urls[:max_search_result]): - res = scrape_text(url['link'], proxies) - history.extend([f"第{index}份搜索结果:", res]) - chatbot.append([f"第{index}份搜索结果:", res[:500]+"......"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第3步:ChatGPT综合 > ------------- - i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}" - i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token - inputs=i_say, - history=history, - max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4 - ) - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。" - ) - chatbot[-1] = (i_say, 
gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - diff --git "a/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" "b/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" deleted file mode 100644 index 27f449969d7b6aee345040a25c60656356102d74..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" +++ /dev/null @@ -1,180 +0,0 @@ -""" -Explanation of the Void Terminal Plugin: - -Please describe in natural language what you want to do. - -1. You can open the plugin's dropdown menu to explore various capabilities of this project, and then describe your needs in natural language, for example: -- "Please call the plugin to translate a PDF paper for me. I just uploaded the paper to the upload area." -- "Please use the plugin to translate a PDF paper, with the address being https://www.nature.com/articles/s41586-019-1724-z.pdf." -- "Generate an image with blooming flowers and lush green grass using the plugin." -- "Translate the README using the plugin. The GitHub URL is https://github.com/facebookresearch/co-tracker." -- "Translate an Arxiv paper for me. The Arxiv ID is 1812.10695. Remember to use the plugin and don't do it manually!" -- "I don't like the current interface color. Modify the configuration and change the theme to THEME="High-Contrast"." -- "Could you please explain the structure of the Transformer network?" - -2. If you use keywords like "call the plugin xxx", "modify the configuration xxx", "please", etc., your intention can be recognized more accurately. - -3. Your intention can be recognized more accurately when using powerful models like GPT4. This plugin is relatively new, so please feel free to provide feedback on GitHub. - -4. Now, if you need to process a file, please upload the file (drag the file to the file upload area) or describe the path to the file. - -5. If you don't need to upload a file, you can simply repeat your command again. -""" -explain_msg = """ -## 虚空终端插件说明: - -1. 请用**自然语言**描述您需要做什么。例如: - - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了」 - - 「请调用插件翻译PDF论文,地址为https://openreview.net/pdf?id=rJl0r3R9KX」 - - 「把Arxiv论文翻译成中文PDF,arxiv论文的ID是1812.10695,记得用插件!」 - - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现」 - - 「用插件翻译README,Github网址是https://github.com/facebookresearch/co-tracker」 - - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"」 - - 「请调用插件,解析python源代码项目,代码我刚刚打包拖到上传区了」 - - 「请问Transformer网络的结构是怎样的?」 - -2. 您可以打开插件下拉菜单以了解本项目的各种能力。 - -3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词,您的意图可以被识别的更准确。 - -4. 建议使用 GPT3.5 或更强的模型,弱模型可能无法理解您的想法。该插件诞生时间不长,欢迎您前往Github反馈问题。 - -5. 现在,如果需要处理文件,请您上传文件(将文件拖动到文件上传区),或者描述文件所在的路径。 - -6. 
如果不需要上传文件,现在您只需要再次重复一次您的指令即可。 -""" - -from pydantic import BaseModel, Field -from typing import List -from toolbox import CatchException, update_ui, is_the_upload_folder -from toolbox import update_ui_lastest_msg, disable_auto_promotion -from request_llms.bridge_all import predict_no_ui_long_connection -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from crazy_functions.crazy_utils import input_clipping -from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError -from crazy_functions.vt_fns.vt_state import VoidTerminalState -from crazy_functions.vt_fns.vt_modify_config import modify_configuration_hot -from crazy_functions.vt_fns.vt_modify_config import modify_configuration_reboot -from crazy_functions.vt_fns.vt_call_plugin import execute_plugin - -class UserIntention(BaseModel): - user_prompt: str = Field(description="the content of user input", default="") - intention_type: str = Field(description="the type of user intention, choose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']", default="ExecutePlugin") - user_provide_file: bool = Field(description="whether the user provides a path to a file", default=False) - user_provide_url: bool = Field(description="whether the user provides a url", default=False) - - -def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt=system_prompt - ) - chatbot[-1] = [txt, gpt_say] - history.extend([txt, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - pass - - -explain_intention_to_user = { - 'Chat': "聊天对话", - 'ExecutePlugin': "调用插件", - 'ModifyConfiguration': "修改配置", -} - - -def analyze_intention_with_simple_rules(txt): - user_intention = UserIntention() - user_intention.user_prompt = txt - is_certain = False - - if '请问' in txt: - is_certain = True - user_intention.intention_type = 'Chat' - - if '用插件' in txt: - is_certain = True - user_intention.intention_type = 'ExecutePlugin' - - if '修改配置' in txt: - is_certain = True - user_intention.intention_type = 'ModifyConfiguration' - - return is_certain, user_intention - - -@CatchException -def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - disable_auto_promotion(chatbot=chatbot) - # 获取当前虚空终端状态 - state = VoidTerminalState.get_state(chatbot) - appendix_msg = "" - - # 用简单的关键词检测用户意图 - is_certain, _ = analyze_intention_with_simple_rules(txt) - if is_the_upload_folder(txt): - state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False) - appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。" - - if is_certain or (state.has_provided_explaination): - # 如果意图明确,跳过提示环节 - state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True) - state.unlock_plugin(chatbot=chatbot) - yield from update_ui(chatbot=chatbot, history=history) - yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request) - return - else: - # 如果意图模糊,提示 - state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True) - state.lock_plugin(chatbot=chatbot) - chatbot.append(("虚空终端状态:", explain_msg+appendix_msg)) - yield from update_ui(chatbot=chatbot, history=history) - return - - - -def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] - chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}")) - yield from 
update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # ⭐ ⭐ ⭐ 分析用户意图 - is_certain, user_intention = analyze_intention_with_simple_rules(txt) - if not is_certain: - yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0) - gpt_json_io = GptJsonIO(UserIntention) - rf_req = "\nchoose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']" - inputs = "Analyze the intention of the user according to following user input: \n\n" + \ - ">> " + (txt+rf_req).rstrip('\n').replace('\n','\n>> ') + '\n\n' + gpt_json_io.format_instructions - run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection( - inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[]) - analyze_res = run_gpt_fn(inputs, "") - try: - user_intention = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn) - lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", - except JsonStringError as e: - yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0) - return - else: - pass - - yield from update_ui_lastest_msg( - lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}", - chatbot=chatbot, history=history, delay=0) - - # 用户意图: 修改本项目的配置 - if user_intention.intention_type == 'ModifyConfiguration': - yield from modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention) - - # 用户意图: 调度插件 - if user_intention.intention_type == 'ExecutePlugin': - yield from execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention) - - # 用户意图: 聊天 - if user_intention.intention_type == 'Chat': - yield from chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention) - - return - diff --git "a/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" "b/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" deleted file mode 100644 index 2f2c088378822284c50487ea17473bde47214f58..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" +++ /dev/null @@ -1,146 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_exception -from toolbox import write_history_to_file, promote_file_to_downloadzone -fast_debug = True - - -class PaperFileGroup(): - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llms.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - 将长文本分离开来 - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit - segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append( - self.file_paths[index] 
+ f".part-{j}.txt") - - - -def parseNotebook(filename, enable_markdown=1): - import json - - CodeBlocks = [] - with open(filename, 'r', encoding='utf-8', errors='replace') as f: - notebook = json.load(f) - for cell in notebook['cells']: - if cell['cell_type'] == 'code' and cell['source']: - # remove blank lines - cell['source'] = [line for line in cell['source'] if line.strip() - != ''] - CodeBlocks.append("".join(cell['source'])) - elif enable_markdown and cell['cell_type'] == 'markdown' and cell['source']: - cell['source'] = [line for line in cell['source'] if line.strip() - != ''] - CodeBlocks.append("Markdown:"+"".join(cell['source'])) - - Code = "" - for idx, code in enumerate(CodeBlocks): - Code += f"This is {idx+1}th code block: \n" - Code += code+"\n" - - return Code - - -def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - enable_markdown = plugin_kwargs.get("advanced_arg", "1") - try: - enable_markdown = int(enable_markdown) - except ValueError: - enable_markdown = 1 - - pfg = PaperFileGroup() - - for fp in file_manifest: - file_content = parseNotebook(fp, enable_markdown=enable_markdown) - pfg.file_paths.append(fp) - pfg.file_contents.append(file_content) - - # <-------- 拆分过长的IPynb文件 ----------> - pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - inputs_array = [r"This is a Jupyter Notebook file, tell me about Each Block in Chinese. Focus Just On Code." + - r"If a block starts with `Markdown` which means it's a markdown block in ipynbipynb. " + - r"Start a new line for a block and block num use Chinese." 
+ - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"{f}的分析如下" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional programmer."] * n_split - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # OpenAI所允许的最大并行过载 - scroller_max_len=80 - ) - - # <-------- 整理结果,退出 ----------> - block_result = " \n".join(gpt_response_collection) - chatbot.append(("解析的结果如下", block_result)) - history.extend(["解析的结果如下", block_result]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------- 写入文件,退出 ----------> - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - -@CatchException -def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - chatbot.append([ - "函数插件功能?", - "对IPynb文件进行解析。Contributor: codycjy."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - history = [] # 清空历史 - import glob - import os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": - txt = '空空如也的输入栏' - report_exception(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if txt.endswith('.ipynb'): - file_manifest = [txt] - else: - file_manifest = [f for f in glob.glob( - f'{project_folder}/**/*.ipynb', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, ) diff --git "a/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" "b/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" index dfd0de0ef5c1d613cc57a5fe80efd5d594991d41..8db6dc4c465608c50d075d9e39883fd9ee9108cc 100644 --- "a/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" +++ "b/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" @@ -1,375 +1,149 @@ -from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion -from toolbox import CatchException, report_exception, write_history_to_file -from .crazy_utils import input_clipping +from predict import predict_no_ui +from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down +fast_debug = False -def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import os, copy - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - disable_auto_promotion(chatbot=chatbot) - - summary_batch_isolation = True - inputs_array = [] - inputs_show_user_array = [] - history_array = [] - sys_prompt_array = [] - report_part_1 = [] - - assert len(file_manifest) <= 512, "源文件太多(超过512个), 
请缩减输入文件的数量。或者,您也可以选择删除此行警告,并修改代码拆分file_manifest列表,从而实现分批次处理。" - ############################## <第一步,逐个文件分析,多线程> ################################## +def 解析源代码(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): + import time, glob, os + print('begin analysis on:', file_manifest) for index, fp in enumerate(file_manifest): - # 读取文件 - with open(fp, 'r', encoding='utf-8', errors='replace') as f: + with open(fp, 'r', encoding='utf-8') as f: file_content = f.read() + prefix = "接下来请你逐文件分析下面的工程" if index==0 else "" i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {fp}' - # 装载请求内容 - inputs_array.append(i_say) - inputs_show_user_array.append(i_say_show_user) - history_array.append([]) - sys_prompt_array.append("你是一个程序架构分析师,正在分析一个源代码项目。你的回答必须简单明了。") - - # 文件读取完成,对每一个源代码文件,生成一个请求线程,发送到chatgpt进行分析 - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array = inputs_array, - inputs_show_user_array = inputs_show_user_array, - history_array = history_array, - sys_prompt_array = sys_prompt_array, - llm_kwargs = llm_kwargs, - chatbot = chatbot, - show_user_at_complete = True - ) - - # 全部文件解析完成,结果写入文件,准备对工程源代码进行汇总分析 - report_part_1 = copy.deepcopy(gpt_response_collection) - history_to_return = report_part_1 - res = write_history_to_file(report_part_1) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。")) - yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面 - - ############################## <第二步,综合,单线程,分组+迭代处理> ################################## - batchsize = 16 # 10个文件为一组 - report_part_2 = [] - previous_iteration_files = [] - last_iteration_result = "" - while True: - if len(file_manifest) == 0: break - this_iteration_file_manifest = file_manifest[:batchsize] - this_iteration_gpt_response_collection = gpt_response_collection[:batchsize*2] - file_rel_path = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)] - # 把“请对下面的程序文件做一个概述” 替换成 精简的 "文件名:{all_file[index]}" - for index, content in enumerate(this_iteration_gpt_response_collection): - if index%2==0: this_iteration_gpt_response_collection[index] = f"{file_rel_path[index//2]}" # 只保留文件名节省token - this_iteration_files = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)] - previous_iteration_files.extend(this_iteration_files) - previous_iteration_files_string = ', '.join(previous_iteration_files) - current_iteration_focus = ', '.join(this_iteration_files) - if summary_batch_isolation: focus = current_iteration_focus - else: focus = previous_iteration_files_string - i_say = f'用一张Markdown表格简要描述以下文件的功能:{focus}。根据以上分析,用一句话概括程序的整体功能。' - if last_iteration_result != "": - sys_prompt_additional = "已知某些代码的局部作用是:" + last_iteration_result + "\n请继续分析其他源代码,从而更全面地理解项目的整体功能。" - else: - sys_prompt_additional = "" - inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。' - this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection) - this_iteration_history.append(last_iteration_result) - # 裁剪input - inputs, this_iteration_history_feed = input_clipping(inputs=i_say, history=this_iteration_history, max_token_limit=2560) - result = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=inputs, 
inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot, - history=this_iteration_history_feed, # 迭代之前的分析 - sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional) + i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' + chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) + yield chatbot, history, '正常' + + if not fast_debug: + msg = '正常' + + # ** gpt request ** + gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 + + chatbot[-1] = (i_say_show_user, gpt_say) + history.append(i_say_show_user); history.append(gpt_say) + yield chatbot, history, msg + if not fast_debug: time.sleep(2) + + all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) + i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{all_file})。' + chatbot.append((i_say, "[Local Message] waiting gpt response.")) + yield chatbot, history, '正常' + + if not fast_debug: + msg = '正常' + # ** gpt request ** + gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 - diagram_code = make_diagram(this_iteration_files, result, this_iteration_history_feed) - summary = "请用一句话概括这些文件的整体功能。\n\n" + diagram_code - summary_result = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=summary, - inputs_show_user=summary, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[i_say, result], # 迭代之前的分析 - sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional) + chatbot[-1] = (i_say, gpt_say) + history.append(i_say); history.append(gpt_say) + yield chatbot, history, msg + res = write_results_to_file(history) + chatbot.append(("完成了吗?", res)) + yield chatbot, history, msg - report_part_2.extend([i_say, result]) - last_iteration_result = summary_result - file_manifest = file_manifest[batchsize:] - gpt_response_collection = gpt_response_collection[batchsize*2:] - ############################## ################################## - history_to_return.extend(report_part_2) - res = write_history_to_file(history_to_return) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面 -def make_diagram(this_iteration_files, result, this_iteration_history_feed): - from crazy_functions.diagram_fns.file_tree import build_file_tree_mermaid_diagram - return build_file_tree_mermaid_diagram(this_iteration_history_feed[0::2], this_iteration_history_feed[1::2], "项目示意图") @CatchException -def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): +def 解析项目本身(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): history = [] # 清空历史,以免输入溢出 - import glob - file_manifest = [f for f in glob.glob('./*.py')] + \ - [f for f in glob.glob('./*/*.py')] - project_folder = './' - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) + import time, glob, os + file_manifest = [f for f in glob.glob('*.py')] + for index, fp in enumerate(file_manifest): + # if 'test_project' in fp: continue + with open(fp, 'r', encoding='utf-8') as f: + file_content = f.read() + + prefix = 
"接下来请你分析自己的程序构成,别紧张," if index==0 else "" + i_say = prefix + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```' + i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' + chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) + yield chatbot, history, '正常' + + if not fast_debug: + # ** gpt request ** + # gpt_say = predict_no_ui(api, inputs=i_say, top_p=top_p, temperature=temperature) + gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 + + chatbot[-1] = (i_say_show_user, gpt_say) + history.append(i_say_show_user); history.append(gpt_say) + yield chatbot, history, '正常' + time.sleep(2) + + i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。' + chatbot.append((i_say, "[Local Message] waiting gpt response.")) + yield chatbot, history, '正常' + + if not fast_debug: + # ** gpt request ** + # gpt_say = predict_no_ui(api, inputs=i_say, top_p=top_p, temperature=temperature, history=history) + gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 + + chatbot[-1] = (i_say, gpt_say) + history.append(i_say); history.append(gpt_say) + yield chatbot, history, '正常' + res = write_results_to_file(history) + chatbot.append(("完成了吗?", res)) + yield chatbot, history, '正常' @CatchException -def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): +def 解析一个Python项目(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + yield chatbot, history, '正常' return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) + yield from 解析源代码(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) -@CatchException -def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @CatchException -def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): +def 
解析一个C项目的头文件(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \ + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \ + # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + yield chatbot, history, '正常' return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) + yield from 解析源代码(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) @CatchException -def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): +def 解析一个C项目(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = 
f"找不到任何.h头文件: {txt}") + yield chatbot, history, '正常' return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - + yield from 解析源代码(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) -@CatchException -def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.vue', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.less', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.sass', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.wxml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.wxss', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/go.mod', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - -@CatchException -def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, 
a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - -@CatchException -def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - txt_pattern = plugin_kwargs.get("advanced_arg") - txt_pattern = txt_pattern.replace(",", ",") - # 将要匹配的模式(例如: *.c, *.cpp, *.py, config.toml) - pattern_include = [_.lstrip(" ,").rstrip(" ,") for _ in txt_pattern.split(",") if _ != "" and not _.strip().startswith("^")] - if not pattern_include: pattern_include = ["*"] # 不输入即全部匹配 - # 将要忽略匹配的文件后缀(例如: ^*.c, ^*.cpp, ^*.py) - pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")] - pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz'] # 避免解析压缩文件 - # 将要忽略匹配的文件名(例如: ^README.md) - pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", "\.") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")] - # 生成正则表达式 - pattern_except = '/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$' - pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else '' - - history.clear() - import glob, os, re - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件 - 
maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)] - if len(maybe_dir)>0 and maybe_dir[0].endswith('.extract'): - extract_folder_path = maybe_dir[0] - else: - extract_folder_path = project_folder - # 按输入的匹配模式寻找上传的非压缩文件和已解压的文件 - file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \ - os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) \ No newline at end of file diff --git "a/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" "b/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" deleted file mode 100644 index 069d4407fb575216a5b640da9467cba616f3963f..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" +++ /dev/null @@ -1,63 +0,0 @@ -from toolbox import CatchException, update_ui, get_conf -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime -@CatchException -def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS') - chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt=system_prompt, - retry_times_at_unknown_error=0 - ) - - history.append(txt) - history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - -@CatchException -def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo') # 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - - chatbot.append((txt, f"正在同时咨询{llm_kwargs['llm_model']}")) - yield from update_ui(chatbot=chatbot, history=history) # 
刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt=system_prompt, - retry_times_at_unknown_error=0 - ) - - history.append(txt) - history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 \ No newline at end of file diff --git "a/crazy_functions/\350\257\255\351\237\263\345\212\251\346\211\213.py" "b/crazy_functions/\350\257\255\351\237\263\345\212\251\346\211\213.py" deleted file mode 100644 index 8af0fd999966bb44a79c68525a3192422233a3e5..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\257\255\351\237\263\345\212\251\346\211\213.py" +++ /dev/null @@ -1,192 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, get_conf, markdown_convertion -from crazy_functions.crazy_utils import input_clipping -from crazy_functions.agent_fns.watchdog import WatchDog -from request_llms.bridge_all import predict_no_ui_long_connection -import threading, time -import numpy as np -from .live_audio.aliyunASR import AliyunASR -import json -import re - - -def chatbot2history(chatbot): - history = [] - for c in chatbot: - for q in c: - if q in ["[ 请讲话 ]", "[ 等待GPT响应 ]", "[ 正在等您说完问题 ]"]: - continue - elif q.startswith("[ 正在等您说完问题 ]"): - continue - else: - history.append(q.strip('
<div class="markdown-body">').strip('</div>').strip('<p>').strip('</p>
')) - return history - -def visualize_audio(chatbot, audio_shape): - if len(chatbot) == 0: chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"]) - chatbot[-1] = list(chatbot[-1]) - p1 = '「' - p2 = '」' - chatbot[-1][-1] = re.sub(p1+r'(.*)'+p2, '', chatbot[-1][-1]) - chatbot[-1][-1] += (p1+f"`{audio_shape}`"+p2) - -class AsyncGptTask(): - def __init__(self) -> None: - self.observe_future = [] - self.observe_future_chatbot_index = [] - - def gpt_thread_worker(self, i_say, llm_kwargs, history, sys_prompt, observe_window, index): - try: - MAX_TOKEN_ALLO = 2560 - i_say, history = input_clipping(i_say, history, max_token_limit=MAX_TOKEN_ALLO) - gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt, - observe_window=observe_window[index], console_slience=True) - except ConnectionAbortedError as token_exceed_err: - print('至少一个线程任务Token溢出而失败', token_exceed_err) - except Exception as e: - print('至少一个线程任务意外失败', e) - - def add_async_gpt_task(self, i_say, chatbot_index, llm_kwargs, history, system_prompt): - self.observe_future.append([""]) - self.observe_future_chatbot_index.append(chatbot_index) - cur_index = len(self.observe_future)-1 - th_new = threading.Thread(target=self.gpt_thread_worker, args=(i_say, llm_kwargs, history, system_prompt, self.observe_future, cur_index)) - th_new.daemon = True - th_new.start() - - def update_chatbot(self, chatbot): - for of, ofci in zip(self.observe_future, self.observe_future_chatbot_index): - try: - chatbot[ofci] = list(chatbot[ofci]) - chatbot[ofci][1] = markdown_convertion(of[0]) - except: - self.observe_future = [] - self.observe_future_chatbot_index = [] - return chatbot - -class InterviewAssistant(AliyunASR): - def __init__(self): - self.capture_interval = 0.5 # second - self.stop = False - self.parsed_text = "" # 下个句子中已经说完的部分, 由 test_on_result_chg() 写入 - self.parsed_sentence = "" # 某段话的整个句子, 由 test_on_sentence_end() 写入 - self.buffered_sentence = "" # - self.audio_shape = "" # 音频的可视化表现, 由 audio_convertion_thread() 写入 - self.event_on_result_chg = threading.Event() - self.event_on_entence_end = threading.Event() - self.event_on_commit_question = threading.Event() - - def __del__(self): - self.stop = True - self.stop_msg = "" - self.commit_wd.kill_dog = True - self.plugin_wd.kill_dog = True - - def init(self, chatbot): - # 初始化音频采集线程 - self.captured_audio = np.array([]) - self.keep_latest_n_second = 10 - self.commit_after_pause_n_second = 2.0 - self.ready_audio_flagment = None - self.stop = False - self.plugin_wd = WatchDog(timeout=5, bark_fn=self.__del__, msg="程序终止") - self.aut = threading.Thread(target=self.audio_convertion_thread, args=(chatbot._cookies['uuid'],)) - self.aut.daemon = True - self.aut.start() - # th2 = threading.Thread(target=self.audio2txt_thread, args=(chatbot._cookies['uuid'],)) - # th2.daemon = True - # th2.start() - - def no_audio_for_a_while(self): - if len(self.buffered_sentence) < 7: # 如果一句话小于7个字,暂不提交 - self.commit_wd.begin_watch() - else: - self.event_on_commit_question.set() - - def begin(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - # main plugin function - self.init(chatbot) - chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - self.plugin_wd.begin_watch() - self.agt = AsyncGptTask() - self.commit_wd = WatchDog(timeout=self.commit_after_pause_n_second, bark_fn=self.no_audio_for_a_while, interval=0.2) - self.commit_wd.begin_watch() - - while not self.stop: - self.event_on_result_chg.wait(timeout=0.25) # run 
once every 0.25 second - chatbot = self.agt.update_chatbot(chatbot) # 将子线程的gpt结果写入chatbot - history = chatbot2history(chatbot) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - self.plugin_wd.feed() - - if self.event_on_result_chg.is_set(): - # called when some words have finished - self.event_on_result_chg.clear() - chatbot[-1] = list(chatbot[-1]) - chatbot[-1][0] = self.buffered_sentence + self.parsed_text - history = chatbot2history(chatbot) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - self.commit_wd.feed() - - if self.event_on_entence_end.is_set(): - # called when a sentence has ended - self.event_on_entence_end.clear() - self.parsed_text = self.parsed_sentence - self.buffered_sentence += self.parsed_text - chatbot[-1] = list(chatbot[-1]) - chatbot[-1][0] = self.buffered_sentence - history = chatbot2history(chatbot) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if self.event_on_commit_question.is_set(): - # called when a question should be commited - self.event_on_commit_question.clear() - if len(self.buffered_sentence) == 0: raise RuntimeError - - self.commit_wd.begin_watch() - chatbot[-1] = list(chatbot[-1]) - chatbot[-1] = [self.buffered_sentence, "[ 等待GPT响应 ]"] - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # add gpt task 创建子线程请求gpt,避免线程阻塞 - history = chatbot2history(chatbot) - self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt) - - self.buffered_sentence = "" - chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not self.event_on_result_chg.is_set() and not self.event_on_entence_end.is_set() and not self.event_on_commit_question.is_set(): - visualize_audio(chatbot, self.audio_shape) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if len(self.stop_msg) != 0: - raise RuntimeError(self.stop_msg) - - - -@CatchException -def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # pip install -U openai-whisper - chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import nls - from scipy import io - except: - chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - APPKEY = get_conf('ALIYUN_APPKEY') - if APPKEY == "": - chatbot.append(["导入依赖失败", "没有阿里云语音识别APPKEY和TOKEN, 详情见https://help.aliyun.com/document_detail/450255.html"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - ia = InterviewAssistant() - yield from ia.begin(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - diff --git "a/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" "b/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" index 48222a6ddd9719cb4375972d4353286bcb43fbd4..a1aee369560fd1267d89fb6c59348bc41082876f 100644 --- "a/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" +++ "b/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" @@ -1,64 +1,70 @@ -from toolbox import update_ui 
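The hunk below (like several above) swaps the newer `update_ui()` refresh helper back to the older protocol in which a plugin yields `(chatbot, history, status)` tuples directly. A minimal sketch of the two conventions, assuming a Gradio-style caller that re-renders on every yielded value; `update_ui` here is a hand-written stand-in, not the project's real `toolbox.update_ui`:

```python
# Sketch only: this update_ui is a stand-in for toolbox.update_ui,
# whose real implementation also carries cookies and status messages.
def update_ui(chatbot, history, msg='正常'):
    yield chatbot, history, msg  # one yield == one front-end refresh

def new_style_plugin(txt, chatbot, history):
    chatbot.append((txt, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)  # newer convention

def old_style_plugin(txt, chatbot, history):
    chatbot.append((txt, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'  # older convention restored by this diff
```

Both shapes keep the UI alive while long GPT requests run; only the yield signature differs.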
-from toolbox import CatchException, report_exception -from toolbox import write_history_to_file, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +from predict import predict_no_ui +from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down +fast_debug = False -def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): +def 解析Paper(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): import time, glob, os print('begin analysis on:', file_manifest) for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: + with open(fp, 'r', encoding='utf-8') as f: file_content = f.read() prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + print('[1] yield chatbot, history') + yield chatbot, history, '正常' - msg = '正常' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # 带超时倒计时 - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - time.sleep(2) + if not fast_debug: + msg = '正常' + # ** gpt request ** + gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 + + print('[2] end gpt req') + chatbot[-1] = (i_say_show_user, gpt_say) + history.append(i_say_show_user); history.append(gpt_say) + print('[3] yield chatbot, history') + yield chatbot, history, msg + print('[4] next') + if not fast_debug: time.sleep(2) all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + yield chatbot, history, '正常' - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt) # 带超时倒计时 + if not fast_debug: + msg = '正常' + # ** gpt request ** + gpt_say = yield from predict_no_ui_but_counting_down(api, i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_history_to_file(history) - promote_file_to_downloadzone(res, chatbot=chatbot) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 + chatbot[-1] = (i_say, gpt_say) + history.append(i_say); history.append(gpt_say) + yield chatbot, history, msg + res = write_results_to_file(history) + chatbot.append(("完成了吗?", res)) + yield chatbot, history, msg @CatchException -def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): +def 读文章写摘要(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, 
WEB_PORT): history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + yield chatbot, history, '正常' return - yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) + yield from 解析Paper(api, file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) diff --git "a/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" "b/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" deleted file mode 100644 index 8b7ea3ffce15e0ca6f0017987a974a3e4d183810..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" +++ /dev/null @@ -1,185 +0,0 @@ -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import CatchException, report_exception, promote_file_to_downloadzone -from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file -import logging -import requests -import time -import random - -ENABLE_ALL_VERSION_SEARCH = True - -def get_meta_information(url, chatbot, history): - import arxiv - import difflib - import re - from bs4 import BeautifulSoup - from toolbox import get_conf - from urllib.parse import urlparse - session = requests.session() - - proxies = get_conf('proxies') - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7', - 'Cache-Control':'max-age=0', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', - 'Connection': 'keep-alive' - } - try: - session.proxies.update(proxies) - except: - report_exception(chatbot, history, - a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议:检查USE_PROXY选项是否修改。", - b=f"尝试直接连接") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - session.headers.update(headers) - - response = session.get(url) - # 解析网页内容 - soup = BeautifulSoup(response.text, "html.parser") - - def string_similar(s1, s2): - return difflib.SequenceMatcher(None, s1, s2).quick_ratio() - - if ENABLE_ALL_VERSION_SEARCH: - def search_all_version(url): - time.sleep(random.randint(1,5)) # 睡一会防止触发google反爬虫 - response = session.get(url) - soup = BeautifulSoup(response.text, "html.parser") - - for result in soup.select(".gs_ri"): - try: - url = result.select_one(".gs_rt").a['href'] - except: - continue - arxiv_id = extract_arxiv_id(url) - if not arxiv_id: - 
continue - search = arxiv.Search( - id_list=[arxiv_id], - max_results=1, - sort_by=arxiv.SortCriterion.Relevance, - ) - try: paper = next(search.results()) - except: paper = None - return paper - - return None - - def extract_arxiv_id(url): - # 返回给定的url解析出的arxiv_id,如url未成功匹配返回None - pattern = r'arxiv.org/abs/([^/]+)' - match = re.search(pattern, url) - if match: - return match.group(1) - else: - return None - - profile = [] - # 获取所有文章的标题和作者 - for result in soup.select(".gs_ri"): - title = result.a.text.replace('\n', ' ').replace(' ', ' ') - author = result.select_one(".gs_a").text - try: - citation = result.select_one(".gs_fl > a[href*='cites']").text # 引用次数是链接中的文本,直接取出来 - except: - citation = 'cited by 0' - abstract = result.select_one(".gs_rs").text.strip() # 摘要在 .gs_rs 中的文本,需要清除首尾空格 - - # 首先在arxiv上搜索,获取文章摘要 - search = arxiv.Search( - query = title, - max_results = 1, - sort_by = arxiv.SortCriterion.Relevance, - ) - try: paper = next(search.results()) - except: paper = None - - is_match = paper is not None and string_similar(title, paper.title) > 0.90 - - # 如果在Arxiv上匹配失败,检索文章的历史版本的题目 - if not is_match and ENABLE_ALL_VERSION_SEARCH: - other_versions_page_url = [tag['href'] for tag in result.select_one('.gs_flb').select('.gs_nph') if 'cluster' in tag['href']] - if len(other_versions_page_url) > 0: - other_versions_page_url = other_versions_page_url[0] - paper = search_all_version('http://' + urlparse(url).netloc + other_versions_page_url) - is_match = paper is not None and string_similar(title, paper.title) > 0.90 - - if is_match: - # same paper - abstract = paper.summary.replace('\n', ' ') - is_paper_in_arxiv = True - else: - # different paper - abstract = abstract - is_paper_in_arxiv = False - - logging.info('[title]:' + title) - logging.info('[author]:' + author) - logging.info('[citation]:' + citation) - - profile.append({ - 'title': title, - 'author': author, - 'citation': citation, - 'abstract': abstract, - 'is_paper_in_arxiv': is_paper_in_arxiv, - }) - - chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract] - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - return profile - -@CatchException -def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - disable_auto_promotion(chatbot=chatbot) - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import arxiv - import math - from bs4 import BeautifulSoup - except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - meta_paper_info_list = yield from get_meta_information(txt, chatbot, history) - if len(meta_paper_info_list) == 0: - yield from update_ui_lastest_msg(lastmsg='获取文献失败,可能触发了google反爬虫机制。',chatbot=chatbot, history=history, delay=0) - return - batchsize = 5 - for batch in range(math.ceil(len(meta_paper_info_list)/batchsize)): - if len(meta_paper_info_list[:batchsize]) > 0: - i_say = "下面是一些学术文献的数据,提取出以下内容:" + \ - "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \ - f"以下是信息源:{str(meta_paper_info_list[:batchsize])}" - - inputs_show_user = f"请分析此页面中出现的所有文章:{txt},这是第{batch+1}批" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - 
inputs=i_say, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown表格。你必须逐个文献进行处理。" - ) - - history.extend([ f"第{batch+1}批", gpt_say ]) - meta_paper_info_list = meta_paper_info_list[batchsize:] - - chatbot.append(["状态?", - "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write a \"Related Works\" section about \"你搜索的研究领域\" for me."]) - msg = '正常' - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - path = write_history_to_file(history) - promote_file_to_downloadzone(path, chatbot=chatbot) - chatbot.append(("完成了吗?", path)); - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 diff --git "a/crazy_functions/\350\276\205\345\212\251\345\212\237\350\203\275.py" "b/crazy_functions/\350\276\205\345\212\251\345\212\237\350\203\275.py" deleted file mode 100644 index 10f71ed6f1b82af08ba47e975195db7142b90942..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\276\205\345\212\251\345\212\237\350\203\275.py" +++ /dev/null @@ -1,54 +0,0 @@ -# encoding: utf-8 -# @Time : 2023/4/19 -# @Author : Spike -# @Descr : -from toolbox import update_ui, get_conf, get_user -from toolbox import CatchException -from toolbox import default_user_name -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import shutil -import os - - -@CatchException -def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - if txt: - show_say = txt - prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。' - else: - prompt = history[-1]+"\n分析上述回答,再列出用户可能提出的三个问题。" - show_say = '分析上述回答,再列出用户可能提出的三个问题。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, - inputs_show_user=show_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt=system_prompt - ) - chatbot[-1] = (show_say, gpt_say) - history.extend([show_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -@CatchException -def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - chatbot.append(['清除本地缓存数据', '执行中. 
删除数据']) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - def _get_log_folder(user=default_user_name): - PATH_LOGGING = get_conf('PATH_LOGGING') - _dir = os.path.join(PATH_LOGGING, user) - if not os.path.exists(_dir): os.makedirs(_dir) - return _dir - - def _get_upload_folder(user=default_user_name): - PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') - _dir = os.path.join(PATH_PRIVATE_UPLOAD, user) - return _dir - - shutil.rmtree(_get_log_folder(get_user(chatbot)), ignore_errors=True) - shutil.rmtree(_get_upload_folder(get_user(chatbot)), ignore_errors=True) - - chatbot.append(['清除本地缓存数据', '执行完成']) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 \ No newline at end of file diff --git "a/crazy_functions/\350\276\205\345\212\251\345\233\236\347\255\224.py" "b/crazy_functions/\350\276\205\345\212\251\345\233\236\347\255\224.py" deleted file mode 100644 index b635f88b3183bbd310eca6449cd9e10c75ca7ca7..0000000000000000000000000000000000000000 --- "a/crazy_functions/\350\276\205\345\212\251\345\233\236\347\255\224.py" +++ /dev/null @@ -1,28 +0,0 @@ -# encoding: utf-8 -# @Time : 2023/4/19 -# @Author : Spike -# @Descr : -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - - -@CatchException -def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - if txt: - show_say = txt - prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。' - else: - prompt = history[-1]+"\n分析上述回答,再列出用户可能提出的三个问题。" - show_say = '分析上述回答,再列出用户可能提出的三个问题。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=prompt, - inputs_show_user=show_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt=system_prompt - ) - chatbot[-1] = (show_say, gpt_say) - history.extend([show_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 \ No newline at end of file diff --git "a/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" "b/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" index d22a67411a2c94a06c342d041627c9f0afc031e7..c5df9b84f8cee8ffea32db6ef49097671b14cdc9 100644 --- "a/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" +++ "b/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" @@ -1,99 +1,17 @@ -from toolbox import CatchException, update_ui -from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime - -高阶功能模板函数示意图 = f""" -```mermaid -flowchart TD - %% 一个特殊标记,用于在生成mermaid图表时隐藏代码块 - subgraph 函数调用["函数调用过程"] - AA["输入栏用户输入的文本(txt)"] --> BB["gpt模型参数(llm_kwargs)"] - BB --> CC["插件模型参数(plugin_kwargs)"] - CC --> DD["对话显示框的句柄(chatbot)"] - DD --> EE["对话历史(history)"] - EE --> FF["系统提示词(system_prompt)"] - FF --> GG["当前用户信息(web_port)"] - - A["开始(查询5天历史事件)"] - A --> B["获取当前月份和日期"] - B --> C["生成历史事件查询提示词"] - C --> D["调用大模型"] - D --> E["更新界面"] - E --> F["记录历史"] - F --> |"下一天"| B - end -``` -""" +from predict import predict_no_ui +from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down +fast_debug = False @CatchException -def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - # 
高阶功能模板函数示意图:https://mermaid.live/edit#pako:eNptk1tvEkEYhv8KmattQpvlvOyFCcdeeaVXuoYssBwie8gyhCIlqVoLhrbbtAWNUpEGUkyMEDW2Fmn_DDOL_8LZHdOwxrnamX3f7_3mmZk6yKhZCfAgV1KrmYKoQ9fDuKC4yChX0nld1Aou1JzjznQ5fWmejh8LYHW6vG2a47YAnlCLNSIRolnenKBXI_zRIBrcuqRT890u7jZx7zMDt-AaMbnW1--5olGiz2sQjwfoQxsZL0hxplSSU0-rop4vrzmKR6O2JxYjHmwcL2Y_HDatVMkXlf86YzHbGY9bO5j8XE7O8Nsbc3iNB3ukL2SMcH-XIQBgWoVOZzxuOxOJOyc63EPGV6ZQLENVrznViYStTiaJ2vw2M2d9bByRnOXkgCnXylCSU5quyto_IcmkbdvctELmJ-j1ASW3uB3g5xOmKqVTmqr_Na3AtuS_dtBFm8H90XJyHkDDT7S9xXWb4HGmRChx64AOL5HRpUm411rM5uh4H78Z4V7fCZzytjZz2seto9XaNPFue07clLaVZF8UNLygJ-VES8lah_n-O-5Ozc7-77NzJ0-K0yr0ZYrmHdqAk50t2RbA4qq9uNohBASw7YpSgaRkLWCCAtxAlnRZLGbJba9bPwUAC5IsCYAnn1kpJ1ZKUACC0iBSsQLVBzUlA3ioVyQ3qGhZEUrxokiehAz4nFgqk1VNVABfB1uAD_g2_AGPl-W8nMcbCvsDblADfNCz4feyobDPy3rYEMtxwYYbPFNVUoHdCPmDHBv2cP4AMfrCbiBli-Q-3afv0X6WdsIjW2-10fgDy1SAig - - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ +def 高阶功能模板函数(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): history = [] # 清空历史,以免输入溢出 - chatbot.append(( - "您正在调用插件:历史上的今天", - "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!" + 高阶功能模板函数示意图)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 for i in range(5): - currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month - currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day - i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? 
< PUT_YOUR_QUERY_HERE >)。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - + i_say = f'我给出一个数字,你给出该数字的平方。我给出数字:{i}' + chatbot.append((i_say, "[Local Message] waiting gpt response.")) + yield chatbot, history, '正常' # 由于请求gpt需要一段时间,我们先及时地做一次状态显示 + gpt_say = predict_no_ui(api, inputs=i_say, top_p=top_p, temperature=temperature) # 请求gpt,需要一段时间 - -PROMPT = """ -请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例: -```mermaid -graph TD - P(编程) --> L1(Python) - P(编程) --> L2(C) - P(编程) --> L3(C++) - P(编程) --> L4(Javascipt) - P(编程) --> L5(PHP) -``` -""" -@CatchException -def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - user_request 当前用户的请求信息(IP地址等) - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "一个测试mermaid绘制图表的功能,您可以在输入框中输入一些关键词,然后使用mermaid+llm绘制图表。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - if txt == "": txt = "空白的输入栏" # 调皮一下 - - i_say_show_user = f'请绘制有关“{txt}”的逻辑关系图。' - i_say = PROMPT.format(subject=txt) - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="" - ) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 \ No newline at end of file + chatbot[-1] = (i_say, gpt_say) + history.append(i_say);history.append(gpt_say) + yield chatbot, history, '正常' # 显示 \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 2090c5aa90f728e16d1d0254feb03cddefff12a1..0000000000000000000000000000000000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,231 +0,0 @@ -## =================================================== -# docker-compose.yml -## =================================================== -# 1. 请在以下方案中选择任意一种,然后删除其他的方案 -# 2. 修改你选择的方案中的environment环境变量,详情请见github wiki或者config.py -# 3. 选择一种暴露服务端口的方法,并对相应的配置做出修改: - # 【方法1: 适用于Linux,很方便,可惜windows不支持】与宿主的网络融合为一体,这个是默认配置 - # network_mode: "host" - # 【方法2: 适用于所有系统包括Windows和MacOS】端口映射,把容器的端口映射到宿主的端口(注意您需要先删除network_mode: "host",再追加以下内容) - # ports: - # - "12345:12345" # 注意!12345必须与WEB_PORT环境变量相互对应 -# 4. 最后`docker-compose up`运行 -# 5. 如果希望使用显卡,请关注 LOCAL_MODEL_DEVICE 和 英伟达显卡运行时 选项 -## =================================================== -# 1. Please choose one of the following options and delete the others. -# 2. Modify the environment variables in the selected option, see GitHub wiki or config.py for more details. -# 3. Choose a method to expose the server port and make the corresponding configuration changes: - # [Method 1: Suitable for Linux, convenient, but not supported for Windows] Fusion with the host network, this is the default configuration - # network_mode: "host" - # [Method 2: Suitable for all systems including Windows and MacOS] Port mapping, mapping the container port to the host port (note that you need to delete network_mode: "host" first, and then add the following content) - # ports: - # - "12345: 12345" # Note! 12345 must correspond to the WEB_PORT environment variable. -# 4. Finally, run `docker-compose up`. -# 5. 
If you want to use a graphics card, pay attention to the LOCAL_MODEL_DEVICE and Nvidia GPU runtime options. -## =================================================== - -## =================================================== -## 【方案零】 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个) -## =================================================== -version: '3' -services: - gpt_academic_full_capability: - image: ghcr.io/binary-husky/gpt_academic_with_all_capacity:master - environment: - # 请查阅 `config.py`或者 github wiki 以查看所有的配置信息 - API_KEY: ' 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 ' - # USE_PROXY: ' True ' - # proxies: ' { "http": "http://localhost:10881", "https": "http://localhost:10881", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4", "qianfan", "sparkv2", "spark", "chatglm"] ' - BAIDU_CLOUD_API_KEY : ' bTUtwEAveBrQipEowUvDwYWq ' - BAIDU_CLOUD_SECRET_KEY : ' jqXtLvXiVw6UNdjliATTS61rllG8Iuni ' - XFYUN_APPID: ' 53a8d816 ' - XFYUN_API_SECRET: ' MjMxNDQ4NDE4MzM0OSNlNjQ2NTlhMTkx ' - XFYUN_API_KEY: ' 95ccdec285364869d17b33e75ee96447 ' - ENABLE_AUDIO: ' False ' - DEFAULT_WORKER_NUM: ' 20 ' - WEB_PORT: ' 12345 ' - ADD_WAIFU: ' False ' - ALIYUN_APPKEY: ' RxPlZrM88DnAFkZK ' - THEME: ' Chuanhu-Small-and-Beautiful ' - ALIYUN_ACCESSKEY: ' LTAI5t6BrFUzxRXVGUWnekh1 ' - ALIYUN_SECRET: ' eHmI20SVWIwQZxCiTD2bGQVspP9i68 ' - # LOCAL_MODEL_DEVICE: ' cuda ' - - # 加载英伟达显卡运行时 - # runtime: nvidia - # deploy: - # resources: - # reservations: - # devices: - # - driver: nvidia - # count: 1 - # capabilities: [gpu] - - # 【WEB_PORT暴露方法1: 适用于Linux】与宿主的网络融合 - network_mode: "host" - - # 【WEB_PORT暴露方法2: 适用于所有系统】端口映射 - # ports: - # - "12345:12345" # 12345必须与WEB_PORT相互对应 - - # 启动容器后,运行main.py主程序 - command: > - bash -c "python3 -u main.py" - - - - -## =================================================== -## 【方案一】 如果不需要运行本地模型(仅 chatgpt, azure, 星火, 千帆, claude 等在线大模型服务) -## =================================================== -version: '3' -services: - gpt_academic_nolocalllms: - image: ghcr.io/binary-husky/gpt_academic_nolocal:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal) - environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "sparkv2", "qianfan"] ' - WEB_PORT: ' 22303 ' - ADD_WAIFU: ' True ' - # THEME: ' Chuanhu-Small-and-Beautiful ' - # DEFAULT_WORKER_NUM: ' 10 ' - # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' - - # 与宿主的网络融合 - network_mode: "host" - - # 不使用代理网络拉取最新代码 - command: > - bash -c "python3 -u main.py" - - -### =================================================== -### 【方案二】 如果需要运行ChatGLM + Qwen + MOSS等本地模型 -### =================================================== -version: '3' -services: - gpt_academic_with_chatglm: - image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master # (Auto Built by Dockerfile: docs/Dockerfile+ChatGLM) - environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["chatglm", "qwen", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"] ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 10 ' - WEB_PORT: ' 
12303 ' - ADD_WAIFU: ' True ' - # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' - - # 显卡的使用,nvidia0指第0个GPU - runtime: nvidia - devices: - - /dev/nvidia0:/dev/nvidia0 - - # 与宿主的网络融合 - network_mode: "host" - command: > - bash -c "python3 -u main.py" - - # P.S. 通过对 command 进行微调,可以便捷地安装额外的依赖 - # command: > - # bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py" - -### =================================================== -### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型 -### =================================================== -version: '3' -services: - gpt_academic_with_rwkv: - image: ghcr.io/binary-husky/gpt_academic_jittorllms:master - environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 10 ' - WEB_PORT: ' 12305 ' - ADD_WAIFU: ' True ' - # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' - - # 显卡的使用,nvidia0指第0个GPU - runtime: nvidia - devices: - - /dev/nvidia0:/dev/nvidia0 - - # 与宿主的网络融合 - network_mode: "host" - - # 不使用代理网络拉取最新代码 - command: > - python3 -u main.py - - -## =================================================== -## 【方案四】 ChatGPT + Latex -## =================================================== -version: '3' -services: - gpt_academic_with_latex: - image: ghcr.io/binary-husky/gpt_academic_with_latex:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex) - environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4"] ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 10 ' - WEB_PORT: ' 12303 ' - - # 与宿主的网络融合 - network_mode: "host" - - # 不使用代理网络拉取最新代码 - command: > - bash -c "python3 -u main.py" - - -## =================================================== -## 【方案五】 ChatGPT + 语音助手 (请先阅读 docs/use_audio.md) -## =================================================== -version: '3' -services: - gpt_academic_with_audio: - image: ghcr.io/binary-husky/gpt_academic_audio_assistant:master - environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' fk195831-IdP0Pb3W6DCMUIbQwVX6MsSiyxwqybyS ' - USE_PROXY: ' False ' - proxies: ' None ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4"] ' - ENABLE_AUDIO: ' True ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 20 ' - WEB_PORT: ' 12343 ' - ADD_WAIFU: ' True ' - THEME: ' Chuanhu-Small-and-Beautiful ' - ALIYUN_APPKEY: ' RoP1ZrM84DnAFkZK ' - ALIYUN_TOKEN: ' f37f30e0f9934c34a992f6f64f7eba4f ' - # (无需填写) ALIYUN_ACCESSKEY: ' LTAI5q6BrFUzoRXVGUWnekh1 ' - # (无需填写) ALIYUN_SECRET: ' eHmI20AVWIaQZ0CiTD2bGQVsaP9i68 ' - - # 与宿主的网络融合 - network_mode: "host" - - # 不使用代理网络拉取最新代码 - command: > - bash -c "python3 -u main.py" diff --git a/docs/Dockerfile+ChatGLM b/docs/Dockerfile+ChatGLM deleted file mode 100644 index 7777bf26716e9206de62a51da2d47eadaff0ed15..0000000000000000000000000000000000000000 --- a/docs/Dockerfile+ChatGLM +++ /dev/null @@ -1 +0,0 @@ -# 
此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss diff --git a/docs/Dockerfile+JittorLLM b/docs/Dockerfile+JittorLLM deleted file mode 100644 index b10be8075b3212c6231e6574af29932de6d35aa6..0000000000000000000000000000000000000000 --- a/docs/Dockerfile+JittorLLM +++ /dev/null @@ -1 +0,0 @@ -# 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs diff --git a/docs/Dockerfile+NoLocal+Latex b/docs/Dockerfile+NoLocal+Latex deleted file mode 100644 index a0f162af784263c47e0df85b4ba1ef49e031bf8c..0000000000000000000000000000000000000000 --- a/docs/Dockerfile+NoLocal+Latex +++ /dev/null @@ -1 +0,0 @@ -# 此Dockerfile不再维护,请前往docs/GithubAction+NoLocal+Latex diff --git a/docs/GithubAction+AllCapacity b/docs/GithubAction+AllCapacity deleted file mode 100644 index 4ba0e31a9e1e7858059e179e97ff2648dd898c58..0000000000000000000000000000000000000000 --- a/docs/GithubAction+AllCapacity +++ /dev/null @@ -1,36 +0,0 @@ -# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 . - -# 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3) -FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest - -# use python3 as the system default python -WORKDIR /gpt -RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 -# 下载pytorch -RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 -# 准备pip依赖 -RUN python3 -m pip install openai numpy arxiv rich -RUN python3 -m pip install colorama Markdown pygments pymupdf -RUN python3 -m pip install python-docx moviepy pdfminer -RUN python3 -m pip install zh_langchain==0.2.1 pypinyin -RUN python3 -m pip install rarfile py7zr -RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git -# 下载分支 -WORKDIR /gpt -RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -WORKDIR /gpt/gpt_academic -RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss - -RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llms/requirements_moss.txt -RUN python3 -m pip install -r request_llms/requirements_qwen.txt -RUN python3 -m pip install -r request_llms/requirements_chatglm.txt -RUN python3 -m pip install -r request_llms/requirements_newbing.txt -RUN python3 -m pip install nougat-ocr - - -# 预热Tiktoken模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -# 启动 -CMD ["python3", "-u", "main.py"] diff --git a/docs/GithubAction+AllCapacityBeta b/docs/GithubAction+AllCapacityBeta deleted file mode 100644 index d3a06ee19860f49e76c836add5c6ff65aa0517ff..0000000000000000000000000000000000000000 --- a/docs/GithubAction+AllCapacityBeta +++ /dev/null @@ -1,53 +0,0 @@ -# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 . -# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacityBeta --network=host . 
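For context, a hedged sketch of driving the build-then-run sequence from the two comments above programmatically; the tag and Dockerfile path are taken from this file's own header comments, and Docker must be available on the host:

```python
# Sketch: reproduce the `docker build` / `docker run` comments above via subprocess.
import subprocess

TAG = "gpt-academic-all-capacity"  # tag used in the build comment above
subprocess.run(["docker", "build", "-t", TAG, "-f", "docs/GithubAction+AllCapacityBeta",
                "--network=host", "."], check=True)
subprocess.run(["docker", "run", "-it", "--net=host", TAG, "bash"], check=True)  # as in the run comment
```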
-# docker run -it --net=host gpt-academic-all-capacity bash - -# 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3) -FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest - -# use python3 as the system default python -WORKDIR /gpt -RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 - -# # 非必要步骤,更换pip源 (以下三行,可以删除) -# RUN echo '[global]' > /etc/pip.conf && \ -# echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \ -# echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf - -# 下载pytorch -RUN python3 -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113 -# 准备pip依赖 -RUN python3 -m pip install openai numpy arxiv rich -RUN python3 -m pip install colorama Markdown pygments pymupdf -RUN python3 -m pip install python-docx moviepy pdfminer -RUN python3 -m pip install zh_langchain==0.2.1 pypinyin -RUN python3 -m pip install rarfile py7zr -RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git -# 下载分支 -WORKDIR /gpt -RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -WORKDIR /gpt/gpt_academic -RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss - -RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llms/requirements_moss.txt -RUN python3 -m pip install -r request_llms/requirements_qwen.txt -RUN python3 -m pip install -r request_llms/requirements_chatglm.txt -RUN python3 -m pip install -r request_llms/requirements_newbing.txt -RUN python3 -m pip install nougat-ocr - -# 预热Tiktoken模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -# 安装知识库插件的额外依赖 -RUN apt-get update && apt-get install libgl1 -y -RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade -RUN pip3 install unstructured[all-docs] --upgrade -RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()' -RUN rm -rf /usr/local/lib/python3.8/dist-packages/tests - - -# COPY .cache /root/.cache -# COPY config_private.py config_private.py -# 启动 -CMD ["python3", "-u", "main.py"] diff --git a/docs/GithubAction+ChatGLM+Moss b/docs/GithubAction+ChatGLM+Moss deleted file mode 100644 index 3212dc2f4d17425696e0a67a35e90cb81b6a99e5..0000000000000000000000000000000000000000 --- a/docs/GithubAction+ChatGLM+Moss +++ /dev/null @@ -1,30 +0,0 @@ - -# 从NVIDIA源,从而支持显卡运损(检查宿主的nvidia-smi中的cuda版本必须>=11.3) -FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 -RUN apt-get update -RUN apt-get install -y curl proxychains curl gcc -RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing - - -# use python3 as the system default python -RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 -# 下载pytorch -RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 -# 下载分支 -WORKDIR /gpt -RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -WORKDIR /gpt/gpt_academic -RUN git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss -RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llms/requirements_moss.txt -RUN python3 -m pip install -r request_llms/requirements_qwen.txt -RUN python3 -m pip install -r request_llms/requirements_chatglm.txt -RUN python3 -m pip install -r request_llms/requirements_newbing.txt - - - -# 预热Tiktoken模块 -RUN python3 -c 'from check_proxy import warm_up_modules; 
warm_up_modules()' - -# 启动 -CMD ["python3", "-u", "main.py"] diff --git a/docs/GithubAction+JittorLLMs b/docs/GithubAction+JittorLLMs deleted file mode 100644 index 189eb24431e4778367e08d359949a260e9426194..0000000000000000000000000000000000000000 --- a/docs/GithubAction+JittorLLMs +++ /dev/null @@ -1,34 +0,0 @@ -# 从NVIDIA源,从而支持显卡运损(检查宿主的nvidia-smi中的cuda版本必须>=11.3) -FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 -ARG useProxyNetwork='' -RUN apt-get update -RUN apt-get install -y curl proxychains curl g++ -RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing - -# use python3 as the system default python -RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8 - -# 下载pytorch -RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 - -# 下载分支 -WORKDIR /gpt -RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -WORKDIR /gpt/gpt_academic -RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llms/requirements_chatglm.txt -RUN python3 -m pip install -r request_llms/requirements_newbing.txt -RUN python3 -m pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I - -# 下载JittorLLMs -RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llms/jittorllms - -# 禁用缓存,确保更新代码 -ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache -RUN git pull - -# 预热Tiktoken模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -# 启动 -CMD ["python3", "-u", "main.py"] diff --git a/docs/GithubAction+NoLocal b/docs/GithubAction+NoLocal deleted file mode 100644 index 5c49b948121a0f5c1f1f9abd264d2673d375ad30..0000000000000000000000000000000000000000 --- a/docs/GithubAction+NoLocal +++ /dev/null @@ -1,20 +0,0 @@ -# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM -# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic-nolocal -f docs/Dockerfile+NoLocal . -# 如何运行: docker run --rm -it --net=host gpt-academic-nolocal -FROM python:3.11 - -# 指定路径 -WORKDIR /gpt - -# 装载项目文件 -COPY . . - -# 安装依赖 -RUN pip3 install -r requirements.txt - - -# 可选步骤,用于预热模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -# 启动 -CMD ["python3", "-u", "main.py"] diff --git a/docs/GithubAction+NoLocal+AudioAssistant b/docs/GithubAction+NoLocal+AudioAssistant deleted file mode 100644 index 6d6dab0a5bfce8b62ce8c292924a6303229d425d..0000000000000000000000000000000000000000 --- a/docs/GithubAction+NoLocal+AudioAssistant +++ /dev/null @@ -1,22 +0,0 @@ -# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM -# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic-nolocal -f docs/Dockerfile+NoLocal . -# 如何运行: docker run --rm -it --net=host gpt-academic-nolocal -FROM python:3.11 - -# 指定路径 -WORKDIR /gpt - -# 装载项目文件 -COPY . . 
-
-# Install dependencies
-RUN pip3 install -r requirements.txt
-
-# Install the extra dependencies for the voice-assistant plugin
-RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
-
-# Optional step: warm up modules
-RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
-
-# Start the application
-CMD ["python3", "-u", "main.py"]
diff --git a/docs/GithubAction+NoLocal+Latex b/docs/GithubAction+NoLocal+Latex
deleted file mode 100644
index 95dd4b82729da1cc611c1c245c9ed2c54ba0cde3..0000000000000000000000000000000000000000
--- a/docs/GithubAction+NoLocal+Latex
+++ /dev/null
@@ -1,35 +0,0 @@
-# This Dockerfile builds a "no local model" environment; if you need local models such as ChatGLM, see docs/Dockerfile+ChatGLM instead
-# - 1 Edit `config.py`
-# - 2 Build: docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex .
-# - 3 Run: docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
-
-FROM fuqingxu/python311_texlive_ctex:latest
-ENV PATH "$PATH:/usr/local/texlive/2022/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2023/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2024/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2025/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2026/bin/x86_64-linux"
-
-# Remove documentation files to save space
-RUN rm -rf /usr/local/texlive/2023/texmf-dist/doc
-
-# Set the working directory
-WORKDIR /gpt
-
-RUN pip3 install openai numpy arxiv rich
-RUN pip3 install colorama Markdown pygments pymupdf
-RUN pip3 install python-docx pdfminer
-RUN pip3 install nougat-ocr
-
-# Copy the project files
-COPY . .
-
-
-# Install dependencies
-RUN pip3 install -r requirements.txt
-
-# Optional step: warm up modules
-RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
-
-# Start the application
-CMD ["python3", "-u", "main.py"]
diff --git a/docs/GithubAction+NoLocal+Vectordb b/docs/GithubAction+NoLocal+Vectordb
deleted file mode 100644
index 45074d935d664fa2754230dcdc0c056f5ace047a..0000000000000000000000000000000000000000
--- a/docs/GithubAction+NoLocal+Vectordb
+++ /dev/null
@@ -1,26 +0,0 @@
-# This Dockerfile builds a "no local model" environment; if you need local models such as ChatGLM, see docs/Dockerfile+ChatGLM instead
-# How to build: first edit `config.py`, then run: docker build -t gpt-academic-nolocal-vs -f docs/GithubAction+NoLocal+Vectordb .
-# How to run: docker run --rm -it --net=host gpt-academic-nolocal-vs
-FROM python:3.11
-
-# Set the working directory
-WORKDIR /gpt
-
-# Copy the project files
-COPY . .
-
-# Install dependencies
-RUN pip3 install -r requirements.txt
-
-# Install the extra dependencies for the knowledge-base plugin
-RUN apt-get update && apt-get install libgl1 -y
-RUN pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
-RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
-RUN pip3 install unstructured[all-docs] --upgrade
-RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
-
-# Optional step: warm up modules
-RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
-
-# Start the application
-CMD ["python3", "-u", "main.py"]
diff --git a/docs/README.Arabic.md b/docs/README.Arabic.md
deleted file mode 100644
index 5d8bf3cc31d118fab8d8523bebffce77593abb5c..0000000000000000000000000000000000000000
--- a/docs/README.Arabic.md
+++ /dev/null
@@ -1,343 +0,0 @@
-
-
-> **ملحوظة**
->
-> تمت ترجمة ملف README هذا باستخدام GPT (بواسطة المكون الإضافي لهذا المشروع)، وقد لا تكون الترجمة موثوقة بنسبة 100٪، لذا يُرجى التدقيق في نتائج الترجمة بعناية.
->
-> 2023.11.7: عند تثبيت التبعيات، يُرجى اختيار الإصدارات المحددة في `requirements.txt`. أمر التثبيت: `pip install -r requirements.txt`.
-
-# GPT الأكاديمي
-
-**إذا كنت تحب هذا المشروع، فيُرجى إعطاؤه Star.** لترجمة هذا المشروع إلى أي لغة باستخدام GPT، اقرأ وشغّل [`multi_language.py`](multi_language.py) (تجريبي).
-
-> **ملحوظة**
->
-> 1. يُرجى ملاحظة أن الإضافات (الأزرار) المميزة فقط هي التي تدعم قراءة الملفات، وأن بعض الإضافات توجد في القائمة المنسدلة في منطقة الإضافات. كما نرحب بأي Pull Request لإضافة جديدة ونتعامل معه بأعلى أولوية.
->
-> 2. وظيفة كل ملف في هذا المشروع موضحة بالتفصيل في [تقرير الفهم الذاتي `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). ومع تطور الإصدارات، يمكنك في أي وقت النقر على إضافة وظيفية ذات صلة لاستدعاء GPT وإعادة إنشاء تقرير الفهم الذاتي للمشروع. للأسئلة الشائعة راجع [`الويكي`](https://github.com/binary-husky/gpt_academic/wiki). [طرق التثبيت العادية](#installation) | [سكربت التثبيت بنقرة واحدة](https://github.com/binary-husky/gpt_academic/releases) | [تعليمات التكوين](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
->
-> 3. هذا المشروع متوافق مع النماذج اللغوية الكبيرة المحلية مثل ChatGLM ويشجع على تجربتها. يدعم المشروع تعدد مفاتيح API ويمكن إدخالها معًا في ملف التكوين، مثل `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. لتبديل `API_KEY` مؤقتًا، أدخل `API_KEY` المؤقت في منطقة الإدخال ثم اضغط Enter ليصبح ساري المفعول.
-
-
-
-الوظائف (⭐= وظائف مُضافة حديثًا) | الوصف
---- | ---
-⭐[دمج نماذج جديدة](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | بايدو [تشيان فان](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) ووين شين يي يان، [تونغ يي تشيان ون](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary)، مختبر شنغهاي للذكاء الاصطناعي [شو شنغ](https://github.com/InternLM/InternLM)، آي فلاي تيك [شينغ خوو](https://xinghuo.xfyun.cn/)، [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)، وواجهة Zhipu API، وDALLE3
-التدقيق والترجمة وشرح الكود | تدقيق الأبحاث وترجمتها والبحث عن أخطائها النحوية وشرح الكود بنقرة واحدة
-[اختصارات مخصصة](https://www.bilibili.com/video/BV14s4y1E7jN) | دعم الاختصارات المخصصة
-تصميم قابل للتوسيع | دعم الإضافات القوية المخصصة (الوظائف)، مع قابلية تحديث الإضافات بشكل فوري
-[تحليل البرنامج](https://www.bilibili.com/video/BV1cj411A7VW) | [وظائف] تحليل شجرة مشروع Python/C/C++/Java/Lua/... بنقرة واحدة، أو [التحليل الذاتي](https://www.bilibili.com/video/BV1cj411A7VW)
-قراءة وترجمة الأبحاث | [وظائف] فك تشفير كامل لأوراق البحث بتنسيق LaTeX/PDF وإنشاء مستخلص
-ترجمة وتحسين أوراق LaTeX | [وظائف] ترجمة أو تحسين الأوراق المكتوبة بـ LaTeX بنقرة واحدة
-إنشاء تعليقات الدوال دفعة واحدة | [وظائف] إنشاء تعليقات الدوال بدفعة واحدة
-ترجمة Markdown بين العربية والإنجليزية | [وظائف] هل رأيت ملف [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) باللغات الخمس أعلاه؟
-إنشاء تقرير تحليل الدردشة | [وظائف] إنشاء تقرير ملخص تلقائيًا بعد التشغيل
-ترجمة كاملة لأوراق PDF | [وظائف] استخراج عنوان وملخص الأوراق بتنسيق PDF وترجمة النص الكامل (متعدد الخيوط)
-مساعد Arxiv | [وظائف] أدخل رابط مقال Arxiv لترجمة الملخص وتحميل ملف PDF بنقرة واحدة
-تصحيح LaTeX بنقرة واحدة | [وظائف] تصحيح قواعد اللغة والإملاء في أوراق LaTeX وإخراج ملف PDF للمقارنة جنبًا إلى جنب
-مساعد بحث Google Scholar | [وظائف] قدّم رابط أي صفحة بحث في Google Scholar ليساعدك GPT في كتابة [الأبحاث المتعلقة](https://www.bilibili.com/video/BV1GP411U7Az/)
-تجميع معلومات الويب + GPT | [وظائف] جعل GPT يجمع المعلومات من الويب للرد على الأسئلة حتى تبقى المعلومات محدثة باستمرار
-⭐ترجمة دقيقة لأوراق Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [وظائف] ترجمة مقالات Arxiv بجودة عالية بنقرة واحدة، أفضل أداة حاليًا لترجمة الأبحاث
-⭐[إدخال صوتي فوري](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [وظائف] الاستماع إلى الصوت (بشكل غير متزامن) وتقطيع الجمل تلقائيًا وتحديد وقت الإجابة تلقائيًا
-عرض الصيغ/الصور/الجداول | عرض الصيغ بشكل [TeX وبشكل مُصيَّر](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) في آن واحد، مع دعم إبراز الصيغ والكود
-⭐إضافة AutoGen متعددة الوكلاء | [وظائف] استكشاف إمكانات الذكاء الناشئ متعدد الوكلاء باستخدام Microsoft AutoGen!
-تبديل الواجهة المظلمة | يمكنك التبديل إلى الواجهة المظلمة بإضافة ```/?__theme=dark``` إلى نهاية عنوان URL في المتصفح
-دعم المزيد من نماذج LLM | دعم GPT3.5 وGPT4 و[ChatGLM2 من جامعة تسينغهوا](https://github.com/THUDM/ChatGLM2-6B) و[MOSS من جامعة فودان](https://github.com/OpenLMLab/MOSS)
-⭐نموذج ChatGLM2 المضبوط بدقة | دعم تحميل نماذج ChatGLM2 المضبوطة بدقة (fine-tuned) مع توفير إضافة مساعدة للضبط الدقيق
-دعم المزيد من نماذج LLM، ودعم [النشر على Huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | إضافة واجهة NewBing (بينغ الجديد)، وإدخال [JittorLLMs](https://github.com/Jittor/JittorLLMs) من جامعة تسينغهوا لدعم [LLaMA](https://github.com/facebookresearch/llama) و[Pangu](https://openi.org.cn/pangu/)
-⭐حزمة pip باسم [void-terminal](https://github.com/binary-husky/void-terminal) | استدعاء جميع وظائف وإضافات هذا المشروع من Python مباشرة دون واجهة رسومية (قيد التطوير)
-⭐إضافة المحطة الفارغة (Void Terminal) | [وظائف] جدولة إضافات المشروع الأخرى مباشرة باللغة الطبيعية
-المزيد من العروض (إنشاء الصور وغيرها)…… | انظر نهاية هذا المستند……
- - -- شكل جديد (عن طريق تعديل الخيار LAYOUT في `config.py` لقانون التوزيع "اليمين أو اليسار" أو "الأعلى أو الأسفل") -
- -
- - -- جميع الأزرار يتم إنشاؤها ديناميكيًا من خلال قراءة functional.py ويمكن إضافة وظائف مخصصة بحرية وتحرير الحافظة -
- -
- -- التجميل / التحوير -
- -
- - - -- إذا تضمّن الإخراج معادلات، فسيتم عرضها بشكلٍ يمكّن من النسخ والقراءة على النحوين: TEX ورسومية. -
- -
- -- هل تشعر بالكسل من قراءة كود المشروع؟ قم بمدها مباشرةً إلى ChatGPT -
- -
- -- دمج نماذج اللغات الكبيرة المختلفة (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -
-
-# Installation
-### طريقة التثبيت الأولى: التشغيل المباشر (Windows أو Linux أو MacOS)
-
-1. قم بتنزيل المشروع
-```sh
-git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. قم بتكوين API_KEY
-
-في ملف `config.py`، قم بتكوين مفتاح API والإعدادات الأخرى، [انقر هنا للاطلاع على طريقة التكوين في بيئات الشبكات الخاصة](https://github.com/binary-husky/gpt_academic/issues/1). [انقر هنا لزيارة صفحة الويكي](https://github.com/binary-husky/gpt_academic/wiki/توضيحات-تكوين-المشروع).
-
-" سيفحص البرنامج أولًا وجود ملف تكوين خاص يسمى `config_private.py`، وسيستخدم التكوينات الموجودة فيه لتجاوز التكوينات ذات الأسماء المطابقة في `config.py`. إذا فهمت هذا المنطق، فإننا نوصي بشدة بإنشاء ملف تكوين جديد يسمى `config_private.py` بجوار `config.py` ونقل (نسخ) التكوينات من `config.py` إليه (انسخ العناصر التي عدّلتها فقط). "
-
-" يدعم المشروع أيضًا التكوين عبر `متغيرات البيئة`، ويمكن الاطلاع على تنسيق كتابتها في ملف `docker-compose.yml` أو في صفحة الويكي الخاصة بنا. أولوية قراءة التكوينات هي: `متغيرات البيئة` > `config_private.py` > `config.py`. "
-
-3. قم بتثبيت التبعيات
-```sh
-# (الخيار الأول: إذا كنت تعرف Python، بشرط python>=3.9) ملاحظة: استخدم مستودع pip الرسمي أو مستودع علي بابا (Aliyun)، ويمكن تغيير المستودع مؤقتًا بالأمر: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (الخيار الثاني: باستخدام Anaconda) الخطوات مشابهة (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11    # إنشاء بيئة Anaconda
-conda activate gptac_venv                 # تنشيط بيئة Anaconda
-python -m pip install -r requirements.txt # هذه الخطوة مطابقة لخطوة تثبيت pip
-```
-
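-وكمثال توضيحي مبسّط على الفكرة أعلاه (القيم هنا افتراضية للتوضيح فقط، أما الأسماء `API_KEY` و`USE_PROXY` و`proxies` فهي عناصر تكوين موجودة في `config.py`)، يمكن أن يبدو ملف `config_private.py` هكذا:
-```python
-# config_private.py — مثال توضيحي: انسخ هنا العناصر التي عدّلتها فقط لتتجاوز نظيرتها في config.py
-API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxx"   # مفتاح افتراضي؛ يمكن إدخال عدة مفاتيح مفصولة بفواصل
-USE_PROXY = True
-proxies = {
-    "http":  "socks5h://localhost:11284",  # عنوان بروكسي افتراضي للتوضيح فقط
-    "https": "socks5h://localhost:11284",
-}
-```
-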
إذا كنت بحاجة إلى دعم ChatGLM2 من جامعة تسينغهوا أو MOSS من جامعة فودان أو RWKV Runner كخلفية (backend)، انقر هنا للتوسيع
-

- -【خطوات اختيارية】إذا كنت بحاجة إلى دعم جودة عالية لتشات جامعة تسينهوا (ChatGLM2) الصينية وجامعة فودان (MOSS)، يتعين عليك تثبيت تبعيات إضافية (شرط مسبق: التعامل مع Python واستخدام Pytorch وتوفر الحاسوب الشخصي بمواصفات قوية): -```sh -# 【خطوات اختيارية 1】دعم جودة عالية لتشات جامعة تسينهوا (ChatGLM2) -python -m pip install -r request_llms/requirements_chatglm.txt - -# 【خطوات اختيارية 2】دعم جودة عالية لتشات جامعة فودان (MOSS) -python -m pip install -r request_llms/requirements_moss.txt -git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # عند تنفيذ هذا الأمر، يجب أن تكون في مسار المشروع الرئيسي - -# 【خطوات اختيارية 3】دعم RWKV Runner -راجع الويكي: https://github.com/binary-husky/gpt_academic/wiki/دليل-تكوين-RWKV - -# 【خطوات اختيارية 4】تأكد من أن ملف التكوين config.py يحتوي على النماذج المرجوة، وهناك النماذج المدعومة حاليًا التالية (توجد خطط لتشغيل "jittorllms" في docker فقط): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- - -4. تشغيل البرنامج -```sh -python main.py -``` - -### طريقة التثبيت الثانية: استخدام Docker - -0. نصب القدرات الكاملة للمشروع (هذا هو الصورة الكبيرة التي تحتوي على CUDA و LaTeX. ولكن إذا كانت سرعة الإنترنت بطيئة أو القرص الصلب صغير، فإننا لا نوصي باستخدام هذا الخيار) -[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) - -``` sh -# قم بتعديل ملف docker-compose.yml للحفاظ على الخطة رقم 0 وحذف الخطط الأخرى. ثم أشغل: -docker-compose up -``` - -1. تشغيل نموذج ChatGPT فقط + 文心一言 (Wenxin YIYan) + Spark عبر الإنترنت (يُوصى بهذا الخيار للمعظم) - -[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) -[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) -[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) - -``` sh -# قم بتعديل ملف docker-compose.yml للحفاظ على الخطة رقم 1 وحذف الخطط الأخرى. ثم أشغل: -docker-compose up -``` - -P.S. للاستفادة من إمكانية اللافتكس الإضافية، يرجى الرجوع إلى الويكي. بالإضافة إلى ذلك، يمكنك استخدام الخطة 4 أو الخطة 0 مباشرة للحصول على إمكانية اللافتكس. - -2. تشغيل نموذج ChatGPT + نموذج ChatGLM2 + نموذج MOSS + نموذج LLAMA2 + تون يي تشين ون (QiChaYiWen) (يتطلب معرفة بتشغيل نيفيديا دوكر (Nvidia Docker)) - -[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) - -``` sh -# قم بتعديل ملف docker-compose.yml للحفاظ على الخطة رقم 2 وحذف الخطط الأخرى. ثم أشغل: -docker-compose up -``` - -### طريقة التثبيت الثالثة: طرائق نشر أخرى -1. **نصوص بنقرة واحدة لأنظمة Windows**. -يمكن لمستخدمي Windows الذين لا يعرفون بيئة Python تنزيل سكربت التشغيل بنقرة واحدة من [الإصدارات](https://github.com/binary-husky/gpt_academic/releases) المنشورة لتثبيت الإصدار الذي لا يحتوي على نماذج محلية. -المساهمة في السكربت تعود لـ[oobabooga](https://github.com/oobabooga/one-click-installers). - -2. استخدام واجهة برمجة تطبيقات (API) مطراف ثالثة، Microsoft Azure، ونشوة النص، وغيرها، يرجى الرجوع إلى [صفحة الويكي](https://github.com/binary-husky/gpt_academic/wiki/إعدادات-التكوين-للمشروع) الخاصة بنا - -3. دليل تجنب المشاكل عند نشر المشروع في خوادم السحابة. -يرجى زيارة صفحة [دليل نشر خوادم السحابة في المحيط](https://github.com/binary-husky/gpt_academic/wiki/دليل-نشر-خوادم-السحابة) - -4. طرائق نشر المشروع بأحدث الأساليب - - استخدام Sealos للنشر السريع [بنقرة واحدة](https://github.com/binary-husky/gpt_academic/issues/993). - - استخدم WSL2 (Windows Subsystem for Linux). يُرجى زيارة صفحة الويكي [لدليل التثبيت-2](https://github.com/binary-husky/gpt_academic/wiki/دليل-تشغيل-WSL2-(Windows-Subsystem-for-Linux) - - كيفية تشغيل البرنامج تحت عنوان فرعي (على سبيل المثال: `http://localhost/subpath`). 
يُرجى زيارة [إرشادات FastAPI](docs/WithFastapi.md)
-
-
-# الاستخدام المتقدم
-### I: إنشاء أزرار مخصصة (اختصارات أكاديمية)
-افتح `core_functional.py` بأي محرر نصوص وأضف الإدخال التالي ثم أعد تشغيل البرنامج. (إذا كان الزر موجودًا بالفعل، فيمكن تعديل البادئة واللاحقة بشكل فوري دون إعادة تشغيل البرنامج.)
-على سبيل المثال (لاحظ أن مفتاحي القاموس "Prefix" و"Suffix" يجب أن يبقيا بالإنجليزية):
-```
-"ترجمة متميزة من الإنجليزية إلى العربية": {
-    # البادئة (Prefix)، تُضاف قبل إدخالك. تُستخدم مثلًا لوصف ما تريده كالترجمة أو شرح الكود أو التدقيق وغير ذلك
-    "Prefix": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n",
-
-    # اللاحقة (Suffix)، تُضاف بعد إدخالك. يمكن استخدامها مع البادئة لوضع علامات اقتباس حول إدخالك.
-    "Suffix": "",
-},
-```
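-وكتوضيح افتراضي مبسّط (وليس منطق المشروع الفعلي)، هكذا تُغلِّف قيمتا "Prefix" و"Suffix" نص المستخدم قبل إرساله إلى النموذج:
-```python
-# مثال توضيحي افتراضي: المفاتيح تبقى بالإنجليزية بينما يمكن أن تكون القيم بالعربية
-entry = {
-    "Prefix": "يرجى ترجمة النص التالي إلى العربية ثم شرح المصطلحات المختصة في جدول Markdown:\n\n",
-    "Suffix": "",
-}
-user_input = "Transformers are a neural network architecture."
-final_prompt = entry["Prefix"] + user_input + entry["Suffix"]  # النص النهائي المُرسل إلى النموذج
-print(final_prompt)
-```
-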
- -
- -### II: إنشاء مكونات وظيفية مخصصة -قم بكتابة مكونات وظيفية قوية لتنفيذ أي مهمة ترغب في الحصول عليها وحتى تلك التي لم تخطر لك على بال. -إن إنشاء وتصحيح المكونات في هذا المشروع سهل للغاية، فما عليك سوى أن تمتلك بعض المعرفة الأساسية في لغة البرمجة بايثون وتستند على القالب الذي نقدمه. -للمزيد من التفاصيل، يُرجى الاطلاع على [دليل المكونات الوظيفية](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). - - -# التحديثات -### I: تحديثات - -1. ميزة حفظ الدردشة: يمكن حفظ الدردشة الحالية كملف HTML قابل للقراءة والاسترداد ببساطة عند استدعاء الوظيفة في منطقة المكونات `حفظ الدردشة الحالية` ، ويمكن استرجاع المحادثة السابقة ببساطة عند استدعاء الوظيفة في منطقة المكونات (القائمة المنسدلة) `تحميل سجل الدردشة` . -نصيحة: يمكنك النقر المباشر على `تحميل سجل الدردشة` بدون تحديد ملف لعرض ذاكرة التخزين المؤقت لسجلات HTML. -
- -
- -2. ميزة ترجمة المقالات العلمية بواسطة Latex/Arxiv -
- ===> - -
- -3. محطة فراغ (فهم نغمة المستخدم من داخل اللغة الطبيعية واستدعاء وظائف أخرى تلقائيًا) - -- الخطوة 1: اكتب "بالرجاء استدعاء وظيفة ترجمة المقالة الأكاديمية من PDF وعنوان المقال هو https://openreview.net/pdf?id=rJl0r3R9KX". -- الخطوة 2: انقر فوق "محطة الفراغ". - -
- -
- -4. تصميم الوظائف المتعددة القادرة على توفير وظائف قوية بواجهات بسيطة -
- - -
- -5. ترجمة وإلغاء ترجمة المشاريع الأخرى مفتوحة المصدر -
- - -
- -6. ميزة تزيين [live2d](https://github.com/fghrsh/live2d_demo) (مغلقة بشكل افتراضي، يتطلب تعديل `config.py`) -
- -
- -7. إنتاج الصور من OpenAI -
- -
- -8. تحليل وإجماع الصوت من OpenAI -
- -
- -9. إصلاح أخطاء اللغة الطبيعة في Latex -
- ===> - -
- -10. تغيير اللغة والموضوع -
- -
- - - -### II: الإصدارات: -- الإصدار 3.70 (قريبًا): تحسينات لوظائف AutoGen وتصميم سلسلة من المكونات المشتقة -- الإصدار 3.60: إدخال AutoGen كأساس لوظائف الجيل الجديد -- الإصدار 3.57: دعم GLM3، نار النجوم v3، وشجرة الكلمات v4، وإصلاح خطأ الازدحام في النماذج المحلية -- الإصدار 3.56: الدعم لإضافة مزامنة الأزرار الأساسية حسب الطلب، وصفحة تجميع تقارير البيانات في ملف PDF -- الإصدار 3.55: إعادة هيكلة واجهة المستخدم الأمامية، وإضافة نافذة عائمة وشريط قائمة -- الإصدار 3.54: إضافة مترجم الكود المباشر (Code Interpreter) (قيد الانجاز) -- الإصدار 3.53: دعم اختيار موضوعات واجهة مختلفة، وزيادة الاستقرار وحل مشاكل التعارض بين المستخدمين المتعدد -- الإصدار 3.50: استخدام اللغة الطبيعية لاستدعاء جميع وظائف المشروع هذا (محطة فراغ)، ودعم تصنيف الوظائف وتحسين واجهة المستخدم وتصميم مواضيع جديدة -- الإصدار 3.49: دعم المنصات البحثية في بيدو كونفان وشجرة الكلمات -- الإصدار 3.48: دعم علي بابا, بوكما رش حتكيا, إكسونامبلومانت النار -- الإصدار 3.46: دعم محادثة نصية في الوقت الحقيقي غير مراقبة -- الإصدار 3.45: دعم تخصيص LatexChatglm النموذج التعديل -- الإصدار 3.44: دعم Azure رسميًا، وتحسين سهولة الاستخدام للواجهات الأمامية -- الإصدار 3.4: +ترجمة النصوص الكاملة للمقالات من خلال ملف PDF، +اختيار موضع المنطقة النصية، +خيار التخطيط الرأسي، +تحسينات في وظائف التداخل العديدة -- الإصدار 3.3: +وظائف متكاملة للمعلومات عبر الإنترنت -- الإصدار 3.2: دعم وظائف المكونات التي تحتوي معلمات أكثر (حفظ النص، فهم أي لغة برمجة، طلب أي تركيبة LLM في وقت واحد) -- الإصدار 3.1: دعم السؤال نحو نماذج GPT المتعددة! دعم واجهة api2d، دعم توازن الأحمال بين المفاتيح الخاصة المتعددة -- الإصدار 3.0: دعم لنماذج جات، واحدة منها لشتلس الصغيرة -- الإصدار 2.6: إعادة تصميم بنية الوظائف، وتحسين التفاعل وإضافة مزيد من الوظائف -- الإصدار 2.5: التحديث التلقائي، وحل مشكلة النص الطويل عند ملخص المشاريع الضخمة وتجاوز النصوص. -- الإصدار 2.4: (١) إضافة ميزة ترجمة المقالات الدورية. (٢) إضافة ميزة لتحويل مكان منطقة الإدخال. (٣) إضافة خيار التخطيط العمودي (vertical layout). (٤) تحسين وظائف المكونات متعددة الخيوط. -- الإصدار 2.3: تحسين التفاعل مع مواضيع متعددة -- الإصدار 2.2: دعم إعادة تحميل الوظائف المكونة حراريًا -- الإصدار 2.1: تصميم قابل للطي -- الإصدار 2.0: إدخال وحدات الوظائف المكونة -- الإصدار 1.0: الوظائف الأساسية - -مجموعة المطورين GPT Academic QQ: `610599535` - -- مشكلات معروفة - - بعض ملحقات متصفح الترجمة تتداخل مع تشغيل الواجهة الأمامية لهذا البرنامج - - يحتوي Gradio الرسمي حاليًا على عدد كبير من مشاكل التوافق. يُرجى استخدام `requirement.txt` لتثبيت Gradio. - -### III: الأنساق -يمكن تغيير الأنساق بتعديل خيار `THEME` (config.py) -1. `Chuanhu-Small-and-Beautiful` [الرابط](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) - - -### IV: فروع تطوير هذا المشروع - -1. الفرع `master`: الفرع الرئيسي، إصدار مستقر -2. الفرع `frontier`: الفرع التطويري، إصدار تجريبي - - -### V: المراجع والفروض التعليمية - -``` -استخدمت العديد من التصاميم الموجودة في مشاريع ممتازة أخرى في الأكواد التالية، للمراجع عشوائية: - -# ViewGradio: -https://github.com/THUD - - - -# مُثبّت بضغطة واحدة Oobabooga: -https://github.com/oobabooga/one-click-installers - -# المزيد: -https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo diff --git a/docs/README.English.md b/docs/README.English.md deleted file mode 100644 index 48afdf4549f8f1a83927da857cd535a18e3e6f97..0000000000000000000000000000000000000000 --- a/docs/README.English.md +++ /dev/null @@ -1,357 +0,0 @@ - - - -> **Note** -> -> This README was translated by GPT (implemented by the plugin of this project) and may not be 100% reliable. Please carefully check the translation results. 
-> -> 2023.11.7: When installing dependencies, please select the **specified versions** in the `requirements.txt` file. Installation command: `pip install -r requirements.txt`. - - -#
GPT Academic Optimization
-
-**If you like this project, please give it a Star.**
-To translate this project into an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Please note that only plugins (buttons) highlighted in **bold** support reading files, and some plugins are located in the **dropdown menu** in the plugin area. Additionally, we welcome and process any new plugin PRs with the **highest priority**.
->
-> 2. The functionality of each file in this project is described in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As versions iterate, you can also click the relevant function plugin at any time to call GPT and regenerate the project's self-analysis report. Common questions are answered in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Regular installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration instructions](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
->
-> 3. This project is compatible with, and encourages the use of, domestic large language models such as ChatGLM. Multiple API keys can be used together by filling in the configuration file with `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To switch `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it.
-
-
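-As a toy illustration of the comma-separated key pool described in note 3 above (placeholder key strings; this is not the project's actual load-balancing code), the pool can be split and one key drawn per request:
-```python
-# Hypothetical sketch: splitting a comma-separated API_KEY pool and drawing one key per request
-import random
-
-API_KEY = "openai-key1,openai-key2,azure-key3,api2d-key4"  # placeholder keys
-
-def pick_key(pool: str) -> str:
-    keys = [k.strip() for k in pool.split(",") if k.strip()]
-    return random.choice(keys)  # a real implementation could also filter keys by provider
-
-print(pick_key(API_KEY))
-```
-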
- -Feature (⭐ = Recently Added) | Description ---- | --- -⭐[Integrate New Models](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) and Wenxin Yiyu, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhifu API, DALLE3 -Proofreading, Translation, Code Explanation | One-click proofreading, translation, searching for grammar errors in papers, explaining code -[Custom Shortcuts](https://www.bilibili.com/video/BV14s4y1E7jN) | Support for custom shortcuts -Modular Design | Support for powerful [plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), plugins support [hot updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Program Profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] One-click to profile Python/C/C++/Java/Lua/... project trees or [self-profiling](https://www.bilibili.com/video/BV1cj411A7VW) -Read Papers, [Translate](https://www.bilibili.com/video/BV1KT411x7Wn) Papers | [Plugin] One-click to interpret full-text latex/pdf papers and generate abstracts -Full-text Latex [Translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [Proofreading](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin] One-click translation or proofreading of latex papers -Batch Comment Generation | [Plugin] One-click batch generation of function comments -Markdown [Translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin] Did you see the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the top five languages? 
-Chat Analysis Report Generation | [Plugin] Automatically generates summary reports after running -[PDF Paper Full-text Translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extract title & abstract of PDF papers + translate full-text (multi-threaded) -[Arxiv Helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugin] Enter the arxiv article URL to translate the abstract + download PDF with one click -One-click Proofreading of Latex Papers | [Plugin] Syntax and spelling correction of Latex papers similar to Grammarly + output side-by-side PDF -[Google Scholar Integration Helper](https://www.bilibili.com/video/BV19L411U7ia) | [Plugin] Given any Google Scholar search page URL, let GPT help you [write related works](https://www.bilibili.com/video/BV1GP411U7Az/) -Internet Information Aggregation + GPT | [Plugin] One-click to let GPT retrieve information from the Internet to answer questions and keep the information up to date -⭐Arxiv Paper Fine Translation ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] One-click [high-quality translation of arxiv papers](https://www.bilibili.com/video/BV1dz4y1v77A/), the best paper translation tool at present -⭐[Real-time Speech Input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] Asynchronously [listen to audio](https://www.bilibili.com/video/BV1AV4y187Uy/), automatically segment sentences, and automatically find the best time to answer -Formula/Image/Table Display | Can simultaneously display formulas in [TeX form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), support formula and code highlighting -⭐AutoGen Multi-Agent Plugin | [Plugin] Explore the emergence of multi-agent intelligence with Microsoft AutoGen! -Start Dark [Theme](https://github.com/binary-husky/gpt_academic/issues/173) | Add ```/?__theme=dark``` to the end of the browser URL to switch to the dark theme -[More LLM Model Support](https://www.bilibili.com/video/BV1wT411p7yf) | It must be great to be served by GPT3.5, GPT4, [THU ChatGLM2](https://github.com/THUDM/ChatGLM2-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time, right? -⭐ChatGLM2 Fine-tuning Model | Support for loading ChatGLM2 fine-tuning models and providing ChatGLM2 fine-tuning assistant plugins -More LLM Model Access, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Join NewBing interface (New Bing), introduce Tsinghua [JittorLLMs](https://github.com/Jittor/JittorLLMs) to support [LLaMA](https://github.com/facebookresearch/llama) and [Pangu](https://openi.org.cn/pangu/) -⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip package | Use this project's all function plugins directly in Python without GUI (under development) -⭐Void Terminal Plugin | [Plugin] Schedule other plugins of this project directly in natural language -More New Feature Demonstrations (Image Generation, etc.)...... | See the end of this document ........ -
- - -- New interface (modify the LAYOUT option in `config.py` to switch between "left-right layout" and "top-bottom layout") -
- -
- - -- All buttons are dynamically generated by reading `functional.py` and can be added with custom functions to free up the clipboard -
- -
- -- Proofreading/Correction -
- -
- - - -- If the output contains formulas, they will be displayed in both tex format and rendered format for easy copying and reading. -
- -
- -- Too lazy to look at the project code? Show off the whole project directly in chatgpt's mouth -
- -
- -- Multiple large language models mixed calling (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -
- -# Installation -### Installation Method I: Run directly (Windows, Linux or MacOS) - -1. Download the project -```sh -git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure API_KEY - -In `config.py`, configure API KEY and other settings, [click here to see special network environment configuration methods](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 - -「 The program will first check if a secret configuration file named `config_private.py` exists and use the configurations from that file to override the ones in `config.py` with the same names. If you understand this logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and move (copy) the configurations from `config.py` to `config_private.py` (only copy the configuration items you have modified). 」 - -「 Project configuration can be done via `environment variables`. The format of the environment variables can be found in the `docker-compose.yml` file or our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). Configuration priority: `environment variables` > `config_private.py` > `config.py`. 」 - - -3. Install dependencies -```sh -# (Option I: If you are familiar with python, python>=3.9) Note: Use the official pip source or the Aliyun pip source. Temporary method for switching the source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: Using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # Create the anaconda environment -conda activate gptac_venv # Activate the anaconda environment -python -m pip install -r requirements.txt # This step is the same as the pip installation process -``` - - -
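-To make the configuration-priority rule from step 2 concrete, here is a minimal sketch of the documented lookup order (`environment variables` > `config_private.py` > `config.py`). It is illustrative only; the project's own configuration reader handles additional cases such as type conversion:
-```python
-# Minimal sketch of the documented priority: environment variables > config_private.py > config.py
-import importlib
-import os
-
-def read_single_conf(name: str):
-    if name in os.environ:                  # 1) environment variables win
-        return os.environ[name]
-    try:                                    # 2) then config_private.py, if present
-        return getattr(importlib.import_module("config_private"), name)
-    except (ImportError, AttributeError):
-        pass
-    return getattr(importlib.import_module("config"), name)  # 3) finally config.py
-```
-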
If you need to support THU ChatGLM2, Fudan MOSS, or RWKV Runner as backends, click here to expand -

- -【Optional Step】If you need to support THU ChatGLM2 or Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Familiar with Pytorch + Sufficient computer configuration): -```sh -# 【Optional Step I】Support THU ChatGLM2. Note: If you encounter the "Call ChatGLM fail unable to load ChatGLM parameters" error, refer to the following: 1. The default installation above is for torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# 【Optional Step II】Support Fudan MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, make sure you are in the root directory of the project - -# 【Optional Step III】Support RWKV Runner -Refer to wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner - -# 【Optional Step IV】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. The currently supported models are as follows (jittorllms series currently only supports the docker solution): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- - - -4. Run -```sh -python main.py -``` - -### Installation Method II: Use Docker - -0. Deploy all capabilities of the project (this is a large image that includes cuda and latex. Not recommended if you have slow internet speed or small hard drive) -[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) - -``` sh -# Modify docker-compose.yml, keep scheme 0 and delete other schemes. Then run: -docker-compose up -``` - -1. ChatGPT + Wenxin + Spark online models only (recommended for most people) -[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) -[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) -[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) - -``` sh -# Modify docker-compose.yml, keep scheme 1 and delete other schemes. Then run: -docker-compose up -``` - -P.S. If you need the latex plugin functionality, please see the Wiki. Also, you can directly use scheme 4 or scheme 0 to get the Latex functionality. - -2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Intelligent Questions (requires familiarity with [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime) -[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) - -``` sh -# Modify docker-compose.yml, keep scheme 2 and delete other schemes. Then run: -docker-compose up -``` - - -### Installation Method III: Other deployment methods -1. **Windows one-click running script**. -Windows users who are completely unfamiliar with the python environment can download the one-click running script from the [Release](https://github.com/binary-husky/gpt_academic/releases) to install the version without local models. -The script is contributed by [oobabooga](https://github.com/oobabooga/one-click-installers). - -2. Use third-party APIs, Azure, Wenxin, Xinghuo, etc., see [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) - -3. Pitfall guide for deploying on cloud servers. -Please visit [Cloud Server Remote Deployment Wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -4. Some new deployment platforms or methods - - Use Sealos [to deploy with one click](https://github.com/binary-husky/gpt_academic/issues/993). - - Use WSL2 (Windows Subsystem for Linux). Please refer to [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - How to run under a subpath (such as `http://localhost/subpath`). 
Please visit [FastAPI Run Instructions](docs/WithFastapi.md) - - - -# Advanced Usage -### I: Customizing new convenient buttons (academic shortcuts) -Open `core_functional.py` with any text editor, add the following entry, and then restart the program. (If the button already exists, both the prefix and suffix can be modified on-the-fly without restarting the program.) -For example: -``` -"Super Translation": { - # Prefix: will be added before your input. For example, used to describe your request, such as translation, code explanation, proofreading, etc. - "Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n", - - # Suffix: will be added after your input. For example, used to wrap your input in quotation marks along with the prefix. - "Suffix": "", -}, -``` -
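-As a rough sketch of how such an entry takes effect (assuming this repo's `get_core_functions()` accessor in `core_functional.py`; the actual button wiring in the UI is more involved), the entry's Prefix and Suffix simply wrap the user's input:
-```python
-# Illustrative only: wrapping user input with a core_functional.py entry
-from core_functional import get_core_functions  # accessor name assumed from this repo's layout
-
-user_input = "The results are statistical significant."
-entry = get_core_functions()["Super Translation"]   # the entry added above
-final_prompt = entry["Prefix"] + user_input + entry["Suffix"]
-print(final_prompt)
-```
-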
- -
- -### II: Custom function plugins -Write powerful function plugins to perform any task you desire and can't imagine. -The difficulty of writing and debugging plugins in this project is very low. As long as you have a certain knowledge of Python, you can implement your own plugin functionality by following the template we provide. -For more details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). - -# Updates -### I: Dynamics - -1. Conversation-saving feature. Call `Save the current conversation` in the function plugin area to save the current conversation as a readable and restorable HTML file. Additionally, call `Load conversation history archive` in the function plugin area (drop-down menu) to restore previous sessions. -Tip: Clicking `Load conversation history archive` without specifying a file allows you to view the cached historical HTML archive. -
- -
- -2. ⭐Latex/Arxiv paper translation feature⭐ -
- ===> - -
- -3. Void Terminal (understanding user intent from natural language input and automatically calling other plugins) - -- Step 1: Enter " Please call the plugin to translate the PDF paper, the address is https://openreview.net/pdf?id=rJl0r3R9KX" -- Step 2: Click "Void Terminal" - -
- -
- -4. Modular function design, simple interface supporting powerful functionality -
- - -
- -5. Translate and interpret other open-source projects -
- - -
- -6. Added small features that decorate [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default, needs modification in `config.py`) -
- -
- -7. OpenAI image generation -
- -
- -8. OpenAI audio parsing and summarization -
- -
- -9. Latex full-text proofreading and correction -
- ===> - -
- -10. Language and theme switching -
- -
-
-
-
-### II: Versions:
-- version 3.70 (todo): Optimize the AutoGen plugin theme and design a series of derivative plugins
-- version 3.60: Introduce AutoGen as the cornerstone of the new generation of plugins
-- version 3.57: Support GLM3, Spark v3, and Wenxin Yiyan v4, and fix concurrency bugs in local models
-- version 3.56: Support dynamically adding basic function buttons and a new summary-PDF page
-- version 3.55: Refactor the frontend interface and introduce floating windows and a menu bar
-- version 3.54: Add a dynamic code interpreter (Code Interpreter) (to be improved)
-- version 3.53: Support dynamically choosing different interface themes, improve stability, and resolve conflicts between multiple users
-- version 3.50: Use natural language to call all function plugins of this project (Void Terminal), support plugin classification, improve the UI, and design new themes
-- version 3.49: Support the Baidu Qianfan platform and Wenxin Yiyan
-- version 3.48: Support Alibaba DAMO Academy Tongyi Qianwen, Shanghai AI-Lab Shusheng, and Xunfei Spark
-- version 3.46: Support fully hands-off real-time voice conversation
-- version 3.45: Support customizing ChatGLM2 fine-tuned models
-- version 3.44: Officially support Azure, optimize interface usability
-- version 3.4: + Arxiv paper translation and latex paper correction functionality
-- version 3.3: + Internet information integration functionality
-- version 3.2: Function plugins support more parameter interfaces (conversation saving, interpreting any code language + asking any combination of LLMs simultaneously)
-- version 3.1: Support querying multiple GPT models simultaneously! Support API2D, support load balancing across multiple API keys
-- version 3.0: Support chatglm and other small-scale LLMs
-- version 2.6: Refactored the plugin structure, improved interactivity, added more plugins
-- version 2.5: Self-updating; fixed the problem of text being too long and tokens overflowing when summarizing large code projects
-- version 2.4: (1) Added PDF full-text translation; (2) added switching the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
-- version 2.3: Enhanced multi-threaded interactivity
-- version 2.2: Function plugin hot-reloading support
-- version 2.1: Collapsible layout
-- version 2.0: Introduced modular function plugins
-- version 1.0: Basic functionality
-
-GPT Academic Developer QQ Group: `610599535`
-
-- Known Issues
-  - Some browser translation plugins interfere with the frontend operation of this software
-  - Official Gradio currently has many compatibility bugs; please make sure to install Gradio using `requirements.txt`
-
-### III: Themes
-You can change the theme by modifying the `THEME` option (config.py).
-1. `Chuanhu-Small-and-Beautiful` [Website](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
-
-### IV: Development Branches of This Project
-
-1. `master` branch: Main branch, stable version
-2. 
`frontier` branch: Development branch, test version - -*** - -### V: References and Learning - - -The code references the designs of many other excellent projects, in no particular order: - -[THU ChatGLM2-6B](https://github.com/THUDM/ChatGLM2-6B) - - -[THU JittorLLMs](https://github.com/Jittor/JittorLLMs) - - -[ChatPaper](https://github.com/kaixindelele/ChatPaper) - - -[Edge-GPT](https://github.com/acheong08/EdgeGPT) - - -[ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) - - - -# Oobabooga one-click installer: -https://github.com/oobabooga/one-click-installers - -# More: -https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo diff --git a/docs/README.French.md b/docs/README.French.md deleted file mode 100644 index bf136e59dc42f88b7303efc2cd98c12f55e4bced..0000000000000000000000000000000000000000 --- a/docs/README.French.md +++ /dev/null @@ -1,356 +0,0 @@ - - - -> **Remarque** -> -> Ce README a été traduit par GPT (implémenté par le plugin de ce projet) et n'est pas fiable à 100 %. Veuillez examiner attentivement les résultats de la traduction. -> -> 7 novembre 2023 : Lors de l'installation des dépendances, veuillez choisir les versions **spécifiées** dans le fichier `requirements.txt`. Commande d'installation : `pip install -r requirements.txt`. - - -#
Optimisation académique GPT (GPT Academic)
- -**Si vous aimez ce projet, merci de lui donner une étoile ; si vous avez inventé des raccourcis ou des plugins utiles, n'hésitez pas à envoyer des demandes d'extraction !** - -Si vous aimez ce projet, veuillez lui donner une étoile. -Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental). - -> **Remarque** -> -> 1. Veuillez noter que seuls les plugins (boutons) marqués en **surbrillance** prennent en charge la lecture de fichiers, et certains plugins se trouvent dans le **menu déroulant** de la zone des plugins. De plus, nous accueillons avec la plus haute priorité les nouvelles demandes d'extraction de plugins. -> -> 2. Les fonctionnalités de chaque fichier de ce projet sont spécifiées en détail dans [le rapport d'auto-analyse `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic个项目自译解报告). Vous pouvez également cliquer à tout moment sur les plugins de fonctions correspondants pour appeler GPT et générer un rapport d'auto-analyse du projet. Questions fréquemment posées [wiki](https://github.com/binary-husky/gpt_academic/wiki). [Méthode d'installation standard](#installation) | [Script d'installation en un clic](https://github.com/binary-husky/gpt_academic/releases) | [Instructions de configuration](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).. -> -> 3. Ce projet est compatible avec et recommande l'expérimentation de grands modèles de langage chinois tels que ChatGLM, etc. Prend en charge plusieurs clés API, vous pouvez les remplir dans le fichier de configuration comme `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Pour changer temporairement la clé API, entrez la clé API temporaire dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer celle-ci. - - -
- -Fonctionnalités (⭐ = fonctionnalité récemment ajoutée) | Description ---- | --- -⭐[Modèles acquis](https://github.com/binary-husky/gpt_academic/wiki/如何切换模型)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) et Wenxin Yiyuan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhifu API, DALLE3 -Amélioration, traduction, explication du code | Correction, traduction, recherche d'erreurs de syntaxe dans les articles, explication du code -[Raccourcis personnalisés](https://www.bilibili.com/video/BV14s4y1E7jN) | Prise en charge de raccourcis personnalisés -Conception modulaire | Prise en charge de plugins puissants personnalisables, prise en charge de la [mise à jour à chaud](https://github.com/binary-husky/gpt_academic/wiki/函数插件指南) des plugins -[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] Analyse en profondeur d'un arbre de projets Python/C/C++/Java/Lua/... d'un simple clic ou [auto-analyse](https://www.bilibili.com/video/BV1cj411A7VW) -Lecture d'articles, traduction d'articles | [Plugin] Lecture automatique des articles LaTeX/PDF et génération du résumé -Traduction complète de [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) ou amélioration de leur qualité | [Plugin] Traduction ou amélioration rapide des articles LaTeX -Génération de commentaires en masse | [Plugin] Génération facile de commentaires de fonctions -Traduction [chinois-anglais](https://www.bilibili.com/video/BV1yo4y157jV/) du Markdown | [Plugin] Avez-vous vu le [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) dans les cinq langues ci-dessus ? 
-Génération de rapports d'analyse du chat | [Plugin] Génération automatique d'un rapport récapitulatif après l'exécution du chat -[Fonction de traduction complète des articles PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extraction du titre et du résumé d'un article PDF, ainsi que traduction intégrale (multithreading) -Assistant Arxiv | [Plugin] Saisissez l'URL d'un article Arxiv pour traduire automatiquement le résumé et télécharger le PDF -Correction automatique d'articles LaTeX | [Plugin] Correction de la grammaire, de l'orthographe et comparaison avec le PDF correspondant, à la manière de Grammarly -Assistant Google Scholar | [Plugin] Donner l'URL d'une page de recherche Google Scholar pour obtenir de l'aide sur l'écriture des références -Agrégation d'informations sur Internet + GPT | [Plugin] Obtenez les informations de l'Internet pour répondre aux questions à l'aide de GPT, afin que les informations ne soient jamais obsolètes -⭐Traduction détaillée des articles Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] Traduction de haute qualité d'articles Arxiv en un clic, le meilleur outil de traduction d'articles à ce jour -⭐[Saisie orale en temps réel](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] Écoute asynchrone de l'audio, découpage automatique et recherche automatique du meilleur moment pour répondre -Affichage des formules, images, tableaux | Affichage simultané de la forme [TeX et rendue](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) des formules, prise en charge de la mise en évidence des formules et du code -⭐Plugin AutoGen multi-agents | [Plugin] Explorez les émergences intelligentes à plusieurs agents avec Microsoft AutoGen ! -Activation du [thème sombre](https://github.com/binary-husky/gpt_academic/issues/173) | Ajouter ```/?__theme=dark``` à l'URL du navigateur pour basculer vers le thème sombre -Prise en charge de plusieurs modèles LLM | Expérimentez avec GPT 3.5, GPT4, [ChatGLM2 de Tsinghua](https://github.com/THUDM/ChatGLM2-6B), [MOSS de Fudan](https://github.com/OpenLMLab/MOSS) simultanément ! -⭐Modèle ChatGLM2 fine-tuned | Chargez et utilisez un modèle fine-tuned de ChatGLM2, disponible avec un plugin d'assistance -Prise en charge de plus de modèles LLM, déploiement sur [Huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Ajout de l'interface de connaissance-API, support de [LLaMA](https://github.com/facebookresearch/llama) et [PanGuα](https://openi.org.cn/pangu/) -⭐Paquet pip [void-terminal](https://github.com/binary-husky/void-terminal) | Accédez à toutes les fonctions et plugins de ce projet directement depuis Python (en cours de développement) -⭐Plugin terminal du vide | [Plugin] Utilisez un langage naturel pour interagir avec les autres plugins du projet -Affichage de nouvelles fonctionnalités (génération d'images, etc.) …… | Voir à la fin de ce document …… -
- - -- Nouvelle interface (modifiez l'option LAYOUT dans `config.py` pour basculer entre la disposition "gauche-droite" et "haut-bas") -
- -
- - -- Tous les boutons sont générés dynamiquement en lisant `functional.py`, vous pouvez donc ajouter de nouvelles fonctionnalités personnalisées et libérer le presse-papiers. -
- -
- -- Retouche/correction -
- -
- - - -- If the output contains formulas, they will be displayed in both tex and rendered forms for easy copying and reading. - -
- -
- -- Don't feel like looking at the project code? Just give it to ChatGPT to show off. - -
- -
- -- Multiple large language models are mixed and used together (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4). - -
- -
- -# Installation -### Method I: Run directly (Windows, Linux, or MacOS) - -1. Download the project -```sh -git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure API_KEY - -In `config.py`, configure the API KEY and other settings. [Click here to see methods for special network environment configurations](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). - -「 The program will first check if there is a confidential configuration file named `config_private.py`, and use the configurations in that file to override the corresponding configurations in `config.py`. If you understand this logic, we strongly recommend creating a new configuration file named `config_private.py` right next to `config.py`, and move (copy) the configurations from `config.py` to `config_private.py` (only copy the configurations that you have modified). 」 - -「 You can also configure the project using `environment variables`. The format of the environment variables can be found in the `docker-compose.yml` file or on our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). The priority of configuration reading is: `environment variables` > `config_private.py` > `config.py`. 」 - -3. Install dependencies -```sh -# (Option I: If you are familiar with Python, python>=3.9) Note: Use the official pip source or the Ali pip source. Temporary change of source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: Use Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # Create an anaconda environment -conda activate gptac_venv # Activate the anaconda environment -python -m pip install -r requirements.txt # This step is the same as the pip installation step -``` - - -
If you need to support Tsinghua ChatGLM2/Fudan MOSS/RWKV as backends, click here to expand -

- -[Optional Steps] If you need to support Tsinghua ChatGLM2/Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Sufficient computer configuration): -```sh -# [Optional Step I] Support Tsinghua ChatGLM2. Comment on this note: If you encounter the error "Call ChatGLM generated an error and cannot load the parameters of ChatGLM", refer to the following: 1: The default installation is the torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True). -python -m pip install -r request_llms/requirements_chatglm.txt - -# [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: You need to be at the root directory of the project when executing this line of code - -# [Optional Step III] Support RWKV Runner -Refer to the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner - -# [Optional Step IV] Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. The currently supported models are as follows (jittorllms series currently only support the docker solution): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- - -4. Run -```sh -python main.py -``` - -### Method II: Use Docker - -0. Deploy all capabilities of the project (this is a large image that includes cuda and latex. But if you have a slow internet speed or a small hard drive, it is not recommended to use this) - -``` sh -# Modify the docker-compose.yml file, keep scheme 0 and delete the other schemes. Then run: -docker-compose up -``` - -1. ChatGPT + Wenxin Yiyu + Spark and other online models (recommended for most people) - -``` sh -# Modify the docker-compose.yml file, keep scheme 1 and delete the other schemes. Then run: -docker-compose up -``` - -NOTE: If you need Latex plugin functionality, please refer to the Wiki. Additionally, you can also use scheme 4 or scheme 0 directly to obtain Latex functionality. - -2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (requires familiarity with [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime) - -``` sh -# Modify the docker-compose.yml file, keep scheme 2 and delete the other schemes. Then run: -docker-compose up -``` - - -### Method III: Other deployment methods -1. **One-click run script for Windows**. -Windows users who are completely unfamiliar with the Python environment can download the one-click run script without local models from the [Release](https://github.com/binary-husky/gpt_academic/releases) section. -The script was contributed by [oobabooga](https://github.com/oobabooga/one-click-installers). - -2. Use third-party APIs, Azure, Wenxin Yiyu, Xinghuo, etc., see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). - -3. Pitfall guide for deploying on cloud servers. -Please visit the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97). - -4. Some new deployment platforms or methods - - Use Sealos [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993). - - Use WSL2 (Windows Subsystem for Linux). Please visit the [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - How to run under a subpath (such as `http://localhost/subpath`). Please see [FastAPI running instructions](docs/WithFastapi.md) - - - -# Utilisation avancée -### I: Personnalisation des nouveaux boutons d'accès rapide (raccourcis académiques) -Ouvrez `core_functional.py` avec n'importe quel éditeur de texte, ajoutez les entrées suivantes, puis redémarrez le programme. (Si le bouton existe déjà, le préfixe et le suffixe peuvent être modifiés à chaud sans redémarrer le programme). -Par exemple: -``` -"Traduction avancée de l'anglais vers le français": { - # Préfixe, ajouté avant votre saisie. Par exemple, utilisez-le pour décrire votre demande, telle que la traduction, l'explication du code, l'amélioration, etc. - "Prefix": "Veuillez traduire le contenu suivant en français, puis expliquer chaque terme propre à la langue anglaise utilisé dans le texte à l'aide d'un tableau markdown : \n\n", - - # Suffixe, ajouté après votre saisie. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu par des guillemets. - "Suffix": "", -}, -``` -
- -
- -### II: Personnalisation des plugins de fonction -Écrivez de puissants plugins de fonction pour accomplir toutes les tâches que vous souhaitez ou ne pouvez pas imaginer. -Le développement et le débogage de ces plugins dans ce projet sont très faciles. Tant que vous avez des connaissances de base en python, vous pouvez implémenter vos propres fonctionnalités grâce à notre modèle fourni. -Veuillez consulter le [Guide des plugins de fonction](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) pour plus de détails. - - -# Mises à jour -### I: Dynamique - -1. Fonction de sauvegarde de conversation. Appelez `Enregistrer la conversation en cours` dans la zone des plugins fonctionnels pour enregistrer la conversation en cours sous la forme d'un fichier HTML lisible et récupérable. En outre, appelez `Charger les archives de conversation` dans la zone des plugins fonctionnels (menu déroulant) pour restaurer les conversations précédentes. -Astuce: Si aucun fichier n'est spécifié, cliquez directement sur `Charger les archives de conversation` pour afficher le cache des archives HTML. -
- -
- -2. ⭐ Fonction de traduction des articles Latex/Arxiv ⭐ -
- ===> - -
- -3. Terminal du néant (comprendre l'intention de l'utilisateur à partir de la saisie en langage naturel et appeler automatiquement d'autres plugins) - -- Étape 1: Saisissez "Veuillez appeler le plugin de traduction pour le document PDF, l'URL est https://openreview.net/pdf?id=rJl0r3R9KX". -- Étape 2 : Cliquez sur "Terminal du néant". - -
- -
- -4. Conception de fonctionnalités modulaires, une interface simple peut prendre en charge des fonctionnalités puissantes -
- - -
- -5. Traduction et interprétation d'autres projets open-source -
- - -
- -6. Fonctionnalités supplémentaires intégrant [live2d](https://github.com/fghrsh/live2d_demo) (désactivé par défaut, nécessite des modifications dans `config.py`) -
- -
- -7. Génération d'images par OpenAI -
- -
- -8. Analyse et résumé audio par OpenAI -
- -
- -9. Vérification et correction orthographique complète du document en Latex -
- ===> - -
- -10. Changement de langue et de thème -
- -
- - - -### II: Versions: -- version 3.70(tâche à accomplir) : Optimisation de la fonction AutoGen et création d'une série de plugins dérivés -- version 3.60 : Introduction d'AutoGen comme base des nouveaux plugins -- version 3.57 : Prise en charge de GLM3, Starlight v3, Zen v4 et correction de l'incompatibilité des modèles locaux -- version 3.56 : Possibilité d'ajouter dynamiquement des boutons de fonction de base et nouvelle page de synthèse des PDF -- version 3.55: Refonte de l'interface utilisateur avec fenêtres flottantes et barre de menu -- version 3.54 : Nouvel interpréteur de code dynamique (Code Interpreter) (à améliorer) -- version 3.53 : Possibilité de choisir dynamiquement différents thèmes d'interface, amélioration de la stabilité et résolution des problèmes de conflit entre utilisateurs multiples -- version 3.50 : Utiliser le langage naturel pour appeler toutes les fonctions du projet (Terminal du néant), prise en charge de la classification des plugins, amélioration de l'interface utilisateur, conception de nouveaux thèmes -- version 3.49 : Prise en charge de Baidu Qianfan et Xiaomi-Wenyiyan -- version 3.48 : Prise en charge d'Ali-DA, Shanghai AI-Lab-Shusheng et Xunfei Xinghuo -- version 3.46 : Prise en charge de la conversation audio temps réel sans intervention -- version 3.45 : Prise en charge de la personnalisation du modèle ChatGLM2 -- version 3.44 : Prise en charge officielle d'Azure, amélioration de l'utilisabilité de l'interface -- version 3.4 : +traduction complète des articles Arxiv, +correction des articles Latex -- version 3.3 : +fonction d'intégration d'informations Internet -- version 3.2 : Les plugins de fonction prennent en charge plus de paramètres (fonction d'enregistrement de conversation, débogage de code de n'importe quel langage + demandes d'LLM arbitraires) -- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles gpt ! Prise en charge de l'API2D, répartition de charge entre plusieurs clés API -- version 3.0 : Prise en charge de chatglm et d'autres petits llm -- version 2.6 : Refonte de la structure des plugins, amélioration de l'interactivité, ajout de nouveaux plugins -- version 2.5 : Auto-mise à jour, résolution des problèmes de dépassement de longueur de texte et de jeton pendant la consolidation de grands projets de codes sources -- version 2.4 : (1) Nouvelle fonctionnalité de traduction complète des documents PDF ; (2) Nouvelle fonctionnalité de changement de position de la zone de saisie ; (3) Nouvelle option de disposition verticale ; (4) Optimisation des plugins de fonction multithreads. -- version 2.3 : Amélioration de l'interactivité multi-threads -- version 2.2 : Prise en charge du rechargement à chaud des plugins de fonction -- version 2.1 : Mise en page pliable -- version 2.0 : Introduction de plugins de fonction modulaires -- version 1.0: Fonctionnalités de base - -Groupe QQ des développeurs de GPT Academic: `610599535` - -- Problèmes connus - - Certains plugins de traduction de navigateurs peuvent nuire au fonctionnement de l'interface utilisateur de ce logiciel. - - Gradio officiel a actuellement de nombreux bugs de compatibilité. Veuillez utiliser `requirement.txt` pour installer Gradio. - -### III: Thèmes -Vous pouvez modifier le thème en modifiant l'option `THEME` (config.py). - -1. `Chuanhu-Small-and-Beautiful` [Lien](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) - - -### IV: Branches de développement de ce projet - -1. Branche `master` : Branche principale, version stable -2. 
`frontier` branch: development branch, test version
-
-
-### V: References and learning
-
-```
-Many excellent code designs from other projects were referenced during the development of this project, in no particular order:
-
-# ChatGLM2-6B from Tsinghua University:
-https://github.com/THUDM/ChatGLM2-6B
-
-# JittorLLMs from Tsinghua University:
-https://github.com/Jittor/JittorLLMs
-
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-
-
-# Oobabooga one-click installer:
-https://github.com/oobabooga/one-click-installers
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.German.md b/docs/README.German.md
deleted file mode 100644
index 87f7db5903d3bccabae8dbee97bfe274a1864946..0000000000000000000000000000000000000000
--- a/docs/README.German.md
+++ /dev/null
@@ -1,363 +0,0 @@
-
-
-
-> **Note**
->
-> This README was generated by GPT translation (implemented by a plugin of this project) and is not 100% reliable; please review the translation carefully.
->
-> November 7, 2023: When installing dependencies, please select only the versions **specified** in `requirements.txt`. Installation command: `pip install -r requirements.txt`.
-
-
-#
GPT Academic
- -**If you like this project, please give it a star. If you have developed useful shortcut keys or plugins, pull requests are welcome!**
-
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Please note that only plugins (buttons) that are **highlighted** can read files; some plugins are located in the **drop-down menu** of the plugin area. We also welcome any new plugin PR and handle it with **highest priority**.
->
-> 2. The functions of every file in this project are explained in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT-Academic-Selbstanalysebericht). As the version evolves, you can click the relevant function plugins at any time and call GPT to regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standard installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration guide](https://github.com/binary-husky/gpt_academic/wiki/Projekt-Konfigurationsanleitung).
->
-> 3. This project is compatible with, and encourages the use of, Chinese large language models such as ChatGLM. Multiple API keys can coexist; specify them in the configuration file like `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To change `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter for it to take effect.
-
-
-
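Note 3 above mentions load balancing across several comma-separated API keys. As a rough illustration of that idea only (the helper below is hypothetical, not the project's actual implementation), the keys can be split once and handed out round-robin:

```python
import itertools

# Hypothetical sketch: rotate through comma-separated API keys so that
# successive requests are spread across all of them.
API_KEY = "openai-key1,openai-key2,azure-key3,api2d-key4"
_key_cycle = itertools.cycle(k.strip() for k in API_KEY.split(",") if k.strip())

def next_api_key() -> str:
    """Return the next key in round-robin order (illustrative helper)."""
    return next(_key_cycle)

print(next_api_key())  # openai-key1
print(next_api_key())  # openai-key2
```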
- -Features (⭐ = recently added) | Description
---- | ---
-⭐[Integrate new models](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) and Wenxin Yiyan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), iFlytek [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3
-Polishing, translation, code explanation | One-click polishing, translation, grammar checking of papers, code explanation
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Define your own shortcut keys
-Modular design | Supports powerful custom [plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] One-click analysis of Python/C/C++/Java/Lua/... project trees, or [self-analysis](https://www.bilibili.com/video/BV1cj411A7VW)
-Paper reading, paper translation | [Plugin] One-click interpretation of a full Latex/PDF paper and generation of an abstract
-Full Latex translation, proofreading | [Plugin] One-click translation or correction of a Latex paper
-Batch comment generation | [Plugin] One-click generation of function comments in batches
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
-Chat analysis report generation | [Plugin] Automatically generates a summary report after execution
-PDF full-text translation | [Plugin] Extracts the title and abstract of a PDF paper and translates the full text (multithreaded)
-Arxiv assistant | [Plugin] Enter an Arxiv article URL to translate the abstract and download the PDF with one click
-Automatic Latex paper proofreading | [Plugin] Checks the grammar and spelling of Latex papers, Grammarly-style, and generates a side-by-side comparison PDF
-Google Scholar integration assistant | [Plugin] Given any Google Scholar search page URL, let GPT write your [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Plugin] Lets GPT answer questions by searching the Internet first, so information never goes stale
-⭐Fine-grained Arxiv paper translation ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] One-click [high-quality translation](https://www.bilibili.com/video/BV1dz4y1v77A/) of Arxiv papers, currently the best paper translation tool
-⭐[Real-time voice input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] [Asynchronously listens to audio](https://www.bilibili.com/video/BV1AV4y187Uy/), segments sentences automatically, and picks the right moment to answer
-Formula/image/table display | Shows formulas in both [tex and rendered](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) form; supports formula and code highlighting
-⭐AutoGen multi-agent plugin | [Plugin] Explore the possibilities of emergent multi-agent behavior with Microsoft AutoGen!
-Start in the dark theme | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time feels great, doesn't it?
-⭐ChatGLM2 fine-tuned models | Supports loading ChatGLM2 fine-tuned models and provides a ChatGLM2 fine-tuning assistant plugin
-More LLM models, [Huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Newbing interface (new Bing), Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) with support for LLaMA and PanGu-α
-⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip package | Use all function plugins of this project directly from Python, without the GUI (in development)
-⭐Void Terminal plugin | [Plugin] Use natural language to drive the other functions of this project directly
-More features (image generation, etc.) …… | See the end of this document ……
-
- - -- New interface (change the LAYOUT option in `config.py` to switch between a "left-right layout" and a "top-bottom layout") -
- -
- - -- All buttons are generated dynamically from `functional.py`; custom functions can be added freely, freeing up the clipboard. -
- -
- -- Polishing/proofreading -
- -
- - - -- If the output contains formulas, they are displayed in both TeX and rendered form for easy copying and reading. -
- -
- -- Don't want to read the project code? Just have ChatGPT present the whole project to you. -
- -
- -- Mixed calls to multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -
- -# Installation
-### Installation Method I: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure API_KEY
-
-In `config.py`, configure the API KEY and other settings ([click here to see how to configure the project in special network environments](https://github.com/binary-husky/gpt_academic/issues/1)). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Instructions).
-
-「 The program first checks whether a private configuration file named `config_private.py` exists, and uses its settings to override the same-named settings in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) into it only the configuration items you have modified in `config.py`. 」
-
-「 The project can also be configured via `environment variables`. The format of the environment variables can be found in the `docker-compose.yml` file or on our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Instructions). The configuration is read with the priority: `environment variables` > `config_private.py` > `config.py`. 」
-
-
-3. Install dependencies
-```sh
-# (Option I: if you are familiar with python, python>=3.9) Note: use the official pip source or the Alibaba pip mirror; to change the source temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11    # Create an anaconda environment
-conda activate gptac_venv                 # Activate the anaconda environment
-python -m pip install -r requirements.txt # This step is the same as the pip installation
-```
-
-
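The priority chain described above (`environment variables` > `config_private.py` > `config.py`) can be pictured with a short sketch. This is only an illustrative approximation of the reading logic, not the project's actual loader, and `read_single_conf` is a hypothetical helper name:

```python
import importlib
import os

def read_single_conf(name: str, default=None):
    """Sketch of the lookup order: env var, then config_private.py, then config.py."""
    if name in os.environ:                      # 1) environment variable wins
        return os.environ[name]
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):              # 2) then the private config file
            return getattr(private, name)
    except ImportError:
        pass                                    # no config_private.py present
    config = importlib.import_module("config")  # 3) finally the default config.py
    return getattr(config, name, default)

# Example: read_single_conf("API_KEY")
```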
If you need support for Tsinghua ChatGLM2/Fudan MOSS/RWKV as the backend, please click to expand. -

- -[Optional] If you need to support Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Strong computer configuration): -```sh -# [Optional Step I] Support Tsinghua ChatGLM2. Tsinghua ChatGLM note: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the root path of the project - -# [Optional Step III] Support RWKV Runner -Refer to the wiki: https://github.com/binary-husky/gpt_academic/wiki/Support-RWKV-Runner - -# [Optional Step IV] Make sure the AVAIL_LLM_MODELS in config.py includes the expected models. The currently supported models are as follows (the jittorllms series only supports the docker solution at present): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
-
-
-4. Run
-```sh
-python main.py
-```
-
-### Installation Method II: Use Docker
-
-0. Deploy the full capabilities of the project (this is a large image that includes cuda and latex; not recommended if your internet is slow or your disk is small)
-[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
-
-``` sh
-# Modify docker-compose.yml: keep solution 0 and delete the other solutions. Then run:
-docker-compose up
-```
-
-1. ChatGPT + Wenxin Yiyan + Spark and other online models (recommended for most users)
-[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
-[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
-[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
-
-``` sh
-# Modify docker-compose.yml: keep solution 1 and delete the other solutions. Then run:
-docker-compose up
-```
-
-P.S. If you need the Latex plugin functionality, please refer to the Wiki. Alternatively, use solution 4 or solution 0 directly to get the Latex functionality.
-
-2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (requires familiarity with the [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime)
-[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
-
-``` sh
-# Modify docker-compose.yml: keep solution 2 and delete the other solutions. Then run:
-docker-compose up
-```
-
-
-### Installation Method III: Other Deployment Methods
-1. **Windows one-click script**.
-Windows users who are completely unfamiliar with the python environment can download the one-click script from the published [Release](https://github.com/binary-husky/gpt_academic/releases) to install the version without local models.
-The script was contributed by [oobabooga](https://github.com/oobabooga/one-click-installers).
-
-2. To use third-party APIs, Azure, Wenxin Yiyan, Spark, etc., see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Instructions)
-
-3. A pitfall-avoidance guide for remote deployment on cloud servers.
-Please visit the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/Cloud-Server-Remote-Deployment-Guide)
-
-4. Some newer deployment platforms or methods
-    - Use Sealos for [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993).
-    - Use WSL2 (Windows Subsystem for Linux). Please visit the [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/Deploy-on-Windows-Subsystem-for-Linux-WSL2)
-    - How to run under a subpath (such as `http://localhost/subpath`). 
Please visit the [FastAPI Running Instructions](docs/WithFastapi.md)
-
-
-
-# Advanced Usage
-### I: Adding custom buttons (academic hotkeys)
-Open `core_functional.py` with any text editor, add an entry like the following, and restart the program. (If the button already exists, both the prefix and the suffix can be hot-modified without restarting the program.)
-
-For example:
-```
-"Translate English to Chinese": {
-    # Prefix, added before your input. For example, used to describe your request: translate, explain code, polish, etc.
-    "Prefix": "Please translate the following section into Chinese and then explain each technical term in a markdown table:\n\n",
-
-    # Suffix, added after your input. For example, together with the prefix it can wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
-
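Conceptually, such a button simply wraps whatever is in the input box. A minimal sketch of that idea (the entry dict mirrors the example above; `build_prompt` is a hypothetical helper, not the project's actual code):

```python
entry = {
    "Prefix": "Please translate the following section into Chinese and then "
              "explain each technical term in a markdown table:\n\n",
    "Suffix": "",
}

def build_prompt(user_input: str, entry: dict) -> str:
    # The button's only job: sandwich the raw input between Prefix and Suffix.
    return entry["Prefix"] + user_input + entry["Suffix"]

print(build_prompt("The attention mechanism ...", entry))
```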
- -
- -### II: Custom function plugins
-Write powerful function plugins to perform any task you can and cannot imagine.
-Writing and debugging plugins for this project is easy and only requires basic Python knowledge; you can implement your own plugin using the template we provide.
-For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
-
-# Updates
-### I: News
-
-1. Conversation saving. Call "Save current conversation" in the function plugin area to save the current conversation as a readable and restorable HTML file.
-Call "Load conversation history archive" in the function plugin area (drop-down menu) to restore a previous session.
-Tip: clicking "Load conversation history archive" without specifying a file lets you browse the cached HTML archive.
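For orientation, a function plugin in this style of project is essentially a generator that receives the user's text plus UI state and yields chat updates as it works. The skeleton below is a generic illustration of that shape with simplified, hypothetical parameters; the project's real signature and template are documented in the plugin guide linked above:

```python
def demo_plugin(txt, chatbot, history):
    """Illustrative plugin skeleton (not the project's actual interface).

    txt     -- text currently in the input box
    chatbot -- list of (question, answer) pairs shown in the UI
    history -- flat list of previous messages
    """
    chatbot.append((txt, f"Received {len(txt)} characters, working..."))
    yield chatbot, history              # push an intermediate UI update

    result = txt.upper()                # stand-in for real work, e.g. an LLM call
    history.extend([txt, result])
    chatbot[-1] = (txt, result)
    yield chatbot, history              # push the final result

# Usage sketch:
for cb, hist in demo_plugin("hello", [], []):
    print(cb[-1][1])
```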
- -
- -2. ⭐ Latex/Arxiv paper translation ⭐ -
- ===> - -
- -3. Void Terminal (understands user intent from natural-language input and automatically calls other plugins)
-
-- Step 1: Type "Please call the plugin to translate the PDF paper whose address is https://openreview.net/pdf?id=rJl0r3R9KX".
-- Step 2: Click "Void Terminal".
-
- -
- -4. Modular function design: simple interfaces support powerful functions -
- - -
- -5. Translation and interpretation of other open-source projects -
- - -
- -6. Decorative [live2d](https://github.com/fghrsh/live2d_demo) features (disabled by default, requires changing config.py) -
- -
- -7. OpenAI image generation -
- -
- -8. OpenAI audio parsing and summarization -
- -
- -9. Latex full-text proofreading -
- ===> - -
- -10. Language and theme switching -
- -
- -
-
-
-### II: Versions:
-- Version 3.70 (pending): optimize the AutoGen plugin theme and develop a series of derived plugins
-- Version 3.60: introduce AutoGen as the foundation of the new plugin generation
-- Version 3.57: support GLM3, Spark v3 and Wenxin Yiyan v4; fix concurrency issues with local models
-- Version 3.56: dynamically add basic function buttons; new PDF summary page
-- Version 3.55: rework the user interface; add floating windows and a menu bar
-- Version 3.54: new dynamic code interpreter (Code Interpreter) (unfinished)
-- Version 3.53: dynamically select different interface themes; improve stability; fix multi-user conflicts
-- Version 3.50: use natural language to call all functions of this project (Void Terminal); plugin categories; improved UI; new themes
-- Version 3.49: support the Baidu Qianfan platform and Wenxin Yiyan
-- Version 3.48: support Alibaba Tongyi Qianwen, Shanghai AI-Lab Shusheng, iFlytek Spark
-- Version 3.46: fully automatic real-time voice conversation
-- Version 3.45: customizable ChatGLM2 fine-tuning
-- Version 3.44: official Azure support; improved interface usability
-- Version 3.4: add Arxiv paper translation and LaTeX paper proofreading
-- Version 3.3: add Internet information aggregation
-- Version 3.2: function plugins support more parameters (conversation saving, analyzing arbitrary code and asking arbitrary LLM combinations)
-- Version 3.1: support querying multiple GPT models at once! Support load balancing across multiple API keys
-- Version 3.0: support ChatGLM and other small LLMs
-- Version 2.6: redesigned plugin structure; improved interactivity; added more plugins
-- Version 2.5: self-updating; fixed problems with overlong text and token overflow when summarizing code
-- Version 2.4: (1) add full-text PDF translation; (2) new input-area position switching; (3) add a vertical layout option; (4) improve multithreading in plugins.
-- Version 2.3: improved multithreaded interactivity
-- Version 2.2: function plugins can be hot-reloaded
-- Version 2.1: collapsible layouts
-- Version 2.0: modular function plugins introduced
-- Version 1.0: basic features
-
-Developer QQ group of GPT Academic: `610599535`
-
-- Known issues
-    - Some browser translation plugins interfere with the front end of this software
-    - The official Gradio release currently has many compatibility problems; be sure to install Gradio via `requirements.txt`
-
-### III: Themes
-You can change the theme via the `THEME` option (config.py).
-1. `Chuanhu-Small-and-Beautiful` [Link](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
-
-
-### IV: Development branches of this project
-
-1. `master` branch: main branch, stable version
-2. `frontier` branch: development branch, test version
-
-
-### V: References and learning
-
-```
-The code draws on the designs of other excellent projects, in no particular order:
-
-# ChatGLM2-6B from Tsinghua:
-https://github.com/THUDM/ChatGLM2-6B
-
-# JittorLLMs from Tsinghua:
-https://github.com/Jittor/JittorLLMs
-
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-
-
-# Oobabooga one-click installers:
-https://github.com/oobabooga/one-click-installers
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.Italian.md b/docs/README.Italian.md
deleted file mode 100644
index f231e49ff792cd9510a3115f2a9415b9400efd5d..0000000000000000000000000000000000000000
--- a/docs/README.Italian.md
+++ /dev/null
@@ -1,360 +0,0 @@
-
-
-
-> **Note**
->
-> This README was translated by GPT (implemented by a plugin of this project) and is not 100% reliable; please review the translation results carefully.
->
-> 2023.11.7: When installing dependencies, select the versions **specified** in `requirements.txt`. Installation command: `pip install -r requirements.txt`.
-
-
-#
GPT Academic
- -**If you like this project, please give it a star; if you have useful ideas or plugins, submit a pull request!**
-
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Note that only the **highlighted** plugins (buttons) support reading files; some plugins are located in the **drop-down menu** of the plugin area. We also welcome and handle any new plugin PR with **top priority**.
->
-> 2. The functions of every file in this project are described in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As versions iterate, you can also click the relevant function plugins at any time to call GPT and regenerate the project's self-analysis report. Frequently asked questions: [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Standard installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration guide](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
->
-> 3. This project is compatible with, and encourages the use of, Chinese large language models such as ChatGLM. Multiple API keys can coexist; specify them in the configuration file like `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To replace `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to confirm.
-
-
-
- -Features (⭐ = recently added) | Description
---- | ---
-⭐[Integrate new models](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) and [Wenxin Yiyan](https://cloud.baidu.com/doc/GUIDE/5268.9), [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), iFlytek [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3
-Polishing, translation, code explanation | One-click polishing, translation, grammar checking of papers and code explanation
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom plugins; plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Code analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] One-click analysis of Python/C/C++/Java/Lua/... project trees, or [self-analysis](https://www.bilibili.com/video/BV1cj411A7VW)
-Paper reading, paper translation | [Plugin] One-click interpretation of full latex/pdf papers and generation of an abstract
-Full Latex translation, full Latex proofreading | [Plugin] One-click translation or correction of latex papers
-Automatic batch comment generation | [Plugin] One-click batch generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin] Have you seen the READMEs above in 5 different languages ([English](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md))?
-Chat analysis report generation | [Plugin] Automatically generates a summary report after execution
-PDF full-text translation | [Plugin] Extract the title and abstract of PDF papers and translate the full text (multithreaded)
-Arxiv assistant | [Plugin] Enter the URL of an Arxiv article to translate the abstract and download the PDF with one click
-Full Latex paper checking | [Plugin] Detects grammar and spelling errors in Latex papers, Grammarly-style, plus a comparison PDF for download
-Google Scholar assistant | [Plugin] Given any Google Scholar search page URL, have GPT write the *related works* for you
-Internet information aggregation + GPT | [Plugin] [Retrieve information from the Internet](https://www.bilibili.com/video/BV1om4y127ck) with GPT answering questions, keeping information up to date
-⭐Accurate Arxiv paper translation ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] [Translate Arxiv papers with high quality](https://www.bilibili.com/video/BV1dz4y1v77A/) with one click, currently the best paper translation tool
-⭐[Real-time voice input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] [Listen to audio](https://www.bilibili.com/video/BV1AV4y187Uy/) asynchronously, segment sentences automatically, and find the right moment to answer
-Formula/image/table display | Shows formulas in both tex and rendered form; supports formula and code highlighting
-⭐AutoGen multi-agent plugin | [Plugin] Explore the possibilities of multi-agent emergent intelligence with Microsoft AutoGen!
-Activate the dark theme [here](https://github.com/binary-husky/gpt_academic/issues/173) | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-Multiple LLM models supported | Being served at the same time by GPT3.5, GPT4, [Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS)
-⭐ChatGLM2 fine-tuned models | Supports loading ChatGLM2 fine-tuned models; provides a ChatGLM2 fine-tuning assistant plugin
-More LLM models, [Huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Newbing interface (new Bing), Tsinghua [JittorLLMs](https://github.com/Jittor/JittorLLMs), support for [LLaMA](https://github.com/facebookresearch/llama) and [PanGu-α](https://openi.org.cn/pangu/)
-⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip package | Use all function plugins of this project directly from Python, without the GUI (in development)
-⭐Virtual terminal plugin | [Plugin] Call other plugins of this project using natural language
-More new features (image generation, etc.) ... | See the end of this document ...
-
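The "PDF full-text translation (multithreaded)" row above hides a simple pattern: split the document into chunks and translate them concurrently. The sketch below illustrates only that pattern; `translate_chunk` is a stand-in for a real LLM request, not the plugin's actual code:

```python
from concurrent.futures import ThreadPoolExecutor

def translate_chunk(chunk: str) -> str:
    return f"<translated {len(chunk)} chars>"  # placeholder for an LLM call

def translate_document(text: str, chunk_size: int = 2000, workers: int = 8) -> str:
    chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
    with ThreadPoolExecutor(max_workers=workers) as pool:
        # map() preserves chunk order, so the output reads as one document
        return "".join(pool.map(translate_chunk, chunks))

print(translate_document("some very long paper text ..." * 300)[:60])
```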
- - -- New layout (change the LAYOUT option in `config.py` to switch between a "left/right layout" and a "top/bottom layout") -
- -
- - -- All buttons are generated dynamically by reading `functional.py`; you can freely add custom functions, freeing up the clipboard -
- -
- -- Polishing / proofreading -
- -
- - - -- If the output contains formulas, they are displayed in both tex and rendered form for easy copying and reading. -
- -
- -- Don't want to read the project code? Just have ChatGPT present the whole project to you. -
- -
- -- Mixed calls to multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -
- -# Installation
-### Installation Method I: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure the API_KEY
-
-In `config.py`, configure the API KEY and other settings ([click here to see how to configure the API in special network environments](https://github.com/binary-husky/gpt_academic/issues/1)). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
-
-「 The program first checks whether a private configuration file named `config_private.py` exists and uses its settings to override the same-named settings in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) into it only the configuration items you have modified. 」
-
-「 The project can also be configured via `environment variables`; the format is described in the `docker-compose.yml` file or on our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). Configuration reading priority: `environment variables` > `config_private.py` > `config.py`. 」
-
-3. Install dependencies
-```sh
-# (Choice I: if familiar with python, python>=3.9) Note: use the official pip source or the Alibaba pip mirror; to change the source temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Choice II: using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11    # Create the anaconda environment
-conda activate gptac_venv                 # Activate the anaconda environment
-python -m pip install -r requirements.txt # Identical to the pip installation step
-```
-
-
If you want to use Tsinghua ChatGLM2/Fudan MOSS/RWKV as the backend, click to expand -

- -[Optional] If you want to use Tsinghua ChatGLM2/Fudan MOSS as the backend, additional dependencies must be installed (prerequisites: familiar with Python + have used Pytorch + a sufficiently powerful machine):
-```sh
-# [Optional Step I] Support Tsinghua ChatGLM2. Note: if you hit the error "Call ChatGLM fail cannot load ChatGLM parameters", refer to the following: 1: the default installation is the torch+cpu version; to use cuda, uninstall torch and reinstall a torch+cuda version; 2: if the model cannot be loaded due to insufficient machine configuration, you can change the model precision in request_llm/bridge_chatglm.py, replacing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llms/requirements_chatglm.txt
-
-# [Optional Step II] Support Fudan MOSS
-python -m pip install -r request_llms/requirements_moss.txt
-git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: run this command from the project root
-
-# [Optional Step III] Support RWKV Runner
-See the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
-
-# [Optional Step IV] Make sure AVAIL_LLM_MODELS in config.py includes the expected models. Currently supported models (the jittorllms series only supports the Docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-

-
-
-
-4. Run
-```sh
-python main.py
-```
-
-### Installation Method II: Use Docker
-
-0. Deploy all capabilities of the project (a large image including cuda and latex; not recommended if your connection is slow or your disk space is limited)
-[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
-
-``` sh
-# Edit docker-compose.yml: keep only configuration 0 and remove the others, then run:
-docker-compose up
-```
-
-1. ChatGPT + Wenxin Yiyan + Spark, online models only (recommended for most users)
-[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
-[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
-[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
-
-``` sh
-# Edit docker-compose.yml: keep only configuration 1 and remove the others, then run:
-docker-compose up
-```
-
-P.S. If you need the LaTeX plugin functionality, see the Wiki page. Alternatively, use configuration 4 or 0 directly to get the LaTeX functionality.
-
-2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (requires familiarity with Nvidia Docker)
-[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
-
-``` sh
-# Edit docker-compose.yml: keep only configuration 2 and remove the others, then run:
-docker-compose up
-```
-
-
-### Installation Method III: Other deployment options
-1. **Windows one-click script**.
-If you are completely unfamiliar with the python environment on Windows, you can download a one-click script from the [Release](https://github.com/binary-husky/gpt_academic/releases) section to install the version that does not require local models.
-The script was contributed by [oobabooga](https://github.com/oobabooga/one-click-installers).
-
-2. To use third-party APIs, Azure, Wenxin Yiyan, Xinghuo, etc., see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)
-
-3. Installation guide for remote cloud servers.
-Visit the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/云服务器远程部署指南).
-
-4. Other new deployment platforms or methods:
-    - Use Sealos for [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993).
-    - Use WSL2 (Windows Subsystem for Linux). See the [installation guide](https://github.com/binary-husky/gpt_academic/wiki/使用WSL2(Windows-Subsystem-for-Linux-子系统)部署) for more information.
-    - Running under a URL subpath (`http://localhost/subpath`). See the [FastAPI instructions](docs/WithFastapi.md) for details.
-
-
-
-# Advanced Usage
-### I: Customizing new quick-access buttons (academic hotkeys)
-Open `core_functional.py` with any text editor, add an entry like the following, and restart the program. (If the button already exists, both the prefix and the suffix can be hot-modified without restarting the program.)
-For example,
-```
-"Advanced Chinese-English translation": {
-    # Prefix, added before your input. For example, used to describe your request: translation, code explanation, polishing, etc.
-    "Prefix": "Please translate the following text into Chinese and explain each technical term in a markdown table:\n\n",
-
-    # Suffix, added after your input. Together with the prefix, it can wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
-
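For the subpath deployment mentioned under Installation Method III above, the usual pattern is to mount the Gradio app inside a FastAPI application. The following is only a minimal sketch of that pattern, assuming a trivial `Blocks` UI; the project's actual entry point is described in docs/WithFastapi.md:

```python
import gradio as gr
import uvicorn
from fastapi import FastAPI

with gr.Blocks() as demo:          # stand-in for the real UI
    gr.Markdown("GPT Academic (sketch)")

app = FastAPI()
app = gr.mount_gradio_app(app, demo, path="/subpath")

if __name__ == "__main__":
    # The UI is then served at http://localhost:8000/subpath
    uvicorn.run(app, host="0.0.0.0", port=8000)
```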
- -
- -### II: Custom function plugins
-Write powerful function plugins to perform any task you want, expected or unexpected.
-Writing plugins for this project is easy and only requires basic Python knowledge. See the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) for details.
-
-
-# Updates
-### I: Updates
-
-1. Conversation saving. Call `Save current conversation` in the plugin area to save the current conversation as a readable and restorable html file.
-In the same plugin area (drop-down menu), call `Load conversation history` to restore a previous conversation.
-Tip: clicking `Load conversation history` without specifying a file shows your cached HTML archive history.
- -
- -2. ⭐ Latex/Arxiv paper translation ⭐ -
- ===> - -
- -3. Void Terminal (understands user intent from free-form text + automatically calls other plugins)
-
-- Step 1: Type "Please call the plugin to translate a PDF paper; the address is https://openreview.net/pdf?id=rJl0r3R9KX"
-- Step 2: Click "Void Terminal"
-
- -
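Item 3 above boils down to an intent-dispatch step: the Void Terminal parses a free-text request and routes it to a registered plugin. The real project uses an LLM for the intent understanding; the keyword-based version below only sketches the dispatch idea, and every name in it is hypothetical:

```python
# Map a free-text request onto one of the registered plugins (sketch only).
PLUGINS = {
    "translate pdf": lambda url: f"translating the PDF at {url} ...",
    "arxiv":         lambda url: f"fetching the arxiv paper at {url} ...",
}

def dispatch(request: str) -> str:
    url = next((w for w in request.split() if w.startswith("http")), "")
    for keyword, plugin in PLUGINS.items():
        if keyword in request.lower():
            return plugin(url)
    return "No matching plugin found."

print(dispatch("Please translate PDF at https://openreview.net/pdf?id=rJl0r3R9KX"))
```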
- -4. Modular design: simple interfaces support powerful functions -
- - -
- -5. Translation and interpretation of other open-source projects -
- - -
- -6. Lightweight [live2d](https://github.com/fghrsh/live2d_demo) decoration (disabled by default, requires changing `config.py`) -
- -
- -7. OpenAI image generation -
- -
- -8. OpenAI audio processing and summarization -
- -
- -9. Latex full-text proofreading -
- ===> - -
- -10. Cambio linguaggio e tema -
- -10. Language and theme switching -
- -
-
-
-### II: Versions:
-- version 3.70 (todo): optimize the AutoGen theme display and develop a series of related plugins.
-- version 3.60: introduce AutoGen as the foundation for the new generation of plugins.
-- version 3.57: support GLM3, Spark v3 and Wenxin Yiyan v4; fix concurrency bugs in local models.
-- version 3.56: dynamically add basic function buttons; new PDF summary page.
-- version 3.55: rework the user interface; introduce floating windows and menu bars.
-- version 3.54: new dynamic code interpreter (Code Interpreter) (to be refined).
-- version 3.53: dynamically select different UI themes; improve stability; fix multi-user conflicts.
-- version 3.50: use natural language to call all plugin functions of this project (Void Terminal); plugin categories; improved UI; new theme designs.
-- version 3.49: support the Baidu Qianfan platform and Wenxin Yiyan.
-- version 3.48: support Alibaba Tongyi Qianwen, Shanghai AI-Lab Shusheng, iFlytek Spark.
-- version 3.46: support fully automatic real-time voice chat.
-- version 3.45: support custom fine-tuning of the ChatGLM2 model.
-- version 3.44: official Azure support; improved interface usability.
-- version 3.4: + arXiv paper translation and LaTeX paper proofreading.
-- version 3.3: + Internet information aggregation.
-- version 3.2: function plugins support more parameter interfaces (conversation saving, interpreting code in any language, querying any combination of LLMs).
-- version 3.1: support querying multiple GPT models at once! Support api2d and load balancing across multiple API keys.
-- version 3.0: support chatglm and other small LLMs.
-- version 2.6: reworked plugin structure; improved interactivity; added more plugins.
-- version 2.5: self-updating; fixed the token-overflow problem when summarizing large blocks of code.
-- version 2.4: (1) new PDF document translation; (2) input-area position switching; (3) new vertical layout option; (4) optimized multithreaded plugins.
-- version 2.3: improved multithreaded interactivity.
-- version 2.2: plugin hot-reloading support.
-- version 2.1: collapsible layout.
-- version 2.0: modular plugins introduced.
-- version 1.0: basic features
-
-GPT Academic Developer QQ Group: `610599535`
-
-- Known issues
-    - Some browser translation plugins can interfere with the front end of this software
-    - The official Gradio release has many compatibility bugs; be sure to install Gradio via `requirements.txt`
-
-### III: Themes
-The theme can be changed via the `THEME` option (config.py)
-1. `Chuanhu-Small-and-Beautiful` [Website](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
-
-
-### IV: Development branches of this project
-
-1. `master` branch: main branch, stable version
-2. `frontier` branch: development branch, test version
-
-
-### V: References and learning
-
-```
-The code draws on ideas from several other projects, in no particular order:
-
-# Tsinghua ChatGLM2-6B:
-https://github.com/THUDM/ChatGLM2-6B
-
-# Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-
-
-# Oobabooga one-click installers:
-https://github.com/oobabooga/one-click-installers
-
-# Other resources:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.Japanese.md b/docs/README.Japanese.md
deleted file mode 100644
index 13cbdede24b3c716a0961b72331259ed081aab2b..0000000000000000000000000000000000000000
--- a/docs/README.Japanese.md
+++ /dev/null
@@ -1,344 +0,0 @@
-
-
-
-> **Note**
->
-> This README was generated by GPT translation (implemented by a plugin of this project); the translation is not 100% accurate, so please take care.
->
-> November 7, 2023: When installing dependencies, please select the versions **specified** in `requirements.txt`. Installation command: `pip install -r requirements.txt`.
-
-
-#
GPT Academic
- -**If you like this project, please give it a star. If you have created useful shortcut keys or plugins, pull requests are welcome!**
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Please note that only **highlighted** plugins (buttons) can read files; some plugins are located in the drop-down menu of the plugin area. We also welcome new plugin PRs and handle them with the highest priority.
->
-> 2. The functions of every file in this project are explained in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E5%A0%82). As the version evolves, you can also click the relevant function plugins to regenerate the project's self-analysis report with GPT. Frequently asked questions: [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standard installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration guide](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Explain).
->
-> 3. This project is compatible with, and encourages trying, Chinese large language models such as [ChatGLM](https://www.chatglm.dev/). Multiple API keys can coexist; specify them in the configuration file like `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To change `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter for it to take effect.
-
-
-
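The `multi_language.py` script mentioned above is what produced this file. Conceptually it walks the project tree, sends each file's translatable text to an LLM, and writes the localized copy back out. The sketch below only illustrates that loop; `llm_translate` is a placeholder, not a real API:

```python
from pathlib import Path

def llm_translate(text: str, lang: str) -> str:
    return text  # placeholder for an actual LLM translation request

def translate_project(root: str, lang: str = "Japanese") -> None:
    # Walk the tree and emit a translated sibling for every markdown file.
    for path in Path(root).rglob("*.md"):
        translated = llm_translate(path.read_text(encoding="utf-8"), lang)
        path.with_suffix(f".{lang}.md").write_text(translated, encoding="utf-8")

# translate_project("gpt_academic", "Japanese")
```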
- -Features (⭐ = recently added) | Description
---- | ---
-⭐[Add new models](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) and Wenxin Yiyan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), iFlytek [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3
-Polishing, translation, code explanation | One-click polishing, translation, grammar checking of papers, code explanation
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] One-click analysis of Python/C/C++/Java/Lua/... project trees, or [self-analysis](https://www.bilibili.com/video/BV1cj411A7VW)
-Paper reading, [paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Translate a full LaTeX/PDF paper and generate an abstract
-LaTeX full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [proofreading](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin] Translate or proofread LaTeX papers
-Batch comment generation | [Plugin] Generate function comments in batches
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin] Have you seen the READMEs in the 5 languages ([English](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md), etc.)?
-Chat analysis report generation | [Plugin] Automatically generates a summary report after execution
-[PDF full-text translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extracts the title and abstract of a PDF paper and translates the full text (multithreaded)
-[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugin] Enter an arxiv paper URL to translate the abstract and download the PDF
-Batch LaTeX paper proofreading | [Plugin] Corrects the grammar and spelling of LaTeX papers, Grammarly-style, and outputs a comparison PDF
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Plugin] Given any Google Scholar search page URL, have GPT write the related works for you
-Internet information aggregation + GPT | [Plugin] Retrieves information from the Internet before answering questions, so information never goes stale
-⭐Fine-grained Arxiv paper translation ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] Translates arxiv papers with very high quality; the best paper translation tool available
-⭐[Real-time voice input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] Asynchronously [listens to audio](https://www.bilibili.com/video/BV1AV4y187Uy/), segments sentences automatically, and finds the right moment to answer
-Formula/image/table display | Shows formulas in both [tex and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formula and code highlighting
-⭐AutoGen multi-agent plugin | [Plugin] Explore multi-agent emergent intelligence with Microsoft AutoGen
-Start in the dark [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time feels great, doesn't it?
-⭐ChatGLM2 fine-tuned models | Supports loading and using ChatGLM2 fine-tuned models; a ChatGLM2 fine-tuning assistant plugin is provided
-More LLM models, [HuggingFace deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Newbing interface (new Bing), Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), support for [LLaMA](https://github.com/facebookresearch/llama) and [PanGu-α](https://openi.org.cn/pangu/)
-⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip package | Call all function plugins of this project directly from Python, independent of the GUI (in development)
-⭐Void Terminal plugin | [Plugin] Use natural language to run the other plugins of this project directly
-More new features (image generation, etc.) …… | See the end of this document ……
-
- - - -- If the output contains formulas, they are displayed in both TeX and rendered form for easy copying and reading. - -
- -
- -- Too lazy to read the project code? Just have ChatGPT present the whole project to you. - -
- -
- -- Mixed calls to multiple language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) - -
- -
- -# Installation
-### Method I: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure the API key
-
-Configure the API key and other settings in `config.py`; [click here for how to configure it in special network environments](https://github.com/binary-husky/gpt_academic/issues/1). See also the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started).
-
-「 The program first checks whether a private configuration file named `config_private.py` exists in the same place as `config.py` and uses its same-named settings to override those in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and transferring (copying) into it only the configuration items you have modified. 」
-
-「 The project can also be configured via environment variables; see the `docker-compose.yml` file or the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started) for the format. Configuration priority: `environment variables` > `config_private.py` > `config.py`. 」
-
-3. Install dependencies
-```sh
-# (Option I: if you are familiar with Python, python>=3.9) Note: use the official pip source or the Alibaba pip mirror; to change the source temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11 # Create an anaconda environment
-conda activate gptac_venv              # Activate the anaconda environment
-python -m pip install -r requirements.txt # Same as the pip installation step
-```
-
If you need support for Tsinghua ChatGLM2/Fudan MOSS/RWKV as the backend, click here to expand -

- -[Optional steps] If you want to support Tsinghua ChatGLM2/Fudan MOSS as the backend, additional dependencies must be installed (prerequisites: familiar with Python + have used Pytorch and an NVIDIA GPU + a sufficiently powerful machine):
-
-```sh
-# [Optional Step I] Add support for Tsinghua ChatGLM2. Note: if you hit the error "Call ChatGLM fail cannot load ChatGLM parameters", refer to the following. 1: the default installation is the torch+cpu version; to use CUDA, uninstall torch and reinstall a torch+cuda version. 2: if the model cannot be loaded due to insufficient machine configuration, change the model precision in request_llm/bridge_chatglm.py, replacing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
-python -m pip install -r request_llms/requirements_chatglm.txt
-
-# [Optional Step II] Add support for Fudan MOSS
-python -m pip install -r request_llms/requirements_moss.txt
-git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Run this command from the project root
-
-# [Optional Step III] Add support for RWKV Runner
-See the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
-
-# [Optional Step IV] Make sure the config.py configuration file includes all the expected models. Currently supported models (the jittorllms series only supports Docker):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-

-
-
-4. Run
-```sh
-python main.py
-```
-
-### Method II: Use Docker
-
-0. Full-scale deployment of the project (a large image including CUDA and LaTeX; not recommended if your network is slow or your disk is small)
-[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
-
-```sh
-# Edit docker-compose.yml: keep scheme 0 and delete the others, then run:
-docker-compose up
-```
-
-1. ChatGPT + Wenxin Yiyan + Spark and other online models only (recommended for most users)
-[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
-[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
-[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
-
-```sh
-# Edit docker-compose.yml: keep scheme 1 and delete the others, then run:
-docker-compose up
-```
-
-P.S. If you need the LaTeX plugin functionality, see the Wiki. You can also use scheme 4 or scheme 0 directly to get the LaTeX functionality.
-
-2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (requires familiarity with Nvidia Docker)
-[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
-
-```sh
-# Edit docker-compose.yml: keep scheme 2 and delete the others, then run:
-docker-compose up
-```
-
-
-### Method III: Other deployment options
-
-1. **Windows one-click script**.
-Windows users unfamiliar with the Python environment can download the one-click script from the [Releases](https://github.com/binary-husky/gpt_academic/releases) to install the version without local models.
-The script was contributed by [oobabooga](https://github.com/oobabooga/one-click-installers).
-
-2. To use third-party APIs, Azure, Wenxin Yiyan, Xinghuo, etc., see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started).
-
-3. A pitfall-avoidance guide for remote deployment on cloud servers.
-See the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started#%E4%BA%91%E3%82%B5%E3%83%BC%E3%83%90%E3%83%BC%E3%83%AA%E3%82%BC%E3%83%A0%E3%82%B5%E3%83%BC%E3%83%90%E3%81%AE%E3%83%AA%E3%83%A2%E3%83%BC%E3%83%88%E3%83%87%E3%83%97%E3%83%AD%E3%82%A4%E6%8C%87%E5%8D%97)
-
-4. Other new deployment platforms or methods
-    - [One-click deployment](https://github.com/binary-husky/gpt_academic/issues/993) with Sealos
-    - For WSL2 (Windows Subsystem for Linux), see the [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started)
-    - To run FastAPI under a subpath (e.g. `http://localhost/subpath`), see the [FastAPI running instructions](docs/WithFastapi.md)
-
-
-
-# Advanced Usage
-### I: Creating custom shortcut buttons (academic hotkeys)
-Open `core_functional.py` with a text editor, add an entry like the following, and restart the program. (If the button already exists, both the prefix and the suffix can be hot-modified and take effect without restarting the program.)
-For example:
-```
-"Super English-to-Chinese": {
-    # Prefix, added before your input. For example, used to describe your request: translation, code explanation, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese and then explain each technical term in a markdown table:\n\n",
-
-    # Suffix, added after your input. Together with the prefix, it can wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
-
- -
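-なお、追加したエントリが正しく読み込まれているかは、たとえば次のように確認できます(get_core_functions という関数名は core_functional.py 内の実装を前提とした仮定で、あくまで説明用のスケッチです):
-
-```python
-# 説明用のスケッチ:追加したエントリの簡単な確認(関数名 get_core_functions は仮定)
-from core_functional import get_core_functions
-
-entry = get_core_functions()["超级英译中"]
-assert "Prefix" in entry and "Suffix" in entry  # キー名は Prefix / Suffix のままにする必要があります
-```
-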
- -### II:関数プラグインのカスタマイズ -自分の望む任意のタスクを実行するために、強力な関数プラグインを作成できます。 -このプロジェクトのプラグインの作成とデバッグの難易度は非常に低く、一定のPythonの基礎知識があれば、提供されたテンプレートを参考に自分自身のプラグイン機能を実装することができます。 -詳細については、[関数プラグインガイド](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)を参照してください。 - - -# 更新 -### I:ダイナミック - -1. 会話の保存機能。プラグインエリアで `Save Current Conversation` を呼び出すだけで、現在の会話を読み取り可能で復旧可能なhtmlファイルとして保存できます。 -また、プラグインエリア(ドロップダウンメニュー)で `Load Conversation History Archive` を呼び出すことで、以前の会話を復元できます。 -ヒント:ファイルを指定せずに `Load Conversation History Archive` をクリックすると、履歴のhtmlアーカイブのキャッシュを表示することができます。 -
- -
- -2. ⭐Latex/Arxiv論文の翻訳機能⭐ -
- ===> - -
- -3. ゼロのターミナル(自然言語入力からユーザの意図を理解+他のプラグインを自動的に呼び出す) - -- ステップ1:「プラグインのPDF論文の翻訳を呼び出してください、アドレスはhttps://openreview.net/pdf?id=rJl0r3R9KX」と入力します。 -- ステップ2:「Zero Terminal」をクリックします。 - -
- -
- -4. モジュール化された機能設計、シンプルなインターフェイスで強力な機能をサポートする -
- - -
- -5. 他のオープンソースプロジェクトの翻訳 -
- - -
- -6. [live2d](https://github.com/fghrsh/live2d_demo)のデコレーション機能(デフォルトでは無効で、`config.py`を変更する必要があります) -
- -
- -7. OpenAI画像生成 -
- -
- -8. OpenAIオーディオ解析と要約 -
- -
- -9. Latex全体の校正と修正 -
- ===> - -
- -10. 言語、テーマの切り替え -
- -
-
-
-### II:バージョン:
-- version 3.70(todo): AutoGenプラグインのテーマを最適化し、一連の派生プラグインを設計する
-- version 3.60: AutoGenを次世代プラグインの基盤として導入
-- version 3.57: GLM3、星火v3、文心一言v4をサポート、ローカルモデルの並行実行バグを修正
-- version 3.56: 基本機能ボタンを動的に追加、新しい報告書PDF集約ページ
-- version 3.55: フロントエンドのデザインを再構築し、浮動ウィンドウとメニューバーを導入
-- version 3.54: 新しい動的コードインタプリタ(Code Interpreter)の追加(未完成)
-- version 3.53: 異なるテーマを動的に選択できるようにし、安定性を向上、複数ユーザの競合問題を解決
-- version 3.50: 自然言語でこのプロジェクトのすべての関数プラグインを呼び出せるようになりました(ゼロのターミナル)。プラグインの分類をサポートし、UIを改善し、新しいテーマを設計
-- version 3.49: Baidu Qianfanプラットフォームと文心一言をサポート
-- version 3.48: Alibaba DAMO Academy Tongyi Qianwen、Shanghai AI-Lab Shusheng、Xunfei Xinghuoをサポート
-- version 3.46: 完全ハンズフリーのリアルタイム音声対話をサポート
-- version 3.45: カスタムChatGLM2ファインチューニングモデルをサポート
-- version 3.44: 公式にAzureをサポート、UIの使いやすさを最適化
-- version 3.4: +arxiv論文の翻訳、latex論文の校閲機能
-- version 3.3: +インターネット情報の総合機能
-- version 3.2: 関数プラグインがさらに多くのパラメータインターフェースをサポート(会話の保存機能、任意の言語のコードの解釈、同時に任意のLLMの組み合わせを尋ねる)
-- version 3.1: 複数のgptモデルに同時に質問できるようになりました! api2dをサポートし、複数のapikeyの負荷分散をサポートしました
-- version 3.0: chatglmと他の小さなllmのサポート
-- version 2.6: プラグインの構造を再構築し、対話性を高め、より多くのプラグインを追加しました
-- version 2.5: 自己更新機能を追加し、ソースコード全体を要約する際のテキスト長・トークンのオーバーフロー問題を解決しました
-- version 2.4: (1)新しいPDF全文翻訳機能を追加しました。(2)入力エリアの位置を切り替えるための新しい機能を追加しました。(3)垂直レイアウトオプションを追加しました。(4)マルチスレッド関数プラグインを最適化しました。
-- version 2.3: マルチスレッドの対話を強化しました
-- version 2.2: 関数プラグインのホットリロードをサポート
-- version 2.1: 折りたたみ式のレイアウト
-- version 2.0: モジュール化された関数プラグインの導入
-- version 1.0: 基本機能
-
-GPT Academic開発者QQグループ:`610599535`
-
-- 既知の問題
- - 一部のブラウザ翻訳プラグインがこのソフトウェアのフロントエンドの実行を妨げる
- - 公式Gradioには互換性の問題があるため、必ず`requirements.txt`を使用してGradioをインストールしてください
-
-### III:テーマ
-`THEME`オプション(`config.py`)を変更することで、テーマを変更できます
-1. `Chuanhu-Small-and-Beautiful` [リンク](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
-
-
-### IV:本プロジェクトの開発ブランチ
-
-1. `master`ブランチ:メインブランチ、安定版
-2. `frontier`ブランチ:開発ブランチ、テスト版
-
-
-### V:参考と学習
-
-```
-コードの中には、他の優れたプロジェクトのデザインを参考にしたものが多く含まれています。順序は問いません:
-
-# 清華ChatGLM2-6B:
-https://github.com/THUDM/ChatGLM2-6B
-
-# 清華JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-
-
-# Oobaboogaワンクリックインストーラー:
-https://github.com/oobabooga/one-click-installers
-
-# その他:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.Korean.md b/docs/README.Korean.md
deleted file mode 100644
index 2bd1f577cfa10888016873a9befc64d63a23b287..0000000000000000000000000000000000000000
--- a/docs/README.Korean.md
+++ /dev/null
@@ -1,363 +0,0 @@
-
-
-
-> **참고**
->
-> 이 README는 GPT 번역으로 생성되었습니다(이 프로젝트의 플러그인에 의해 구현됨). 100% 신뢰할 수 없으므로 번역 결과를 주의 깊게 검토하십시오.
->
-> 2023.11.7: 종속성을 설치할 때, `requirements.txt`에 **지정된 버전**을 선택하십시오. 설치 명령어: `pip install -r requirements.txt`.
-
-
-
-
-# GPT 학술 최적화 (GPT Academic)
-
-**이 프로젝트가 마음에 드신다면, Star를 부탁드립니다. 편리한 단축키나 플러그인을 발견하셨다면 Pull Request를 환영합니다!**
-GPT를 사용하여 이 프로젝트를 임의의 언어로 번역하려면 [`multi_language.py`](multi_language.py)를 읽고 실행하십시오 (실험적).
-
-
-> **참고**
->
-> 1. **강조 표시**된 플러그인 (버튼)만 파일을 읽을 수 있습니다. 일부 플러그인은 플러그인 영역의 **드롭다운 메뉴**에 있습니다. 또한 새로운 플러그인에 대한 모든 PR을 환영하며, 이를 **가장 우선적**으로 처리합니다.
->
-> 2. 이 프로젝트의 각 파일의 기능은 [자체 분석 보고서 `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic%EC%A0%9C%ED%94%84%EB%AA%85%EC%84%B1%EB%B0%A9%EC%8B%9D%EC%9D%98_%EA%B2%B0%EA%B3%BC)에서 자세히 설명되어 있습니다. 버전이 반복됨에 따라, 관련 기능 플러그인을 언제든지 클릭하여 GPT를 호출해 프로젝트의 자체 분석 보고서를 다시 생성할 수 있습니다. 자주 묻는 질문은 [`위키`](https://github.com/binary-husky/gpt_academic/wiki)를 참조하십시오. [일반적인 설치 방법](#installation) | [원클릭 설치 스크립트](https://github.com/binary-husky/gpt_academic/releases) | [설정 설명서](https://github.com/binary-husky/gpt_academic/wiki/%EC%84%A4%EC%A0%95%EC%82%AC%EB%AA%85_%EA%B0%84%EB%8B%A8_%EC%84%B8%ED%8A%B8%EB%B2%84_%EC%B6%94%EA%B0%80)
-
-
-> 3. 이 프로젝트는 ChatGLM 등 중국산 대형 언어 모델의 실행을 지원하고 권장합니다. 여러 개의 API 키를 동시에 사용할 수 있으며, 구성 파일에 `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`와 같이 입력할 수 있습니다. `API_KEY`를 일시적으로 변경해야 하는 경우, 입력 영역에 임시 `API_KEY`를 입력한 다음 Enter 키를 누르면 적용됩니다.
-
-
-
- -기능 (⭐= 최근 추가 기능) | 설명 ---- | --- -⭐[새 모델 추가](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)와 Wenxin Yiyan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Star](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3 -문체 개선, 번역, 코드 설명 | 일괄적인 문체 개선, 번역, 논문 문법 오류 탐색, 코드 설명 -[사용자 정의 단축키](https://www.bilibili.com/video/BV14s4y1E7jN) | 사용자 정의 단축키 지원 -모듈화 설계 | 사용자 정의 가능한 강력한 [플러그인](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions) 지원, 플러그인 지원 [핫 업데이트](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[프로그램 분석](https://www.bilibili.com/video/BV1cj411A7VW) | [플러그인] 한 번에 Python/C/C++/Java/Lua/... 프로젝트 트리를 분석하거나 [자체 분석](https://www.bilibili.com/video/BV1cj411A7VW) -논문 읽기, 논문 [번역](https://www.bilibili.com/video/BV1KT411x7Wn) | [플러그인] LaTeX/PDF 논문 전문을 읽고 요약 생성 -LaTeX 전체 [번역](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [개선](https://www.bilibili.com/video/BV1FT411H7c5/) | [플러그인] LaTeX 논문 번역 또는 개선 -일괄 주석 생성 | [플러그인] 함수 주석 일괄 생성 -Markdown [한 / 영 번역](https://www.bilibili.com/video/BV1yo4y157jV/) | 위의 5개 언어로 작성된 [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)를 살펴보셨나요? -채팅 분석 보고서 생성 | [플러그인] 실행 후 요약 보고서 자동 생성 -[PDF 논문 전체 번역](https://www.bilibili.com/video/BV1KT411x7Wn) 기능 | [플러그인] PDF 논문 제목 및 요약 추출 + 전체 번역 (멀티 스레드) -[Arxiv 도우미](https://www.bilibili.com/video/BV1LM4y1279X) | [플러그인] arxiv 논문 url 입력시 요약 번역 + PDF 다운로드 -LaTeX 논문 일괄 교정 | [플러그인] Grammarly를 모사하여 LaTeX 논문에 대한 문법 및 맞춤법 오류 교정 + 대조 PDF 출력 -[Google 학술 통합 도우미](https://www.bilibili.com/video/BV19L411U7ia) | 임의의 Google 학술 검색 페이지 URL을 지정하여 gpt가 [related works를 작성](https://www.bilibili.com/video/BV1GP411U7Az/)하게 해주세요. -인터넷 정보 집계 + GPT | [플러그인] [인터넷에서 정보를 가져와서](https://www.bilibili.com/video/BV1om4y127ck) 질문에 대답하도록 GPT를 자동화하세요. 정보가 절대로 오래되지 않도록 해줍니다. -⭐Arxiv 논문 세심한 번역 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [플러그인] [arxiv 논문을 고품질 번역으로](https://www.bilibili.com/video/BV1dz4y1v77A/) 번역하는 최고의 도구 -⭐[실시간 음성 대화 입력](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [플러그인] 비동기적으로 [오디오를 모니터링](https://www.bilibili.com/video/BV1AV4y187Uy/)하여 문장을 자동으로 분절하고 대답 시기를 자동으로 찾습니다. -수식/이미지/표 표시 | [tex 형식 및 렌더링 형식](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)의 수식을 동시에 표시하며, 수식 및 코드 하이라이트 지원 -⭐AutoGen multi-agent 플러그인 | [플러그인] Microsoft AutoGen을 활용하여 여러 개의 에이전트가 지능적으로 발생하는 가능성을 탐색하세요! -다크 모드 주제 지원 | 브라우저의 URL 뒤에 ```/?__theme=dark```를 추가하여 다크 모드로 전환하세요. -[다양한 LLM 모델](https://www.bilibili.com/video/BV1wT411p7yf) 지원 | GPT3.5, GPT4, [Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS)을 함께 사용하는 느낌은 좋을 것입니다, 그렇지 않습니까? -⭐ChatGLM2 fine-tuned 모델 | ChatGLM2 fine-tuned 모델 로드를 지원하며, ChatGLM2 fine-tuned 보조 플러그인 제공 -더 많은 LLM 모델 연결, [huggingface 배포](https://huggingface.co/spaces/qingxu98/gpt-academic) 지원 | Newbing 인터페이스(신 밍), Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) 도입, [LLaMA](https://github.com/facebookresearch/llama)와 [Pangu-alpha](https://openi.org.cn/pangu/)를 지원합니다. 
-⭐[void-terminal](https://github.com/binary-husky/void-terminal) 패키지 | GUI 없이 Python에서 이 프로젝트의 모든 함수 플러그인을 직접 호출 (개발 중)
-⭐Void 터미널 플러그인 | [플러그인] 자연어로 이 프로젝트의 다른 플러그인을 직접 호출합니다.
-기타 새로운 기능 소개 (이미지 생성 등) …… | 본 문서 맨 끝 참조 ……
-
- - -- 새로운 인터페이스(`config.py`의 LAYOUT 옵션 수정으로 "왼쪽-오른쪽 레이아웃"과 "위-아래 레이아웃"을 전환할 수 있음) -
- -
- - -- 모든 버튼은 functional.py를 동적으로 읽어 생성되므로 원하는대로 사용자 정의 기능을 추가할 수 있으며 클립 보드를 해제할 수 있습니다. -
- -
- -- 문체 개선/오류 수정 -
- -
- - - -- If the output contains equations, they will be displayed in both tex format and rendered format for easy copying and reading. -
- -
- -- Don't feel like looking at the project code? Just give it to ChatGPT and let it dazzle you. -
- -
- -- Mix and match multiple powerful language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -
- -# Installation -### Installation Method I: Run Directly (Windows, Linux or MacOS) - -1. Download the project -```sh -git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure API_KEY - -In `config.py`, configure the API KEY and other settings, [click here to view special network environment configuration methods](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 - -" The program will first check if there is a confidential configuration file named `config_private.py` and use its configuration to override the configuration with the same name in `config.py`. If you can understand this reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and move (copy) the configuration from `config.py` to `config_private.py` (only copy the modified configuration items). " - -" You can configure the project through `environment variables`. The format of the environment variables can be found in the `docker-compose.yml` file or our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). The priority of the configuration reading is: `environment variables` > `config_private.py` > `config.py`. " - -3. Install dependencies -```sh -# (Option I: if familiar with python, python>=3.9) Note: Use the official pip source or Aliyun pip source. Temporary switching source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # Create an Anaconda environment -conda activate gptac_venv # Activate the Anaconda environment -python -m pip install -r requirements.txt # This step is the same as the pip installation step -``` - - -
Click here to expand if you need support for Tsinghua ChatGLM2/Fudan MOSS/RWKV backend -

-
-[Optional Step] If you need support for Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used Pytorch + Sufficient computer configuration):
-```sh
-# [Optional Step I] Support for Tsinghua ChatGLM2. Note for Tsinghua ChatGLM: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters", refer to the following: 1: The default installation above is the torch+cpu version; to use cuda, uninstall torch and reinstall the torch+cuda version. 2: If you cannot load the model due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py: change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llms/requirements_chatglm.txt
-
-# [Optional Step II] Support for Fudan MOSS
-python -m pip install -r request_llms/requirements_moss.txt
-git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line, make sure you are in the project root path
-
-# [Optional Step III] Support for RWKV Runner
-# Refer to the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
-
-# [Optional Step IV] Make sure AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. The currently supported models are as follows (the jittorllms series only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-

-
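-Before launching, you can optionally sanity-check that the models you expect are actually enabled (a hypothetical snippet for illustration only, not part of the project; it assumes nothing beyond `AVAIL_LLM_MODELS` living in `config.py` as shown above):
-
-```python
-# Hypothetical sanity check: confirm the expected models are enabled in config.py
-import config
-
-for model in ("gpt-3.5-turbo", "chatglm", "moss"):
-    assert model in config.AVAIL_LLM_MODELS, f"{model} is missing from AVAIL_LLM_MODELS"
-```
-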
-
-
-4. Run
-```sh
-python main.py
-```
-
-### Installation Method II: Use Docker
-
-0. Deploy all the capabilities of the project (this is a large image that includes cuda and latex. However, it is not recommended if your internet speed is slow or your hard disk is small)
-[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
-
-``` sh
-# Modify docker-compose.yml, keep scheme 0 and delete the others. Then run:
-docker-compose up
-```
-
-1. ChatGPT + Wenxin Yiyan (文心一言) + Spark and other online models (recommended for most people)
-[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
-[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
-[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
-
-``` sh
-# Modify docker-compose.yml, keep scheme 1 and delete the others. Then run:
-docker-compose up
-```
-
-P.S. If you need the Latex plugin feature, please refer to the Wiki. Additionally, you can also use scheme 4 or scheme 0 directly to get the Latex feature.
-
-2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (通义千问) (Requires familiarity with the [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime)
-[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
-
-``` sh
-# Modify docker-compose.yml, keep scheme 2 and delete the others. Then run:
-docker-compose up
-```
-
-
-### Installation Method III: Other Deployment Methods
-1. **One-click run script for Windows**.
-Windows users who are completely unfamiliar with the Python environment can download the one-click run script without local models from the [Release](https://github.com/binary-husky/gpt_academic/releases) section.
-The script contribution comes from [oobabooga](https://github.com/oobabooga/one-click-installers).
-
-2. To use third-party APIs, Azure, Wenxin Yiyan (文心一言), Spark (星火), etc., see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
-
-3. Pitfall guide for remote deployment on cloud servers.
-Please visit the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-4. Some new deployment platforms or methods
- - Use Sealos for [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993).
- - Use WSL2 (Windows Subsystem for Linux). Please visit [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
- - How to run in a subpath (such as `http://localhost/subpath`). 
Please refer to [FastAPI running instructions](docs/WithFastapi.md) - - - -# 고급 사용법 -### I: 사용자 정의 바로 가기 버튼 추가 (학술 단축키) -임의의 텍스트 편집기로 `core_functional.py` 파일을 열고 다음과 같은 항목을 추가한 다음 프로그램을 다시 시작하십시오. (이미 버튼이 있는 경우에는 접두사와 접미사를 실시간으로 수정할 수 있으므로 프로그램을 다시 시작할 필요가 없습니다.) -예시: -``` -"초급영문 번역": { - # 접두사, 입력 내용 앞에 추가됩니다. 예를 들어 요구 사항을 설명하는 데 사용됩니다. 예를 들어 번역, 코드 설명, 교정 등 - "Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n", - - # 접미사, 입력 내용 뒤에 추가됩니다. 예를 들어 접두사와 함께 입력 내용을 따옴표로 감쌀 수 있습니다. - "Suffix": "", -}, -``` -
- -
-
-### II: 사용자 정의 함수 플러그인
-원하는 작업을 수행하는 강력한 함수 플러그인을 직접 작성할 수 있습니다.
-이 프로젝트의 플러그인 작성 및 디버깅은 난이도가 낮으며, 일정한 Python 기본 지식만 있으면 우리가 제공하는 템플릿을 본떠서 고유한 플러그인 기능을 구현할 수 있습니다.
-자세한 내용은 [함수 플러그인 가이드](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)를 참조하세요.
-
-
-# 업데이트
-### I: 다이나믹
-
-1. 대화 저장 기능. 플러그인 영역에서 '현재 대화 저장'을 호출하면 현재 대화를 읽기 가능하고 복구 가능한 html 파일로 저장할 수 있습니다.
-또한 플러그인 영역(드롭다운 메뉴)에서 '대화 기록 불러오기'를 호출하여 이전 대화를 복원할 수 있습니다.
-팁: 파일을 지정하지 않고 '대화 기록 불러오기'를 바로 클릭하면 이전 html 기록 캐시를 볼 수 있습니다.
-
- -
- -2. ⭐Latex/Arxiv 논문 번역 기능⭐ -
- ===> - -
- -3. 빈 터미널 (자연어 입력에서 사용자 의도 이해 + 자동 플러그인 호출) - -- 단계 1: "플러그인을 사용하여 PDF 논문을 번역하십시오. 주소는 https://openreview.net/pdf?id=rJl0r3R9KX입니다." 입력 -- 단계 2: "빈 터미널" 클릭 - -
- -
- -4. 모듈화된 기능 디자인, 간단한 인터페이스로 강력한 기능 제공 -
- - -
- -5. 다른 오픈 소스 프로젝트 번역 -
- - -
- -6. [live2d](https://github.com/fghrsh/live2d_demo)의 작은 기능 추가 (기본 설정은 닫혀 있으며, `config.py`를 수정해야 합니다.) -
- -
- -7. OpenAI 이미지 생성 -
- -
- -8. OpenAI 오디오 분석 및 요약 -
- -
-
-9. Latex 전체 교정 및 오류 수정
-
- ===> - -
- -10. 언어, 테마 변경 -
- -
-
-
-### II: 버전:
-- 버전 3.70 (예정): AutoGen 플러그인 테마 개선 및 다른 테마 플러그인 디자인
-- 버전 3.60: AutoGen을 새로운 세대 플러그인의 기반으로 도입
-- 버전 3.57: GLM3, Starfire v3, 文心一言 v4 지원, 로컬 모델의 동시성 버그 수정
-- 버전 3.56: 동적으로 기본 기능 버튼 추가, 새로운 보고서 PDF 요약 페이지
-- 버전 3.55: 프론트 엔드 인터페이스 리팩토링, 화면 따라다니는 윈도우 및 메뉴 바 도입
-- 버전 3.54: 새로운 동적 코드 해석기 (Code Interpreter) 추가 (완벽하게 완성되지 않음)
-- 버전 3.53: 다른 인터페이스 테마 동적 선택 기능 추가, 안정성 향상 및 다중 사용자 충돌 문제 해결
-- 버전 3.50: 자연어로 이 프로젝트의 모든 함수 플러그인을 호출하는 기능 (빈 터미널) 추가, 플러그인 분류 지원, UI 개선, 새로운 테마 설계
-- 버전 3.49: Baidu Qianfan 플랫폼 및 문심일언 지원
-- 버전 3.48: Alibaba DAMO Academy Tongyi Qianwen, Shanghai AI-Lab Shusheng, Xunfei Xinghuo 지원
-- 버전 3.46: 완전 핸즈프리 실시간 음성 대화 지원
-- 버전 3.45: 사용자 정의 ChatGLM2 fine-tuning 모델 지원
-- 버전 3.44: Azure 정식 지원, 인터페이스의 사용 편의성 개선
-- 버전 3.4: +arxiv 논문 번역, latex 논문 교정 기능 추가
-- 버전 3.3: +인터넷 정보 종합 기능
-- 버전 3.2: 함수 플러그인이 더 많은 매개변수 인터페이스를 지원합니다 (대화 저장 기능, 임의의 언어 코드 해석 + 임의의 LLM 조합을 동시에 요청)
-- 버전 3.1: 여러 GPT 모델에 동시에 질문할 수 있는 기능 추가! api2d 지원, 여러 개의 apikey 부하 균형 조정 지원
-- 버전 3.0: chatglm 및 기타 소규모 llm 지원
-- 버전 2.6: 플러그인 구조를 재구성하여 상호 작용성 향상, 더 많은 플러그인 추가
-- 버전 2.5: 자동 업데이트, 소스 코드 요약 중 텍스트가 너무 길고 토큰이 오버플로되는 문제 해결
-- 버전 2.4: (1)PDF 전체 번역 기능 추가; (2)입력 영역 위치 전환 기능 추가; (3)수직 레이아웃 옵션 추가; (4)멀티 스레드 함수 플러그인 최적화
-- 버전 2.3: 멀티 스레드 상호 작용성 강화
-- 버전 2.2: 함수 플러그인의 핫 리로드 지원
-- 버전 2.1: 접을 수 있는 레이아웃
-- 버전 2.0: 모듈화 함수 플러그인 도입
-- 버전 1.0: 기본 기능
-
-GPT Academic 개발자 QQ 그룹: `610599535`
-- 알려진 문제
- - 특정 웹 브라우저 번역 플러그인이 이 소프트웨어의 프론트엔드 실행에 방해가 되는 경우가 있습니다.
- - 공식 Gradio에는 호환성 문제가 많기 때문에 `requirements.txt`를 사용하여 Gradio를 설치하십시오.
-
-### III: 테마
-`THEME` 옵션 (`config.py`)을 수정하여 테마를 변경할 수 있습니다.
-1. `Chuanhu-Small-and-Beautiful` [URL](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
-
-
-### IV: 이 프로젝트의 개발 브랜치
-
-1. `master` 브랜치: 메인 브랜치, 안정 버전
-2. `frontier` 브랜치: 개발 브랜치, 테스트 버전
-
-
-### V: 참고 및 학습
-
-```
-코드에서는 다른 우수한 프로젝트의 디자인을 많이 참고했습니다. 순서는 무관합니다:
-
-# 清华ChatGLM2-6B:
-https://github.com/THUDM/ChatGLM2-6B
-
-# 清华JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-
-
-# Oobabooga 원 클릭 설치 프로그램:
-https://github.com/oobabooga/one-click-installers
-
-# 더보기:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.Portuguese.md b/docs/README.Portuguese.md
deleted file mode 100644
index 4cc02c19ebbb900c4424d60d680d654d4bc3fe8d..0000000000000000000000000000000000000000
--- a/docs/README.Portuguese.md
+++ /dev/null
@@ -1,357 +0,0 @@
-
-
-
-> **Nota**
->
-> Este README foi traduzido pelo GPT (implementado por um plugin deste projeto) e não é 100% confiável. Por favor, verifique cuidadosamente o resultado da tradução.
->
-> 7 de novembro de 2023: Ao instalar as dependências, favor selecionar as **versões especificadas** no `requirements.txt`. Comando de instalação: `pip install -r requirements.txt`.
-
-# GPT Acadêmico
- -**Se você gosta deste projeto, por favor, dê uma estrela nele. Se você inventou atalhos de teclado ou plugins úteis, fique à vontade para criar pull requests!** -Para traduzir este projeto para qualquer idioma utilizando o GPT, leia e execute [`multi_language.py`](multi_language.py) (experimental). - -> **Nota** -> -> 1. Observe que apenas os plugins (botões) marcados em **destaque** são capazes de ler arquivos, alguns plugins estão localizados no **menu suspenso** do plugin area. Também damos boas-vindas e prioridade máxima a qualquer novo plugin via PR. -> -> 2. As funcionalidades de cada arquivo deste projeto estão detalhadamente explicadas em [autoanálise `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Com a iteração das versões, você também pode clicar nos plugins de funções relevantes a qualquer momento para chamar o GPT para regerar o relatório de autonálise do projeto. Perguntas frequentes [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Método de instalação convencional](#installation) | [Script de instalação em um clique](https://github.com/binary-husky/gpt_academic/releases) | [Explicação de configuração](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 -> -> 3. Este projeto é compatível e encoraja o uso de modelos de linguagem chineses, como ChatGLM. Vários api-keys podem ser usados simultaneamente, podendo ser especificados no arquivo de configuração como `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando precisar alterar temporariamente o `API_KEY`, insira o `API_KEY` temporário na área de entrada e pressione Enter para que ele seja efetivo. - - -
- -Funcionalidades (⭐= funcionalidade recentemente adicionada) | Descrição ---- | --- -⭐[Integração com novos modelos](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) da Baidu, Wenxin e [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), [Shusheng](https://github.com/InternLM/InternLM) da Shanghai AI-Lab, [Xinghuo](https://xinghuo.xfyun.cn/) da Iflytek, [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3 -Aprimoramento, tradução, explicação de códigos | Aprimoramento com um clique, tradução, busca de erros gramaticais em artigos e explicação de códigos -[Atalhos de teclado personalizados](https://www.bilibili.com/video/BV14s4y1E7jN) | Suporte para atalhos de teclado personalizados -Design modular | Suporte a plugins poderosos e personalizáveis, plugins com suporte a [atualização a quente](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Análise de código](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] Análise instantânea da estrutura de projetos em Python/C/C++/Java/Lua/... ou [autoanálise](https://www.bilibili.com/video/BV1cj411A7VW) -Leitura de artigos, [tradução](https://www.bilibili.com/video/BV1KT411x7Wn) de artigos | [Plugin] Interpretação instantânea de artigos completos em latex/pdf e geração de resumos -Tradução completa de artigos em latex [PDF](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [aprimoramento](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin] Tradução completa ou aprimoramento de artigos em latex com um clique -Geração em lote de comentários | [Plugin] Geração em lote de comentários de funções com um clique -Tradução (inglês-chinês) de Markdown | [Plugin] Você já viu o [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) nas 5 línguas acima? 
-Criação de relatório de análise de bate-papo | [Plugin] Geração automática de relatório de resumo após a execução -Tradução [completa de artigos em PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extração de título e resumo de artigos em PDF + tradução completa (multithreading) -Auxiliar Arxiv | [Plugin] Insira o URL de um artigo Arxiv para traduzir o resumo + baixar o PDF com um clique -Correção automática de artigos em latex | [Plugin] Correções gramaticais e ortográficas de artigos em latex semelhante ao Grammarly + saída PDF comparativo -Auxiliar Google Scholar | [Plugin] Insira qualquer URL da busca do Google Acadêmico e deixe o GPT [escrever trabalhos relacionados](https://www.bilibili.com/video/BV1GP411U7Az/) para você -Agregação de informações da Internet + GPT | [Plugin] Capturar informações da Internet e obter respostas de perguntas com o GPT em um clique, para que as informações nunca fiquem desatualizadas -⭐Tradução refinada de artigos do Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] Tradução de alta qualidade de artigos do Arxiv com um clique, a melhor ferramenta de tradução de artigos atualmente -⭐Entrada de conversa de voz em tempo real | [Plugin] Monitoramento de áudio [assíncrono](https://www.bilibili.com/video/BV1AV4y187Uy/), segmentação automática de frases, detecção automática de momentos de resposta -Exibição de fórmulas, imagens e tabelas | Exibição de fórmulas em formato tex e renderizadas simultaneamente, suporte a fórmulas e destaque de código -⭐Plugin AutoGen para vários agentes | [Plugin] Explore a emergência de múltiplos agentes com o AutoGen da Microsoft! -Ativar o tema escuro | Adicione ```/?__theme=dark``` ao final da URL para alternar para o tema escuro -Suporte a múltiplos modelos LLM | Ser atendido simultaneamente pelo GPT3.5, GPT4, [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) do Tsinghua University e [MOSS](https://github.com/OpenLMLab/MOSS) da Fudan University se sente incrível, não é mesmo? -⭐Modelo de ajuste fino ChatGLM2 | Suporte para carregar o modelo ChatGLM2 ajustado e fornecer plugins de assistência ao ajuste fino do ChatGLM2 -Mais modelos LLM e suporte para [implantação pela HuggingFace](https://huggingface.co/spaces/qingxu98/gpt-academic) | Integração com a interface Newbing (Bing novo), introdução do [Jittorllms](https://github.com/Jittor/JittorLLMs) da Tsinghua University com suporte a [LLaMA](https://github.com/facebookresearch/llama) e [Panguα](https://openi.org.cn/pangu/) -⭐Pacote pip [void-terminal](https://github.com/binary-husky/void-terminal) | Chame todas as funções plugins deste projeto diretamente em Python, sem a GUI (em desenvolvimento) -⭐Plugin Terminal do Vácuo | [Plugin] Chame outros plugins deste projeto diretamente usando linguagem natural -Apresentação de mais novas funcionalidades (geração de imagens, etc.) ... | Veja no final deste documento ... - -
- - -- Nova interface (altere a opção LAYOUT em `config.py` para alternar entre os "Layouts de lado a lado" e "Layout de cima para baixo") -
- -
- - -- Todos os botões são gerados dinamicamente através da leitura do `functional.py`, você pode adicionar funcionalidades personalizadas à vontade, liberando sua área de transferência -
- -
- -- Aprimoramento/Correção -
- -
- - - -- Se a saída contiver fórmulas, elas serão exibidas tanto em formato tex quanto renderizado para facilitar a cópia e a leitura. -
- -
- -- Não tem vontade de ver o código do projeto? O projeto inteiro está diretamente na boca do chatgpt. -
- -
- -- Combinação de vários modelos de linguagem (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -
- -# Instalação -### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS) - -1. Baixe o projeto -```sh -git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure a API_KEY - -No arquivo `config.py`, configure a API KEY e outras configurações. [Clique aqui para ver o método de configuração em redes especiais](https://github.com/binary-husky/gpt_academic/issues/1). [Página Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). - -「 O programa verificará primeiro se existe um arquivo de configuração privada chamado `config_private.py` e substituirá as configurações correspondentes no arquivo `config.py`. Se você entender essa lógica de leitura, é altamente recomendável criar um novo arquivo de configuração chamado `config_private.py` ao lado do `config.py` e copiar as configurações do `config.py` para o `config_private.py` (copiando apenas os itens de configuração que você modificou). 」 - -「 Suporte para configurar o projeto por meio de `variáveis de ambiente`, o formato de gravação das variáveis de ambiente pode ser encontrado no arquivo `docker-compose.yml` ou em nossa [página Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). A prioridade de leitura das configurações é: `variáveis de ambiente` > `config_private.py` > `config.py`. 」 - - -3. Instale as dependências -```sh -# (Opção I: Se você está familiarizado com o Python, Python>=3.9) Observação: Use o pip oficial ou o pip da Aliyun. Método temporário para alternar fontes: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Opção II: Use o Anaconda) Os passos também são semelhantes (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # Crie um ambiente do Anaconda -conda activate gptac_venv # Ative o ambiente do Anaconda -python -m pip install -r requirements.txt # Este passo é igual ao da instalação do pip -``` - - -
Se você quiser suporte para o ChatGLM2 do THU/ MOSS do Fudan/RWKV como backend, clique para expandir -

-
-[Opcional] Se você quiser suporte para o ChatGLM2 do THU/MOSS do Fudan, precisará instalar dependências extras (pré-requisitos: familiarizado com o Python + já usou o PyTorch + o computador tem configuração suficiente):
-```sh
-# [Opcional Passo I] Suporte para ChatGLM2 do THU. Observações sobre o ChatGLM2 do THU: Se você encontrar o erro "Call ChatGLM fail 不能正常加载ChatGLM的参数" (Falha ao chamar o ChatGLM, não é possível carregar os parâmetros do ChatGLM), consulte o seguinte: 1: A versão instalada por padrão é a versão torch+cpu. Se você quiser usar a versão cuda, desinstale o torch e reinstale uma versão com torch+cuda; 2: Se a sua configuração não for suficiente para carregar o modelo, você pode modificar a precisão do modelo em request_llm/bridge_chatglm.py, alterando todas as ocorrências de AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) para AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llms/requirements_chatglm.txt
-
-# [Opcional Passo II] Suporte para MOSS do Fudan
-python -m pip install -r request_llms/requirements_moss.txt
-git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Observe que você deve estar no diretório raiz do projeto ao executar este comando
-
-# [Opcional Passo III] Suporte para RWKV Runner
-# Consulte a página Wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
-
-# [Opcional Passo IV] Verifique se o arquivo de configuração config.py contém os modelos desejados; os modelos compatíveis são os seguintes (a série jittorllms suporta apenas a solução Docker):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-

-
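-Antes de executar, vale retomar o passo 2: um `config_private.py` mínimo contém apenas os itens que você modificou. Esboço hipotético, apenas ilustrativo (o formato de múltiplas chaves do `API_KEY` é o citado na seção de configuração acima; `USE_PROXY` aparece só como exemplo de item copiado do `config.py`):
-
-```python
-# Esboço hipotético de um config_private.py mínimo (copie apenas o que você alterou)
-API_KEY = "openai-key1,openai-key2,azure-key3,api2d-key4"  # várias chaves separadas por vírgula
-USE_PROXY = False  # exemplo ilustrativo de um item herdado do config.py
-```
-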
- - - -4. Execute -```sh -python main.py -``` - -### Método de instalação II: Usando o Docker - -0. Implante todas as capacidades do projeto (este é um contêiner grande que inclui CUDA e LaTeX. Não recomendado se você tiver uma conexão lenta com a internet ou pouco espaço em disco) -[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) - -``` sh -# Modifique o arquivo docker-compose.yml para incluir apenas a seção 0 e excluir as outras seções. Em seguida, execute: -docker-compose up -``` - -1. ChatGPT + 文心一言 + spark + outros modelos online (recomendado para a maioria dos usuários) -[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) -[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) -[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) - -``` sh -# Modifique o arquivo docker-compose.yml para incluir apenas a seção 1 e excluir as outras seções. Em seguida, execute: -docker-compose up -``` - -Obs.: Se você precisar do plugin Latex, consulte a Wiki. Além disso, você também pode usar a seção 4 ou 0 para obter a funcionalidade do LaTeX. - -2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问 (você precisa estar familiarizado com o [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) para executar este modo) -[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) - -``` sh -# Modifique o arquivo docker-compose.yml para incluir apenas a seção 2 e excluir as outras seções. Em seguida, execute: -docker-compose up -``` - - -### Método de instalação III: Outros métodos de implantação -1. **Script de execução com um clique para Windows**. -Usuários do Windows que não estão familiarizados com o ambiente Python podem baixar o script de execução com um clique da [Release](https://github.com/binary-husky/gpt_academic/releases) para instalar a versão sem modelos locais. -A contribuição do script vem de [oobabooga](https://github.com/oobabooga/one-click-installers). - -2. Usar APIs de terceiros, Azure, etc., 文心一言, 星火, consulte a [página Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). - -3. Guia para evitar armadilhas na implantação em servidor em nuvem. -Consulte o [wiki de implantação em servidor em nuvem](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97). - -4. Algumas novas plataformas ou métodos de implantação - - Use Sealos [implantação com um clique](https://github.com/binary-husky/gpt_academic/issues/993). - - Use o WSL2 (Subsistema do Windows para Linux). 
Consulte [wiki de implantação](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2). - - Como executar em um subdiretório da URL (como `http://localhost/subpath`). Consulte [instruções de execução com o FastAPI](docs/WithFastapi.md) - - - -# Uso Avançado -### I: Personalização de Novos Botões de Atalho (Atalhos Acadêmicos) -Abra o arquivo `core_functional.py` em qualquer editor de texto, adicione o seguinte item e reinicie o programa. (Se o botão já existir, o prefixo e o sufixo podem ser modificados a qualquer momento sem reiniciar o programa). -Por exemplo: -``` -"超级英译中": { - # Prefixo, adicionado antes do seu input. Por exemplo, usado para descrever sua solicitação, como traduzir, explicar o código, revisar, etc. - "Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n", - - # Sufixo, adicionado após o seu input. Por exemplo, em conjunto com o prefixo, pode-se colocar seu input entre aspas. - "Suffix": "", -}, -``` -
- -
- -### II: Personalização de Funções Plugins -Crie poderosos plugins de função para executar tarefas que você pode e não pode imaginar. -Criar plugins neste projeto é fácil, basta seguir o modelo fornecido, desde que você tenha conhecimento básico de Python. -Consulte o [Guia dos Plugins de Função](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) para mais detalhes. - - -# Atualizações -### I: Dinâmico - -1. Função de salvar conversas. Chame a função "Salvar a conversa atual" na área de plugins para salvar a conversa atual em um arquivo HTML legível e recuperável. Além disso, chame a função "Carregar histórico de conversas" na área de plugins (menu suspenso) para restaurar conversas anteriores. -Dica: Se você clicar diretamente em "Carregar histórico de conversas" sem especificar o arquivo, poderá visualizar o cache do histórico do arquivo HTML. -
- -
- -2. ⭐Tradução de artigos Latex/Arxiv⭐ -
- ===> - -
- -3. Terminal vazio (entendendo a intenção do usuário a partir do texto em linguagem natural e chamando automaticamente outros plugins) - -- Passo 1: Digite "Por favor, chame o plugin 'Traduzir artigo PDF' e forneça o link https://openreview.net/pdf?id=rJl0r3R9KX" -- Passo 2: Clique em "Terminal vazio" - -
- -
- -4. Design de recursos modular, interface simples com suporte a recursos poderosos -
- - -
- -5. Tradução e interpretação de outros projetos de código aberto -
- - -
- -6. Recursos adicionais para [live2d](https://github.com/fghrsh/live2d_demo) (desativados por padrão, requer modificação no arquivo `config.py`) -
- -
- -7. Geração de imagens pela OpenAI -
- -
- -8. Análise e resumo de áudio pela OpenAI -
- -
- -9. Correção de erros em texto e código LaTeX -
- ===> - -
- -10. Alternância de idioma e tema -
- -
-
-
-### II: Versões:
-- Versão 3.70 (a fazer): Melhorar o plugin AutoGen e projetar uma série de plugins relacionados.
-- Versão 3.60: Introdução do AutoGen como base para a próxima geração de plugins.
-- Versão 3.57: Suporte para GLM3, Starfire v3, Wenxin Yiyan v4, correção de bugs relacionados a modelos locais executados simultaneamente.
-- Versão 3.56: Suporte para adicionar dinamicamente botões de função básicos e nova página de resumo em PDF.
-- Versão 3.55: Reformulação da interface do usuário, introdução de janelas flutuantes e menus.
-- Versão 3.54: Novo interpretador de código dinâmico (Code Interpreter) (em desenvolvimento)
-- Versão 3.53: Suporte para alterar dinamicamente o tema da interface, melhorias de estabilidade e correção de conflitos entre vários usuários.
-- Versão 3.50: Chamada de todas as funções de plugins deste projeto usando linguagem natural (Terminal vazio), suporte a categorização de plugins, melhorias na interface do usuário e design de novos temas.
-- Versão 3.49: Suporte para Baidu Qianfan Platform e Wenxin Yiyan.
-- Versão 3.48: Suporte para Alibaba DAMO Academy Tongyi Qianwen, Shanghai AI-Lab Shusheng e Xunfei Xinghuo.
-- Versão 3.46: Suporte para diálogos em tempo real totalmente automáticos.
-- Versão 3.45: Suporte para personalização do modelo ChatGLM2.
-- Versão 3.44: Suporte oficial ao Azure, aprimoramentos na usabilidade da interface.
-- Versão 3.4: Tradução completa de artigos Arxiv/Latex, correção de artigos Latex.
-- Versão 3.3: Funcionalidade de consulta a informações na internet.
-- Versão 3.2: Maior suporte para parâmetros de função de plugins (função de salvar conversas, interpretação de código em qualquer linguagem + perguntas sobre combinações LLM arbitrariamente).
-- Versão 3.1: Suporte para fazer perguntas a modelos GPT múltiplos! Suporte para API2D, balanceamento de carga em vários APIKeys.
-- Versão 3.0: Suporte para chatglm e outros pequenos modelos LLM.
-- Versão 2.6: Refatoração da estrutura de plugins, melhoria na interação, adição de mais plugins.
-- Versão 2.5: Auto-atualizável, resolve problemas de texto muito longo ou estouro de tokens ao resumir grandes projetos de código.
-- Versão 2.4: (1) Novo recurso de tradução completa de PDF; (2) Nova função para alternar a posição da área de input; (3) Nova opção de layout vertical; (4) Melhoria dos plugins de função em várias threads.
-- Versão 2.3: Melhorias na interação em várias threads.
-- Versão 2.2: Suporte para recarregar plugins sem reiniciar o programa.
-- Versão 2.1: Layout dobrável.
-- Versão 2.0: Introdução de plugins de função modular.
-- Versão 1.0: Funcionalidades básicas.
-
-GPT Academic QQ Group: `610599535`
-
-- Problemas conhecidos
- - Alguns plugins de tradução de navegadores podem interferir na execução deste software.
- - A biblioteca Gradio possui alguns bugs de compatibilidade conhecidos. Certifique-se de instalar o Gradio usando o arquivo `requirements.txt`.
-
-### III: Temas
-Você pode alterar o tema atualizando a opção `THEME` (config.py).
-1. `Chuanhu-Small-and-Beautiful` [Link](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
-
-
-### IV: Branches de Desenvolvimento deste Projeto
-
-1. Branch `master`: Branch principal, versão estável.
-2. Branch `frontier`: Branch de desenvolvimento, versão de teste.
-
-
-### V: Referências para Aprendizado
-
-```
-O código referenciou muitos projetos excelentes, em ordem aleatória:
-
-# Tsinghua ChatGLM2-6B:
-https://github.com/THUDM/ChatGLM2-6B
-
-# Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-
-
-# Oobabooga instalador com um clique:
-https://github.com/oobabooga/one-click-installers
-
-# Mais:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.Russian.md b/docs/README.Russian.md
deleted file mode 100644
index 471f17449884c06a63bcd20905fd263c574bf869..0000000000000000000000000000000000000000
--- a/docs/README.Russian.md
+++ /dev/null
@@ -1,360 +0,0 @@
-
-
-
-> **Примечание**
->
-> Этот README был переведен с помощью GPT (реализовано с помощью плагина этого проекта) и не может быть полностью надежным, пожалуйста, внимательно проверьте результаты перевода.
->
-> 7 ноября 2023 года: При установке зависимостей, пожалуйста, выберите **указанные версии** из `requirements.txt`. Команда установки: `pip install -r requirements.txt`.
-
-
-# GPT Academic (GPT Академический)
- -**Если вам нравится этот проект, пожалуйста, поставьте звезду; если у вас есть удобные горячие клавиши или плагины, приветствуются pull requests!** -Чтобы перевести этот проект на произвольный язык с помощью GPT, прочтите и выполните [`multi_language.py`](multi_language.py) (экспериментально). - -> **Примечание** -> -> 1. Пожалуйста, обратите внимание, что только плагины (кнопки), выделенные **жирным шрифтом**, поддерживают чтение файлов, некоторые плагины находятся в выпадающем меню **плагинов**. Кроме того, мы с радостью приветствуем и обрабатываем PR для любых новых плагинов с **наивысшим приоритетом**. -> -> 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). -> -> 3. Этот проект совместим и настоятельно рекомендуется использование китайской NLP-модели ChatGLM и других моделей больших языков производства Китая. Поддерживает одновременное использование нескольких ключей API, которые можно указать в конфигурационном файле, например, `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Если нужно временно заменить `API_KEY`, введите временный `API_KEY` в окне ввода и нажмите Enter для его подтверждения. - - - - -
- -Функции (⭐= Недавно добавленные функции) | Описание ---- | --- -⭐[Подключение новой модели](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [QianFan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) и WenxinYiYan, [TongYiQianWen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [ShuSheng](https://github.com/InternLM/InternLM), Xunfei [XingHuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), ZhiPu API, DALLE3 -Улучшение, перевод, объяснение кода | Одним нажатием выполнить поиск синтаксических ошибок в научных статьях, переводить, объяснять код -[Настройка горячих клавиш](https://www.bilibili.com/video/BV14s4y1E7jN) | Поддержка настройки горячих клавиш -Модульный дизайн | Поддержка настраиваемых мощных [плагинов](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), плагины поддерживают [горячую замену](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Профилирование кода](https://www.bilibili.com/video/BV1cj411A7VW) | [Плагин] Одним нажатием можно профилировать дерево проекта Python/C/C++/Java/Lua/... или [проанализировать самого себя](https://www.bilibili.com/video/BV1cj411A7VW) -Просмотр статей, перевод статей | [Плагин] Одним нажатием прочитать полный текст статьи в формате LaTeX/PDF и сгенерировать аннотацию -Перевод LaTeX статей, [улучшение](https://www.bilibili.com/video/BV1FT411H7c5/)| [Плагин] Одним нажатием перевести или улучшить статьи в формате LaTeX -Генерация пакетного комментария | [Плагин] Одним нажатием сгенерировать многострочный комментарий к функции -Перевод Markdown на английский и китайский | [Плагин] Вы видели документацию на сверху на пяти языках? [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)` -Анализ и создание отчета в формате чата | [Плагин] Автоматически генерируйте сводный отчет после выполнения -Функция перевода полноценной PDF статьи | [Плагин] Изъять название и аннотацию статьи из PDF + переводить полный текст (многопоточно) -[Arxiv помощник](https://www.bilibili.com/video/BV1LM4y1279X) | [Плагин] Просто введите URL статьи на arXiv, чтобы одним нажатием выполнить перевод аннотации + загрузить PDF -Одним кликом проверить статью на LaTeX | [Плагин] Проверка грамматики и правописания статьи LaTeX, добавление PDF в качестве справки -[Помощник Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Плагин] Создайте "related works" с помощью Google Scholar URL по вашему выбору. -Агрегирование интернет-информации + GPT | [Плагин] [GPT получает информацию из интернета](https://www.bilibili.com/video/BV1om4y127ck) и отвечает на вопросы, чтобы информация никогда не устаревала -⭐Точный перевод статей Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Плагин] [Переводите статьи Arxiv наивысшего качества](https://www.bilibili.com/video/BV1dz4y1v77A/) всего одним нажатием. 
Сейчас это лучший инструмент для перевода научных статей -⭐[Реальное время ввода голосом](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Плагин] Асинхронно [слушать аудио](https://www.bilibili.com/video/BV1AV4y187Uy/), автоматически разбивать на предложения, автоматически находить момент для ответа -Отображение формул/изображений/таблиц | Поддержка отображения формул в форме [tex и рендеринга](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), поддержка подсветки синтаксиса формул и кода -⭐Плагин AutoGen для множества интеллектуальных агентов | [Плагин] Используйте Microsoft AutoGen для исследования возможностей интеллектуального всплытия нескольких агентов! -Запуск [темной темы](https://github.com/binary-husky/gpt_academic/issues/173) | Добавьте `/?__theme=dark` в конец URL в браузере, чтобы переключиться на темную тему -[Поддержка нескольких моделей LLM](https://www.bilibili.com/video/BV1wT411p7yf) | Быть обслуживаемым GPT3.5, GPT4, [ChatGLM2 из Цинхуа](https://github.com/THUDM/ChatGLM2-6B), [MOSS из Фуданя](https://github.com/OpenLMLab/MOSS) одновременно должно быть очень приятно, не так ли? -⭐Модель ChatGLM2 Fine-tune | Поддержка загрузки модели ChatGLM2 Fine-tune, предоставляет вспомогательный плагин ChatGLM2 Fine-tune -Больше моделей LLM, поддержка [развертывания huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Включение интерфейса Newbing (новый Bing), введение поддержки китайских [Jittorllms](https://github.com/Jittor/JittorLLMs) для поддержки [LLaMA](https://github.com/facebookresearch/llama) и [Panguα](https://openi.org.cn/pangu/) -⭐Пакет pip [void-terminal](https://github.com/binary-husky/void-terminal) | Без GUI вызывайте все функциональные плагины этого проекта прямо из Python (разрабатывается) -⭐Плагин пустого терминала | [Плагин] Используя естественный язык, напрямую распоряжайтесь другими плагинами этого проекта -Больше новых функций (генерация изображений и т. д.) ... | Смотрите в конце этого документа ... -
- - -- Новый интерфейс (изменение опции LAYOUT в `config.py` позволяет переключиться между "расположением слева и справа" и "расположением сверху и снизу") -
- -
- - -- Все кнопки генерируются динамически на основе `functional.py` и могут быть свободно дополнены, освобождая буфер обмена -
- -
- -- Улучшение/исправление -
- -
- - - -- Если вывод содержит формулы, они отображаются одновременно в виде tex и отрендеренного вида для удобства копирования и чтения -
- -
- -- Не хочешь смотреть код проекта? Весь проект сразу в уста ChatGPT -
- -
- -- Смешанное использование нескольких больших языковых моделей (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -
-
-# Установка
-### Метод установки I: Прямой запуск (Windows, Linux или MacOS)
-
-1. Скачайте проект
-```sh
-git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Настройте API_KEY
-
-В файле `config.py` настройте API KEY и другие настройки, [нажмите здесь, чтобы узнать способы настройки в специальных сетевых средах](https://github.com/binary-husky/gpt_academic/issues/1). [Инструкции по настройке проекта](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
-
-「 Программа будет в первую очередь проверять наличие файла config_private.py с приватными настройками и заменять соответствующие настройки в файле config.py на те, которые указаны в файле config_private.py. Если вы понимаете эту логику, мы настоятельно рекомендуем вам создать новый файл настроек config_private.py рядом с файлом config.py и скопировать туда настройки из config.py (только те, которые вы изменяли). 」
-
-「 Поддерживается настройка проекта с помощью `переменных среды`. Пример настройки переменных среды можно найти в файле docker-compose.yml или на нашей [странице вики](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). Приоритет настроек: `переменные среды` > `config_private.py` > `config.py`. 」
-
-
-3. Установите зависимости
-```sh
-# (Выбор I: Если знакомы с Python, python>=3.9). Примечание: используйте официальный pip-репозиторий или зеркало Aliyun; временный способ сменить источник: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Выбор II: Используйте Anaconda). Шаги аналогичны (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11 # Создание среды Anaconda
-conda activate gptac_venv # Активация среды Anaconda
-python -m pip install -r requirements.txt # Здесь всё то же самое, что и при установке через pip
-```
-
Если вам нужна поддержка ChatGLM2 от Цинхуа/MOSS от Фуданя/Раннера RWKV как бэкенда, нажмите, чтобы развернуть -

-
-【Опциональный шаг】Если вам нужна поддержка ChatGLM2 от Цинхуа/MOSS от Фуданя, понадобится установить дополнительные зависимости (предполагается, что вы знакомы с Python + PyTorch + у вас достаточно мощный компьютер):
-```sh
-# 【Опциональный шаг I】Поддержка ChatGLM2 от Цинхуа. Примечание: Если вы столкнулись с ошибкой "Call ChatGLM fail 不能正常加载ChatGLM的参数", обратите внимание на следующее: 1: По умолчанию установлена версия torch+cpu; для использования cuda необходимо удалить torch и установить версию torch+cuda. 2: Если вы не можете загрузить модель из-за недостаточной мощности компьютера, вы можете изменить точность модели в файле request_llm/bridge_chatglm.py, заменив AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) на AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llms/requirements_chatglm.txt
-
-# 【Опциональный шаг II】Поддержка MOSS от Фуданя
-python -m pip install -r request_llms/requirements_moss.txt
-git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Обратите внимание: эту команду нужно запускать из корневой папки проекта
-
-# 【Опциональный шаг III】Поддержка RWKV Runner
-# Смотрите вики: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
-
-# 【Опциональный шаг IV】Убедитесь, что config.py содержит все нужные вам модели. Пример:
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-

-
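-Замена точности модели из «Опционального шага I» выглядит примерно так (поясняющий набросок: окружающий код request_llm/bridge_chatglm.py опущен, имя переменной tokenizer — условное допущение):
-
-```python
-# Поясняющий набросок: переход на int4-квантованную версию модели
-from transformers import AutoTokenizer
-
-# Было (полная точность):
-# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-
-# Стало (int4-версия для машин с недостаточной конфигурацией):
-tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-```
-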
-
-
-4. Запустите программу
-```sh
-python main.py
-```
-
-### Метод установки II: Используйте Docker
-
-0. Установка всех возможностей проекта (это большой образ с поддержкой cuda и LaTeX; но если у вас медленный интернет или маленький жесткий диск, мы не рекомендуем использовать этот метод).
-[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
-
-``` sh
-# Измените файл docker-compose.yml, сохраните схему 0 и удалите остальные. Затем запустите:
-docker-compose up
-```
-
-1. ChatGPT + 文心一言 (Wenxin Yiyan) + Spark и другие онлайн-модели (рекомендуется для большинства пользователей)
-[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
-[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
-[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
-
-``` sh
-# Измените файл docker-compose.yml, сохраните схему 1 и удалите остальные. Затем запустите:
-docker-compose up
-```
-
-P.S. Если вам нужен функционал, связанный с LaTeX, обратитесь к разделу Wiki. Кроме того, вы также можете использовать схему 4 или схему 0 для доступа к функционалу LaTeX.
-
-2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (通义千问) (требуется знакомство с Nvidia Docker)
-[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
-
-``` sh
-# Измените файл docker-compose.yml, сохраните схему 2 и удалите остальные. Затем запустите:
-docker-compose up
-```
-
-
-### Метод установки III: Другие способы развертывания
-1. **Скрипты запуска одним нажатием для Windows**.
-Пользователи Windows, не знакомые с окружением Python, могут загрузить одну из версий в разделе [Релизы](https://github.com/binary-husky/gpt_academic/releases) для установки версии без локальных моделей.
-Скрипт предоставлен [oobabooga](https://github.com/oobabooga/one-click-installers).
-
-2. Использование сторонних API, Azure, 文心一言, 星火 и т. д. — см. страницу [вики](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)
-
-3. Руководство по развертыванию на удаленном сервере.
-Пожалуйста, посетите [вики-страницу развертывания на облачном сервере](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97).
-
-4. Некоторые новые платформы или методы развертывания
- - Использование Sealos для [развертывания в один клик](https://github.com/binary-husky/gpt_academic/issues/993)
- - Использование WSL2 (Windows Subsystem for Linux). См. [Руководство развертывания-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
- - Как запустить на вложенном URL-адресе (например, `http://localhost/subpath`). См. [Инструкции по работе с FastAPI](docs/WithFastapi.md)
-
-
-### Installation method III: Other deployment options
-1. **One-click run scripts for Windows**.
-Windows users who are unfamiliar with the Python environment can download one of the releases published in the [Releases](https://github.com/binary-husky/gpt_academic/releases) section to install a version without local models.
-The scripts are adapted from [oobabooga](https://github.com/oobabooga/one-click-installers).
-
-2. Using third-party APIs, Azure, etc.: see the [wiki page](https://github.com/binary-husky/gpt_academic/wiki/Сonfig-Instructions)
-
-3. Deployment guide for remote servers.
-Please visit the [cloud-server deployment wiki page](https://github.com/binary-husky/gpt_academic/wiki/Руководство-по-развертыванию-на-облаке).
-
-4. Some newer platforms and deployment methods
-    - Using Sealos for [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993)
-    - Using WSL2 (Windows Subsystem for Linux). See the [deployment guide 2](https://github.com/binary-husky/gpt_academic/wiki/Using-WSL2-for-deployment)
-    - How to run under a nested URL (e.g. `http://localhost/subpath`). See the [FastAPI usage instructions](docs/WithFastapi.md)
-
-
-
-# Advanced usage
-### I: Custom convenience buttons (academic shortcut keys)
-Open `core_functional.py` in any text editor, add an entry such as the one below, and restart the program. (If the button already exists, both the prefix and the suffix support hot modification without restarting the program.)
-For example:
-```
-"Super English-to-Russian translation": {
-    # Prefix, added before your input. For example, used to describe your request: translation, code explanation, polishing, etc.
-    "Prefix": "Please translate the following paragraph into Russian, and then explain each term that appears in it using a Markdown table:\n\n",
-
-    # Suffix, added after your input. For example, together with the prefix it can wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
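-
-To make the mechanics concrete, here is a minimal sketch (not the project's actual code) of how such an entry is applied to what you type: the prefix and suffix simply wrap your input before it is sent to the model.
-
-```python
-# Hypothetical illustration of a core_functional.py entry in action
-entry = {
-    "Prefix": "Please translate the following paragraph into Russian, and then explain each term that appears in it using a Markdown table:\n\n",
-    "Suffix": "",
-}
-user_input = "Attention weighs interactions between tokens."
-prompt = entry["Prefix"] + user_input + entry["Suffix"]  # final text sent to the LLM
-print(prompt)
-```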
- -
-
-### II: Custom function plugins
-Create powerful function plugins to perform any task you need, including tasks you have not yet imagined.
-Writing and debugging a plugin for this project is easy: if you have basic Python knowledge, you can implement your own function plugin from the templates we provide (a minimal sketch follows below).
-For more information, see the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
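-
-As a rough illustration, a plugin is essentially a Python generator that receives the user's input and streams updates back to the UI. The sketch below is hypothetical and modeled loosely on the project's templates; the exact signature used in `crazy_functions/` may differ between versions:
-
-```python
-# Hypothetical minimal plugin sketch (not the project's actual template)
-def demo_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    chatbot.append((txt, "Working on it ..."))
-    yield chatbot, history, "Normal"       # hand control back so the UI can refresh
-    answer = "Result for: " + txt          # a real plugin would call an LLM here
-    chatbot[-1] = (txt, answer)
-    history.extend([txt, answer])
-    yield chatbot, history, "Done"
-```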
-
-
-# Updates
-### I: Highlights
-
-1. Conversation saving. Call "Save current conversation" in the function-plugin area to save the current conversation as a readable and restorable html file.
-You can also use "Load conversation history archive" in the function-plugin area (drop-down menu) to restore a previous conversation.
-Tip: clicking "Load conversation history archive" without specifying a file lets you browse the cache of saved html archives.
-
-
-2. ⭐ LaTeX/Arxiv paper translation ⭐
- ===> - -
-
-3. Void Terminal (infers the user's intent from natural-language input and automatically calls other plugins)
-
-- Step 1: Type "Please call the plugin to translate the PDF paper at https://openreview.net/pdf?id=rJl0r3R9KX".
-- Step 2: Click "Void Terminal".
-
- -
-
-4. Modular function design: simple interfaces that support powerful functionality
- - -
-
-5. Translation and analysis of other open-source projects
- - -
-
-6. Decoration feature [meme](https://github.com/fghrsh/live2d_demo) (disabled by default; requires editing `config.py`)
- -
-
-7. Image generation with OpenAI
- -
-
-8. Audio analysis and summarization with OpenAI
- -
-
-9. Full-text LaTeX proofreading and error correction
- ===> - -
-
-10. Language and theme switching
- -
-
-
-
-### II: Versions:
-- Version 3.70 (planned): optimizations for AutoGen and a series of companion plugins
-- Version 3.60: introduced AutoGen as the foundation of the next generation of plugins
-- Version 3.57: support for GLM3, Spark v3 and Wenxin Yiyan v4; fixed concurrency bugs in local models
-- Version 3.56: support for adding extra function buttons on the fly; new PDF report page
-- Version 3.55: reworked user interface; introduced a floating window and a menu bar
-- Version 3.54: added a Code Interpreter (in development)
-- Version 3.53: dynamic selection of interface themes; improved stability; fixed conflicts between multiple users
-- Version 3.50: natural-language calls to all of the project's function plugins (Void Terminal); plugin categories; improved UI; new themes
-- Version 3.49: support for the Baidu Qianfan platform and Wenxin Yiyan
-- Version 3.48: support for Alibaba DAMO Academy (Tongyi Qianwen), Shanghai AI-Lab (InternLM) and iFlytek Spark
-- Version 3.46: fully automated real-time voice conversation
-- Version 3.45: support for a customized fine-tuned ChatGLM2
-- Version 3.44: official Azure support; improved UI usability
-- Version 3.4: + full-text PDF translation, + LaTeX document proofreading
-- Version 3.3: + internet information features
-- Version 3.2: function plugins support more parameter interfaces (conversation saving, code interpretation in any language + asking any combination of LLMs simultaneously)
-- Version 3.1: support for querying several gpt models at once! API2D support; load balancing across several api keys
-- Version 3.0: support for chatglm and other small llms
-- Version 2.6: reworked plugin architecture for better interactivity; added more plugins
-- Version 2.5: self-updating; fixed text-length and token-overflow problems when processing long texts
-- Version 2.4: (1) added full PDF translation; (2) added switching the input area's position; (3) added a vertical layout option; (4) optimized multithreaded function plugins
-- Version 2.3: improved multithreaded interactivity
-- Version 2.2: hot reloading of function plugins
-- Version 2.1: collapsible layout
-- Version 2.0: introduced modular function plugins
-- Version 1.0: basic functionality
-
-GPT Academic developer QQ group: `610599535`
-
-- Known issues
-  - Some browser extensions interfere with this software's user interface
-  - Official Gradio has many compatibility issues, so be sure to install Gradio via `requirement.txt`
-
-### III: Themes
-You can change the theme by editing the `THEME` option (config.py)
-1. `Chuanhu-Small-and-Beautiful` [link](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
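-
-Assuming the `THEME` key works as described above, switching themes is a one-line change in config.py (or config_private.py):
-
-```python
-# config.py: pick one of the available theme names
-THEME = "Chuanhu-Small-and-Beautiful"
-```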
-
-### IV: Development branches of this project
-
-1. `master` branch: main branch, stable version
-2. `frontier` branch: development branch, test version
-
-
-### V: References and learning
-
-```
-The code draws on many functions from other excellent projects, in no particular order:
-
-# Tsinghua ChatGLM2-6B:
-https://github.com/THUDM/ChatGLM2-6B
-
-# Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-
-
-# The Oobabooga one-click installer:
-https://github.com/oobabooga/one-click-installers
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.md.German.md b/docs/README.md.German.md
deleted file mode 100644
index d514de30f54bd8931568c029a3bbd3aa3eacdbb1..0000000000000000000000000000000000000000
--- a/docs/README.md.German.md
+++ /dev/null
@@ -1,307 +0,0 @@
-> **Note**
->
-> When installing dependencies, strictly select the **versions specified** in **requirements.txt**.
->
-> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`
-
-# GPT Academic (academically optimized GPT)
-
-**If you like this project, please give it a star; if you have developed better shortcut keys or function plugins, feel free to open a pull request.**
-
-If you like this project, please give it a star. If you have developed more useful academic shortcuts or function plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[Japanese|](docs/README_JP.md)[Korean|](https://github.com/mldljyh/ko_gpt_academic)[Russian|](docs/README_RS.md)[French](docs/README_FR.md), translated by this project itself.
-To translate this project into any language with GPT, read and run `multi_language.py` (experimental).
-
-> **Note**
->
-> 1. Please note that only function plugins (buttons) marked in **red** can read files, and some plugins are located in the **drop-down menu** of the plugin area. We also welcome and handle any new function plugin with **top priority**.
->
-> 2. The functionality of each file in this project is described in detail in the self-analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions evolve, you can click the relevant function plugins at any time to call GPT and regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation instructions](#Installation).
->
-> 3. This project is compatible with, and encourages the use of, Chinese language models such as ChatGLM, RWKV, Pangu, etc. Several api-keys can coexist and be specified in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`.
-> When an `API_KEY` needs to be changed temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it.
-
-Feature | Description
--- | --
-One-click polishing | Supports one-click polishing and one-click grammar checking of academic papers
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Displays, explains, generates, and comments code
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other Python/C/C++/Java/Lua/... project trees
-Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click interpretation of a full LaTeX/PDF paper and generation of an abstract
-Full LaTeX translation and [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a LaTeX paper
-Batch comment generation | [Function plugin] One-click batch generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after execution
-[Full PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multithreaded)
-[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an Arxiv paper URL for one-click abstract translation + PDF download
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search URL, let gpt help you write your [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] Lets GPT [gather information from the internet first](https://www.bilibili.com/video/BV1om4y127ck/) and then answer, so information never goes stale
-Formula/image/table display | Shows formulas in both [TeX form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formula and code highlighting
-Multithreaded plugin support | Supports multithreaded calls to chatgpt for [batch processing](https://www.bilibili.com/video/BV1FT411H7c5/) of text or programs
-Dark Gradio [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported, [API2D](https://api2d.com/) interface supported | Being served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) must feel great, right?
-Access to more LLM models, [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) supported | Added the Newbing interface (new Bing), introduced support for Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Pangu alpha](https://openi.org.cn/pangu/)
-More new features (image generation, etc.) …… | See the end of this document ……
-
-- New interface (change the LAYOUT option in `config.py` to switch between "side-by-side layout" and "top-bottom layout")
-
- -
-- All buttons are dynamically generated by reading `functional.py`; custom functions can be added easily, freeing up your clipboard (see the sketch after this list).
-
- -
- -- Proofreading/Correcting -
- -
- -- If the output contains formulas, they will be displayed in both tex format and rendered format for easy copying and reading. -
- -
-
-- Don't feel like reading the project code? Just show the whole project to chatgpt.
-
- -
- -- Multiple large language models are mixed and called together (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4). -
- -
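-
-A toy sketch of the dynamic-button idea mentioned in the first bullet of the list above (hypothetical, not the project's code): iterate over a functional.py-style dictionary and create one Gradio button per entry.
-
-```python
-import gradio as gr
-
-functional = {
-    "Polish": {"Prefix": "Please polish the following text:\n\n", "Suffix": ""},
-    "Explain code": {"Prefix": "Please explain this code:\n\n", "Suffix": ""},
-}
-
-with gr.Blocks() as demo:
-    box = gr.Textbox(label="Input")
-    out = gr.Textbox(label="Prompt that would be sent to the model")
-    for name, cfg in functional.items():
-        # the default argument pins the current cfg to each button's callback
-        gr.Button(name).click(
-            fn=lambda text, c=cfg: c["Prefix"] + text + c["Suffix"],
-            inputs=box, outputs=out,
-        )
-
-demo.launch()
-```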
-
----
-# Installation
-## Installation-Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure API_KEY
-
-Configure the API KEY and other settings in `config.py`. [Special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists, and uses any configuration defined there to override the same fields in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and transferring (copying) the configurations from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information safer. P.S. The project also supports configuring most options through environment variables; the environment-variable format follows the `docker-compose` file. Reading priority: environment variable > `config_private.py` > `config.py`.)
-
-
-3. Install dependencies
-```sh
-# (Option I: if familiar with Python; Python 3.9 or above, the newer the better) Note: use the official pip source or the Aliyun pip source; to switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: if not familiar with Python) Use anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11  # create an anaconda environment
-conda activate gptac_venv               # activate the anaconda environment
-python -m pip install -r requirements.txt  # same step as the pip installation
-```
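-
-As a concrete illustration of the override logic described in step 2, a minimal `config_private.py` might look like the sketch below (placeholder values only; the key names are assumed to mirror those in `config.py`):
-
-```python
-# config_private.py: lives next to config.py and is not tracked by git.
-# Anything defined here overrides the same-named field in config.py.
-API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"   # placeholder, not a real key
-USE_PROXY = True
-proxies = {
-    "http":  "socks5h://localhost:11284",   # adjust to your own proxy, if any
-    "https": "socks5h://localhost:11284",
-}
-```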
Click to expand if you need Tsinghua ChatGLM / Fudan MOSS as backends

-
-[Optional Step] To use Tsinghua ChatGLM / Fudan MOSS as backends, additional dependencies need to be installed (prerequisites: familiar with Python, have used Pytorch, and the machine is powerful enough):
-```sh
-# [Optional Step I] Support Tsinghua ChatGLM. Remark: if you encounter the "Call ChatGLM fail Cannot load ChatGLM parameters" error, refer to the following: 1. The default installation above is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded due to insufficient machine configuration, change the model precision in `request_llm/bridge_chatglm.py`: modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llm/requirements_chatglm.txt
-
-# [Optional Step II] Support Fudan MOSS
-python -m pip install -r request_llm/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # when executing this line, you must be in the project root path
-
-# [Optional Step III] Make sure AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```

-
-
-
-4. Run
-```sh
-python main.py
-```
-
-5. Test a function plugin
-```
-- Test the function-plugin template (asks GPT what happened in history on this day); use this function as a template to implement more complex functions
-  Click "[Function Plugin Template Demo] Today in History"
-```
-
-## Installation-Method 2: Using Docker
-
-1. ChatGPT only (recommended for most people)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git  # download the project
-cd gpt_academic                                             # enter the path
-nano config.py   # edit config.py with any text editor: configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
-docker build -t gpt-academic .  # install
-
-# (Last step, option 1) On Linux, using `--net=host` is more convenient and faster
-docker run --rm -it --net=host gpt-academic
-# (Last step, option 2) On macOS/Windows, you can only use the -p option to expose the container's port (e.g. 50923) on the host
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
-
-``` sh
-# Edit docker-compose.yml: delete solutions 1 and 3 and retain solution 2. Adjust solution 2's configuration in docker-compose.yml following the comments there.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
-``` sh
-# Edit docker-compose.yml: delete solutions 1 and 2 and retain solution 3. Adjust solution 3's configuration in docker-compose.yml following the comments there.
-docker-compose up
-```
-
-
-## Installation-Method 3: Other deployment options
-
-1. How to use a reverse-proxy URL / Microsoft Azure API
-Configure API_URL_REDIRECT according to the instructions in `config.py` (see the sketch after this list).
-
-2. Remote cloud-server deployment (requires cloud-server knowledge and experience)
-Please visit [Deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL2 (Windows Subsystem for Linux)
-Please visit [Deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to run at a secondary URL (such as `http://localhost/subpath`)
-Please visit [FastAPI operating instructions](docs/WithFastapi.md)
-
-5. Run with docker-compose
-Please read docker-compose.yml and follow its prompts.
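-
-For item 1 above, a hedged example of what the `API_URL_REDIRECT` setting might look like in `config.py` (the dict maps the official endpoint to your own gateway; the URL below is a placeholder):
-
-```python
-# config.py: redirect the OpenAI endpoint to a reverse proxy or an Azure-compatible gateway
-API_URL_REDIRECT = {
-    "https://api.openai.com/v1/chat/completions": "https://your-gateway.example.com/v1/chat/completions",
-}
-```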
-
----
-# Advanced Usage
-## Customize new convenience buttons / custom function plugins
-
-1. Customize new convenience buttons (academic shortcut keys)
-Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has been added successfully and is visible, then the prefix and suffix can be hot-modified and take effect without restarting the program.)
-For example
-```
-"Super English to Chinese": {
-    # Prefix, added before your input. For example, used to describe your request, such as translating, explaining code, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese, and then use a markdown table to explain the proper nouns that appear in the text one by one:\n\n",
-
-    # Suffix, added after your input. For example, combined with the prefix you can enclose your input in quotes.
-    "Suffix": "",
-},
-```
-
-2. Custom function plugins
-
-Write powerful function plugins to perform any task you want, even ones you have not thought of yet.
-Writing and debugging plugins in this project is easy: as long as you have some Python knowledge, you can implement your own plugin by imitating the template we provide.
-For more information, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Latest Update
-## New feature highlights
-
-1. Conversation saving. Call "Save current conversation" in the function-plugin area to save the current conversation as a readable and restorable HTML file. In the function-plugin area (drop-down menu), call "Load conversation history archive" to restore a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file shows the cached HTML archives; clicking "Delete all local conversation history" deletes all cached HTML archives.
- -
-
-2. Report generation. Most plugins generate a work report after execution.
- - - -
-
-3. Modular function design: simple interfaces with powerful functionality.
- - -
-
-4. This is an open-source project that can "translate itself".
- -
-
-5. Translating other open-source projects is no problem.
- -
- -
- -
-
-6. Decorate the UI with small [`live2d`](https://github.com/fghrsh/live2d_demo) features (disabled by default; changes to `config.py` required).
- -
-
-7. New MOSS language-model support.
- -
-
-8. OpenAI image generation.
- -
-
-9. OpenAI audio analysis and summarization.
- -
-
-10. LaTeX proofreading of the full text.
- -
-
-
-## Version:
-- Version 3.5 (todo): call all of this project's function plugins with natural language (high priority)
-- Version 3.4 (todo): improved multithreading support for local large language models (LLM)
-- Version 3.3: + internet information synthesis
-- Version 3.2: function plugins support more parameter interfaces (conversation saving, interpreting code in any language + asking any combination of LLMs simultaneously)
-- Version 3.1: support for querying multiple GPT models at once! API2D support; load balancing across multiple API keys
-- Version 3.0: support for chatglm and other small LLMs
-- Version 2.6: restructured the plugin architecture for better interactivity; introduced more plugins
-- Version 2.5: self-updating; fixed overly long text and token overflow when summarizing large projects
-- Version 2.4: (1) added full PDF translation; (2) added switching the input area's position; (3) added a vertical layout option; (4) optimized multithreaded function plugins
-- Version 2.3: improved multithreaded interactivity
-- Version 2.2: function plugins support hot reloading
-- Version 2.1: collapsible layout
-- Version 2.0: introduced modular function plugins
-- Version 1.0: basic functionality
-
-gpt_academic developer QQ group 2: 610599535
-
-- Known issues
-  - Some browser translation extensions interfere with this software's frontend
-  - A Gradio version that is too high or too low causes various glitches
-
-## References and learning
-
-```
-The code draws on the designs of many other excellent projects, mainly:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/docs/README.md.Italian.md b/docs/README.md.Italian.md
deleted file mode 100644
index 76efe1857bc08b435583f7e3274a5d838eb48dba..0000000000000000000000000000000000000000
--- a/docs/README.md.Italian.md
+++ /dev/null
@@ -1,316 +0,0 @@
-> **Note**
->
-> When installing dependencies, strictly select the **versions specified** in the requirements.txt file.
->
-> ` pip install -r requirements.txt`
-
-# GPT Academic optimization (GPT Academic)
-
-**If you like this project, please give it a star. If you have developed more useful academic shortcuts or function plugins, feel free to open an issue or pull request. We also have a README in [English|](README_EN.md)[Japanese|](README_JP.md)[Korean|](https://github.com/mldljyh/ko_gpt_academic)[Russian|](README_RS.md)[French](README_FR.md) translated by this project itself.
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Please note that only plugins (buttons) marked in **red** support reading files; some plugins are located in the **drop-down menu** of the plugin area. We accept and handle PRs for any new plugin with **top priority**!
->
-> 2. The functionality of every file in this project is described in detail in the self-translated analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can click the relevant function plugins at any time to call GPT and regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installazione).
->
-> 3. This project is compatible with, and encourages the use of, Chinese large language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys can coexist and be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. To replace an `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it.
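-
-A small illustration of the multi-key setup mentioned in note 3 (placeholder keys; the comma-separated format is taken from the note itself):
-
-```python
-# config.py (better: config_private.py): several keys can coexist, comma-separated
-API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxxxx-api2dkey3"  # placeholders, not real keys
-```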
-
-Feature | Description
--- | --
-One-click polishing | Supports one-click polishing and one-click grammar checking of papers
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Displays, explains, generates, and comments code
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One click analyzes the tree of other Python/C/C++/Java/Lua/... projects
-Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click interpretation of a full LaTeX/PDF paper and generation of an abstract
-Full LaTeX translation, [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a LaTeX paper
-Batch comment generation | [Function plugin] One-click batch generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you read the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after execution
-[Full PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper + translates the full text (multithreaded)
-[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an Arxiv paper URL to translate the abstract with one click + download the PDF
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT help you write your [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] Lets GPT gather information from the internet before answering questions, so information never goes stale
-Formula/image/table display | Shows formulas in both [TeX and rendered](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) form; supports formula and code highlighting
-Multithreaded function plugin support | Supports multithreaded calls to chatgpt for one-click processing of large amounts of text or programs
-Dark gradio [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-Support for more LLM models, API2D support | Being served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) must feel great, right?
-More LLM models supported, [Huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) supported | Added the Newbing interface (new Bing), introduced support for Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [PanGu-α](https://openi.org.cn/pangu/)
-More demonstrations of new features (image generation, etc.)... | See the end of this document...
-
-
-- New interface (change the LAYOUT option in `config.py` to switch between side-by-side layout and top-bottom layout)
-
- -
-
-- All buttons are generated dynamically by reading the functional.py file; new functionality can be added easily, freeing up the clipboard.
-
- -
-
-- Proofreading/correction
-
- -
-
-- If the output contains a formula, it is shown both as text and in rendered form, for easy copying and reading.
-
- -
-
-- No time to read the project code? Just ask chatgpt directly about the whole project.
-
- -
-
-- Mixed calls to several large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
- -
-
----
-# Installation
-## Installation - Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure API_KEY
-
-In `config.py`, configure your API KEY and other settings; see also the [configs for special network environments](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(N.B. When the program runs, it first checks whether a private configuration file named `config_private.py` exists and uses it to override the same settings in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into it. `config_private.py` is not managed by git and gives your personal information extra protection. N.B. The project also supports configuring most options through environment variables; the environment-variable syntax is described in the `docker-compose` file. Reading priority: environment variables > `config_private.py` > `config.py`.)
-
-
-3. Install dependencies
-```sh
-# (Choice I: if you are familiar with Python; Python 3.9 or higher, the newer the better) Note: use the official pip repository or the Aliyun pip repository; to switch repositories temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Choice II: if you do not know Python) Use anaconda; the process is similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11  # create the anaconda environment
-conda activate gptac_venv               # activate the anaconda environment
-python -m pip install -r requirements.txt  # this step works the same as the pip installation
-```
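-
-A sketch of the reading priority described in step 2 (environment variables > `config_private.py` > `config.py`); this is an illustrative reimplementation, not the project's actual helper:
-
-```python
-import os
-
-def read_conf(name, default=None):
-    """Hypothetical lookup honoring: env var > config_private.py > config.py."""
-    if name in os.environ:                    # highest priority
-        return os.environ[name]
-    try:
-        import config_private                 # optional, git-ignored
-        if hasattr(config_private, name):
-            return getattr(config_private, name)
-    except ImportError:
-        pass
-    import config                             # repository defaults
-    return getattr(config, name, default)
-
-print(read_conf("WEB_PORT", 8080))
-```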
Click here to expand if you want Tsinghua's ChatGLM / Fudan's MOSS as backends

-
-【Optional step】If you want Tsinghua's ChatGLM / Fudan's MOSS as backends, additional dependencies must be installed (prerequisites: you know Python, have experience with Pytorch, and your machine is powerful enough):
-```sh
-# 【Optional step I】Support Tsinghua's ChatGLM. Note on ChatGLM: if you hit the error "Call ChatGLM fail cannot load ChatGLM parameters", do the following: 1) the default installation is the torch+cpu version; to use CUDA, uninstall torch and reinstall torch+cuda; 2) if the model cannot be loaded because your machine is not powerful enough, change the model precision in request_llm/bridge_chatglm.py: replace AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llm/requirements_chatglm.txt
-
-# 【Optional step II】Support Fudan's MOSS
-python -m pip install -r request_llm/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # note: you must be in the project root directory when running this line
-
-# 【Optional step III】Make sure the config.py configuration file includes all the models you want; currently supported models (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```

-
-
-
-4. Run
-```sh
-python main.py
-```
-
-5. Test function plugins
-```
-- Test the function-plugin template (asks GPT what happened in history on this day); use this function as a template to implement more complex functionality
-  Click "[Function plugin template demo] Today in history"
-```
-
-## Installation - Method 2: Using Docker
-
-1. ChatGPT only (recommended for most people)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git  # download the project
-cd gpt_academic                                             # enter the path
-nano config.py   # edit config.py with any text editor: configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923)
-docker build -t gpt-academic .  # install
-
-# (last step, option 1) On Linux, using `--net=host` is more convenient and faster
-docker run --rm -it --net=host gpt-academic
-# (last step, option 2) On MacOS/Windows, the -p option can be used to expose the container's port (e.g. 50923) on the host
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
-
-``` sh
-# Edit docker-compose.yml: delete plans 1 and 3 and keep plan 2. Adjust plan 2's configuration in docker-compose.yml following the comments there.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
-
-``` sh
-# Edit docker-compose.yml: delete plans 1 and 2 and keep plan 3. Adjust plan 3's configuration in docker-compose.yml following the comments there.
-docker-compose up
-```
-
-
-## Installation - Method 3: Other deployment options
-
-1. How to use a redirect URL / the Microsoft Azure cloud API
-Configure API_URL_REDIRECT following the instructions in `config.py`.
-
-2. Deployment on a remote cloud server (requires cloud-server knowledge and experience)
-Please visit [deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL2 (Windows Subsystem for Linux)
-Please visit [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to run ChatGPT under a subpath (e.g. `http://localhost/subpath`)
-Please visit the [FastAPI operating instructions](docs/WithFastapi.md)
-
-5. Run with docker-compose
-Please read the docker-compose.yml file and follow the instructions provided there.
-
----
-# Advanced usage
-## Custom buttons / custom function plugins
-
-1. Custom buttons (academic shortcut keys)
-Open `core_functional.py` with any text editor, add an entry like the one below, and restart the program (if the button has already been added successfully and is visible, both the prefix and the suffix can be modified on the fly without restarting the program).
-
-For example
-```
-"超级英译中": {
-    # Prefix, added before your input. For example, to describe your request: translate, explain code, correct errors, etc.
-    "Prefix": "Please translate this text into Chinese, and then explain every technical term that appears in it using a markdown table:\n\n",
-
-    # Suffix, added after your input. For example, together with the prefix you can wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
- -
-
-2. Custom function plugins
-
-Write custom function plugins to perform any task you want, even tasks you have never thought of.
-Writing and debugging plugins in this project is easy: with some basic Python knowledge, you can implement your own plugin function by following our template. For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Latest update
-## New feature highlights
-
-1. Conversation saving. In the function-plugin area, click "Save current conversation" to save the current conversation as a readable and restorable html file; in the function-plugin area (drop-down menu), click "Load archived conversation history" to restore a previous conversation. Tip: clicking "Load archived conversation history" without specifying a file shows the cached html archives; clicking "Delete all local conversation history" deletes all html archive caches.
- -
-
-2. Report generation. Most plugins generate a work report after execution.
- - - -
-
-3. Modular function design: simple interfaces capable of supporting powerful functionality.
- - -
-
-4. This is an open-source project that can "translate itself".
- -
-
-5. Translating other open-source projects is simple.
- -
- -
- -
-
-6. A small decorative feature for [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; `config.py` must be edited).
- -
-
-7. MOSS large language model support
- -
-
-8. OpenAI image generation
- -
-
-9. OpenAI audio analysis and summarization
- -
-
-10. Full-text LaTeX checking
- -
-
-
-## Version:
-- version 3.5 (todo): call all of this project's function plugins with natural language (high priority)
-- version 3.4 (todo): multithreading support for the local ChatGLM large model
-- version 3.3: + internet information synthesis
-- version 3.2: function plugins support more parameter interfaces (conversation saving, reading code in any language + asking any combination of LLMs simultaneously)
-- version 3.1: support for querying multiple gpt models at once! api2d support; load balancing across multiple apikeys
-- version 3.0: support for Chatglm and other small LLMs
-- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
-- version 2.5: self-updating; fixed overly long text and token overflow when summarizing large engineering projects
-- version 2.4: (1) added full PDF translation; (2) added switching the input area's position; (3) added a vertical layout option; (4) optimized multithreaded function plugins
-- version 2.3: improved multithreaded interactivity
-- version 2.2: function plugins support hot reloading
-- version 2.1: collapsible layout
-- version 2.0: introduced modular function plugins
-- version 1.0: basic functionality
-
-gpt_academic developer QQ group 2: 610599535
-
-- Known issues
-  - Some browser translation plugins interfere with this software's frontend
-  - A gradio version that is too high or too low causes various malfunctions
-
-## References and learning
-
-```
-The code draws on many excellent designs from other projects, mainly:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.md.Korean.md b/docs/README.md.Korean.md
deleted file mode 100644
index 61b8e4a051b455514748363e62ac04fd27bd10e3..0000000000000000000000000000000000000000
--- a/docs/README.md.Korean.md
+++ /dev/null
@@ -1,270 +0,0 @@
-> **Note**
->
-> When installing dependencies, strictly select the **versions specified** in requirements.txt.
->
-> `pip install -r requirements.txt`
-
-# GPT Academic optimization (GPT Academic)
-
-**If you like this project, please give it a star. If you have more useful academic shortcuts or function plugins, feel free to open an issue or pull request. We also have READMEs in [English|](docs/README_EN.md)[Japanese|](docs/README_JP.md)[Korean|](https://github.com/mldljyh/ko_gpt_academic)[Russian|](docs/README_RS.md)[French](docs/README_FR.md).
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Only function plugins (buttons) marked in **red** support reading files; some plugins are located in the **drop-down menu** of the plugin area. New plugins are welcomed and handled with **top priority**!
->
-> 2. The functionality of each file in this project is described in detail in [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions are updated, you can click the relevant function plugins at any time to call GPT and regenerate the project's self-analysis report.
-> Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
->
-> 3. This project is compatible with attempts such as the Chinese language models chatglm, RWKV, Pangu, etc. Multiple api-keys are supported and can be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. To change the `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it immediately.
-
-Feature | Description
--- | --
-One-click polishing | Supports one-click polishing and finding grammar errors in papers
-One-click Chinese-English translation | One-click Chinese-English translation
-Code explanation | Displays, explains, generates, and comments code
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] Analyzes the project tree of other Python/C/C++/Java/Lua/... projects
-Paper reading, translation | [Function plugin] Reads a full LaTeX/PDF paper and generates an abstract
-LaTeX full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a LaTeX paper
-Batch comment generation | [Function plugin] Batch-generates function comments with one click
-Markdown Chinese-English translation | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after execution
-[PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multithreaded)
-[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an Arxiv paper URL to translate the abstract and download the PDF
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given a Google Scholar search page URL, gpt helps you write the [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] Lets GPT collect information from the internet first and then answer questions, so the information is never outdated
-Formula/image/table display | Supports formula and code highlighting
-Multithreaded function plugin support | Runs chatgpt across multiple requests to process [large amounts of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
-Dark Gradio theme at startup | Append ```/?__theme=dark``` to the end of the browser URL to switch to the dark theme
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported, [API2D](https://api2d.com/) interface supported | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) all at once must feel great!
-More LLM models and [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) supported | Added the new Bing interface (new Bing), support for Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Pangu α](https://openi.org.cn/pangu/)
-Other new features (image generation, etc.) ... | See the end of this document ...
-
-- All buttons are generated dynamically by reading functional.py; custom functionality can be added freely, freeing up the clipboard.
- -
-
-- Proofreading/typo correction
-
- -
-
-- If the output contains formulas, they are shown in both TeX and rendered form for easy copying and reading.
-
- -
-
-- No time to look at the project code? Just show the whole project to chatgpt.
-
- -
-
-- Mixed calls to various large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
- -
-
----
-# Installation
-## Installation-Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure API_KEY
-
-Configure the API KEY and other settings in `config.py`; see also the [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists and uses its settings to override the same-named settings in `config.py`. If you understand this reading logic, we recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into it. `config_private.py` is not managed by git and protects your private information better. P.S. The project also supports configuring most options via environment variables; see the `docker-compose` file for the environment-variable format. Priority: environment variables > `config_private.py` > `config.py`.)
-
-
-3. Install dependencies
-```sh
-# (Choice I: if you have Python experience; Python 3.9 or higher, the newer the better) Note: use the official pip source or the Aliyun pip source; to switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Choice II: if you are unfamiliar with Python) Using anaconda is similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11  # create the anaconda environment
-conda activate gptac_venv               # activate the anaconda environment
-python -m pip install -r requirements.txt  # this step is the same as the pip installation
-```
Click to expand if you need Tsinghua ChatGLM / Fudan MOSS as additional backends

-
-To use [Tsinghua ChatGLM] / [Fudan MOSS] as backends, additional dependencies must be installed (prerequisites: you know Python, have used Pytorch, and your machine is powerful enough):
-```sh
-# [Optional step I] Support Tsinghua ChatGLM. Note: if you hit the error "Call ChatGLM fail cannot load ChatGLM parameters normally", see the following:
-# 1: The default installation is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda.
-# 2: If the model cannot be loaded because of insufficient machine configuration, change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-#    to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
-python -m pip install -r request_llm/requirements_chatglm.txt
-
-# [Optional step II] Support Fudan MOSS
-python -m pip install -r request_llm/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # you must be in the project root directory when running this line
-
-# [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the models you expect.
-# Currently supported models:
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
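-
-To make Optional step III concrete: besides AVAIL_LLM_MODELS, the default model shown in the UI is chosen by a separate key, which we assume is named LLM_MODEL as in the upstream config.py:
-
-```python
-# config.py: hypothetical selection
-LLM_MODEL = "gpt-3.5-turbo"                      # default model used by the UI
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "chatglm", "moss"]  # models offered in the drop-down
-```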

-
-
-
-4. Run
-```sh
-python main.py
-```
-
-5. Test function plugins
-```
-- Test the function-plugin template (asks GPT what happened in history today); use this function as a basis for more complex functionality
-  Click "[Function plugin template demo] Today in history"
-```
-
-## Installation - Method 2: Using Docker
-
-1. ChatGPT only (recommended for most people)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git  # download
-cd gpt_academic                                             # enter the path
-nano config.py   # open config.py with any text editor and configure "Proxy", "API_KEY", "WEB_PORT" (e.g. 50923), etc.
-docker build -t gpt-academic .  # install
-
-# (last step, choice 1) On Linux, using --net=host is more convenient
-docker run --rm -it --net=host gpt-academic
-# (last step, choice 2) On macOS/Windows, use the -p option to expose the container's port (e.g. 50923) to a host port
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
-
-``` sh
-# Edit docker-compose.yml: delete plans 1 and 3 and keep plan 2, then adjust plan 2's configuration in docker-compose.yml following the comments there.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
-``` sh
-# Edit docker-compose.yml: delete plans 1 and 2 and keep plan 3, then adjust plan 3's configuration in docker-compose.yml following the comments there.
-docker-compose up
-```
-
-
-## Installation - Method 3: Other deployment methods
-
-1. How to use a reverse-proxy URL / Microsoft Azure API
-Configure API_URL_REDIRECT according to `config.py`.
-
-2. Remote cloud-server deployment (requires cloud-server knowledge and experience)
-Please visit [deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97).
-
-3. Using WSL2 (Windows Subsystem for Linux)
-Please visit [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2).
-
-4. How to run under a secondary URL (e.g. `http://localhost/subpath`)
-Please see the [FastAPI operating instructions](docs/WithFastapi.md).
-
-5. Run with docker-compose
-Read docker-compose.yml and follow its instructions.
-
----
-# Advanced usage
-## Custom shortcut buttons / custom function plugins
-
-1. Custom shortcut buttons (academic shortcuts)
-Open `core_functional.py` with any text editor, add an entry, and restart the program. (If the button is already added and visible, both the prefix and the suffix can be modified effectively without restarting the program.)
-For example:
-```
-"超级英译中": {
-    # Prefix: used to describe what you are asking for, e.g. translate, explain code, polish, etc.
-    "Prefix": "Translate the following into Chinese, and then use a markdown table to explain each proper noun that appears in the text one by one:\n\n",
-
-    # Suffix: added after your input; together with the prefix it can, for example, wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
- -
-
-2. Custom function plugins
-Write powerful function plugins to perform whatever task you want.
-Writing and debugging plugins for this project is easy: with some basic Python knowledge you can implement your own plugin by imitating the provided template. For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Latest update
-## New feature highlights
-
-1. Conversation saving. Call "Save current conversation" in the function-plugin area to save the current conversation as a readable, restorable HTML file; call "Load conversation history" in the function-plugin area (drop-down menu) to restore a previous conversation. Tip: clicking "Load conversation history" without specifying a file shows the cached HTML archives, and clicking "Delete all local conversation history" deletes all HTML caches.
-
-2. Report generation. Most plugins generate a work report after execution.
-
-3. Modular function design: even simple interfaces can support powerful functionality.
-
-4. An open-source project capable of translating itself.
-
-5. Translating other open-source projects is no trouble.
-
-6. A [live2d](https://github.com/fghrsh/live2d_demo) decoration feature (disabled by default; `config.py` must be edited).
-
-7. Added MOSS large language model support
-
-8. OpenAI image generation
-
-9. OpenAI audio analysis and summarization
-
-10. Full-text LaTeX proofreading and correction
-
-## Version:
-- version 3.5 (TODO): call all of this project's function plugins with natural language (high priority)
-- version 3.4 (TODO): improved multithreading support for local large models
-- version 3.3: added internet information synthesis
-- version 3.2: function plugins support more parameter interfaces (conversation saving, interpreting code in any language + querying any combination of LLMs simultaneously)
-- version 3.1: support for querying multiple GPT models at once! api2d support; load balancing across multiple apikeys
-- version 3.0: support for chatglm and other small llms
-- version 2.6: restructured the plugin architecture for better interactivity; added more plugins
-- version 2.5: self-updating; fixed overly long text and token overflow when summarizing a whole project
-- version 2.4: (1) added full PDF translation; (2) added switching the input area's position; (3) added a vertical layout option; (4) optimized multithreaded function plugins
-- version 2.3: improved multithreaded interactivity
-- version 2.2: function plugins support hot reloading
-- version 2.1: collapsible layout
-- version 2.0: introduced modular function plugins
-- version 1.0: basic functionality
-
-gpt_academic developer QQ group 2: 610599535
-
-- Known issues
-  - Some browser translation plugins interfere with this software's frontend
-  - A gradio version that is too high or too low can cause various glitches
-
-## References and learning
-
-```
-The design draws on many excellent projects, mainly:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.md.Portuguese.md b/docs/README.md.Portuguese.md
deleted file mode 100644
index 2347d5a74f7c7c90b670fd0368aa447ee2660113..0000000000000000000000000000000000000000
--- a/docs/README.md.Portuguese.md
+++ /dev/null
@@ -1,324 +0,0 @@
-> **Note**
->
-> When installing dependencies, strictly select the versions **specified** in the requirements.txt file.
->
-> `pip install -r requirements.txt`
->
-
-# GPT Academic optimization (GPT Academic)
-
-**If you liked this project, please give it a star. If you have created more useful academic shortcuts or function plugins, feel free to open an issue or pull request. We also have READMEs in [English|](README_EN.md)[Japanese|](README_JP.md)[Korean|](https://github.com/mldljyh/ko_gpt_academic)[Russian|](README_RS.md)[French](README_FR.md) translated by this project itself.
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Please note that only function plugins (buttons) marked in **red** can read files.
Alguns plugins estão localizados no **menu suspenso** na área de plugins. Além disso, damos as boas-vindas e tratamos com a **maior prioridade** qualquer PR de novos plugins!
->
-> 2. As funções de cada arquivo neste projeto são detalhadas em [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A), uma auto-análise do projeto gerada pelo GPT, que também pode ser regenerada a qualquer momento ao clicar nos plugins relacionados. As perguntas frequentes estão resumidas no [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Instruções de Instalação](#installation).
->
-> 3. Este projeto é compatível com e incentiva o uso de modelos de linguagem nacionais, como chatglm, RWKV, Pangu, etc. Suporta a coexistência de várias chaves de API, que podem ser preenchidas no arquivo de configuração como `API_KEY="openai-key1,openai-key2,api2d-key3"`. Quando precisar alterar temporariamente o `API_KEY`, basta digitar o `API_KEY` temporário na área de entrada e pressionar Enter para que ele entre em vigor.
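-
-As a side note on the multi-key setup described above: one way to picture the load balancing over a comma-separated key string is a simple round-robin rotation. This is a minimal sketch only; `select_api_key` is a hypothetical helper name, not the project's actual implementation.
-
-```python
-import itertools
-
-API_KEY = "openai-key1,openai-key2,api2d-key3"  # format quoted from the note above
-
-# Round-robin keeps the request rate per key roughly balanced.
-_key_cycle = itertools.cycle(k.strip() for k in API_KEY.split(","))
-
-def select_api_key() -> str:
-    return next(_key_cycle)
-
-print(select_api_key())  # -> openai-key1
-print(select_api_key())  # -> openai-key2
-```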
- -Funcionalidade | Descrição ---- | --- -Um clique de polimento | Suporte a um clique polimento, um clique encontrar erros de gramática no artigo -Tradução chinês-inglês de um clique | Tradução chinês-inglês de um clique -Explicação de código de um único clique | Exibir código, explicar código, gerar código, adicionar comentários ao código -[Teclas de atalho personalizadas](https://www.bilibili.com/video/BV14s4y1E7jN) | Suporte a atalhos personalizados -Projeto modular | Suporte para poderosos plugins[de função personalizada](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), os plugins suportam[hot-reload](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Análise automática do programa](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin de função][um clique para entender](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) o código-fonte do projeto -[Análise do programa](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin de função] Um clique pode analisar a árvore de projetos do Python/C/C++/Java/Lua/... -Leitura de artigos, [tradução](https://www.bilibili.com/video/BV1KT411x7Wn) de artigos | [Plugin de função] um clique para interpretar o resumo de artigos LaTeX/PDF e gerar resumo -Tradução completa LATEX, polimento|[Plugin de função] Uma clique para traduzir ou polir um artigo LATEX -Geração em lote de comentários | [Plugin de função] Um clique gera comentários de função em lote -[Tradução chinês-inglês](https://www.bilibili.com/video/BV1yo4y157jV/) markdown | [Plugin de função] Você viu o README em 5 linguagens acima? -Relatório de análise de chat | [Plugin de função] Gera automaticamente um resumo após a execução -[Funcionalidade de tradução de artigos completos em PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin de função] Extrai o título e o resumo do artigo PDF e traduz o artigo completo (multithread) -Assistente arXiv | [Plugin de função] Insira o url do artigo arXiv para traduzir o resumo + baixar PDF -Assistente de integração acadêmica do Google | [Plugin de função] Dê qualquer URL de página de pesquisa acadêmica do Google e deixe o GPT escrever[trabalhos relacionados](https://www.bilibili.com/video/BV1GP411U7Az/) -Agregação de informações da Internet + GPT | [Plugin de função] Um clique para obter informações do GPT através da Internet e depois responde a perguntas para informações nunca ficarem desatualizadas -Exibição de fórmulas/imagem/tabela | Pode exibir simultaneamente a forma de renderização e[TEX] das fórmulas, suporte a fórmulas e realce de código -Suporte de plugins de várias linhas | Suporte a várias chamadas em linha do chatgpt, um clique para processamento[de massa de texto](https://www.bilibili.com/video/BV1FT411H7c5/) ou programa -Tema gradio escuro | Adicione ``` /?__theme=dark``` ao final da url do navegador para ativar o tema escuro -[Suporte para vários modelos LLM](https://www.bilibili.com/video/BV1wT411p7yf), suporte para a nova interface API2D | A sensação de ser atendido simultaneamente por GPT3.5, GPT4, [Chatglm THU](https://github.com/THUDM/ChatGLM-6B), [Moss Fudan](https://github.com/OpenLMLab/MOSS) deve ser ótima, certo? 
-Mais modelos LLM incorporados, suporte para a implantação [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Adição da interface Newbing (New Bing) e introdução do [JittorLLMs](https://github.com/Jittor/JittorLLMs) da THU, com suporte a LLaMA, RWKV e Pangu-α
-Mais recursos novos mostrados (geração de imagens, etc.) ... | Consulte o final deste documento ...
- -- Nova interface (Modifique a opção LAYOUT em `config.py` para alternar entre o layout esquerdo/direito e o layout superior/inferior) -
- -
- All buttons are dynamically generated by reading functional.py, and you can add custom functions at will, freeing up the clipboard
-
- -
-
-- Proofreading/error correction
-
-
- -
-
-- If the output contains formulas, they will be displayed in both tex form and rendered form at the same time, which is convenient for copying and reading
-
-
- -
- -- Don't want to read the project code? Just show the whole project to chatgpt - - -
- -
- -- Mix the use of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) - - -
- -
-
---
-# Instalação
-## Installation-Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-
-```sh
-git clone https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure the API KEY
-
-In `config.py`, configure the API KEY and other settings; see also [Special Network Environment Settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. When the program runs, it first checks whether there is a private configuration file named `config_private.py`, and uses the configuration in it to override the configuration with the same name in `config.py`. Therefore, if you understand our configuration-reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` into `config_private.py`. `config_private.py` is not tracked by git, which keeps your private information more secure. P.S. The project also supports configuring most options through `environment variables`; the format of environment variables follows the `docker-compose` file. Reading priority: `environment variable` > `config_private.py` > `config.py`)
-
-
-3. Install dependencies
-
-```sh
-# (Option I: for those familiar with python)(python version 3.9 or above, the newer the better), note: use the official pip source or the Alibaba pip source. Temporary way to switch source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: for those who are unfamiliar with python) use anaconda, the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11 # create anaconda environment
-conda activate gptac_venv # activate anaconda environment
-python -m pip install -r requirements.txt # This step is the same as the pip installation step
-```
-
If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, click to expand here -

-
-[Optional Step] If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install more dependencies (prerequisites: familiar with Python + have used Pytorch + computer configuration is strong enough):
-```sh
-# 【Optional Step I】Support Tsinghua ChatGLM. Note: If you encounter a "Call ChatGLM fails, cannot load ChatGLM parameters normally" error, refer to the following: 1: The version installed by default is torch+cpu; using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py, changing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llm/requirements_chatglm.txt
-
-# 【Optional Step II】Support Fudan MOSS
-python -m pip install -r request_llm/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When executing this line of code, you must be in the project root path
-
-# 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-

-
-
-
-
-4. Run
-
-```sh
-python main.py
-```
-
-5. Plugin de Função de Teste
-```
-- Função de modelo de plug-in de teste (exige que o GPT responda ao que aconteceu hoje na história), que você pode usar como modelo para implementar funções mais complexas
-    Clique em "[Função de plug-in de modelo de demonstração] O que aconteceu hoje na história?"
-```
-
-## Instalação - Método 2: Usando o Docker
-
-1. Apenas ChatGPT (recomendado para a maioria das pessoas)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git # Baixar o projeto
-cd gpt_academic # Entrar no caminho
-nano config.py # Editar config.py com qualquer editor de texto configurando "Proxy", "API_KEY" e "WEB_PORT" (por exemplo, 50923), etc.
-docker build -t gpt-academic . # Instalar
-
-# (Última etapa - escolha 1) Dentro do ambiente Linux, é mais fácil e rápido usar `--net=host`
-docker run --rm -it --net=host gpt-academic
-# (Última etapa - escolha 2) Em ambientes macOS/Windows, você só pode usar a opção -p para expor a porta do contêiner (por exemplo, 50923) para a porta no host
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (conhecimento de Docker necessário)
-
-``` sh
-# Edite o arquivo docker-compose.yml, remova as soluções 1 e 3, mantenha a solução 2, e siga as instruções nos comentários do arquivo
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (conhecimento de Docker necessário)
-``` sh
-# Edite o arquivo docker-compose.yml, remova as soluções 1 e 2, mantenha a solução 3, e siga as instruções nos comentários do arquivo
-docker-compose up
-```
-
-
-## Instalação - Método 3: Outros Métodos de Implantação
-
-1. Como usar URLs de proxy reverso/Microsoft Azure API
-Basta configurar o API_URL_REDIRECT de acordo com as instruções em `config.py`.
-
-2. Implantação em servidores em nuvem remotos (requer conhecimento e experiência de servidores em nuvem)
-Acesse [Wiki de implantação remota em servidor em nuvem](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Usando a WSL2 (subsistema do Windows para Linux)
-Acesse [Wiki da implantação da WSL2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. Como executar em um subdiretório (ex. `http://localhost/subpath`)
-Acesse [Instruções de execução FastAPI](docs/WithFastapi.md)
-
-5. Execute usando o docker-compose
-Leia o arquivo docker-compose.yml e siga as instruções.
----
-# Uso Avançado
-## Customize novos botões de acesso rápido / plug-ins de função personalizados
-
-1. Personalizar novos botões de acesso rápido (atalhos acadêmicos)
-Abra `core_functional.py` em qualquer editor de texto, adicione os seguintes itens e reinicie o programa. (Se o botão já foi adicionado e pode ser visto, prefixos e sufixos são compatíveis com modificações em tempo real e não exigem reinício do programa para ter efeito.)
-Por exemplo,
-```
-"Super Eng:": {
-    # Prefixo, será adicionado antes da sua entrada. Por exemplo, para descrever sua solicitação, como tradução, explicação de código, polimento, etc.
-    "Prefix": "Por favor, traduza o seguinte conteúdo para chinês e use uma tabela em Markdown para explicar termos próprios no texto:\n\n",
-
-    # Sufixo, será adicionado após a sua entrada. Por exemplo, emparelhado com o prefixo, pode colocar sua entrada entre aspas.
-    "Suffix": "",
-},
-```
- -
-
-2. Personalizar plug-ins de função
-
-Escreva plug-ins de função poderosos para executar tarefas que você deseja, mesmo aquelas que não imaginava serem possíveis.
-A dificuldade geral de escrever e depurar plug-ins neste projeto é baixa; se você tem algum conhecimento básico de python, pode implementar suas próprias funções com base no modelo que fornecemos.
-Para mais detalhes, consulte o [Guia do plug-in de função](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Última atualização
-## Novas funções dinâmicas
-
-1. Função de salvamento de diálogo. Ao chamar o plug-in de função "Salvar diálogo atual", é possível salvar o diálogo atual em um arquivo html legível e recuperável. Além disso, ao chamar o plug-in de função "Carregar arquivo de histórico de diálogo" no menu suspenso da área de plug-in, é possível restaurar uma conversa anterior. Dica: clicar em "Carregar arquivo de histórico de diálogo" sem especificar um arquivo permite visualizar o cache do arquivo html de histórico. Clicar em "Excluir todo o registro de histórico de diálogo local" permite excluir todo o cache de arquivo html.
-
- -
- - -2. Geração de relatório. A maioria dos plug-ins gera um relatório de trabalho após a conclusão da execução. -
- - - -
- -3. Design modular de funcionalidades, com interfaces simples, mas suporte a recursos poderosos -
- - -
- -4. Este é um projeto de código aberto que é capaz de "auto-traduzir-se". -
- -
- -5. A tradução de outros projetos de código aberto é simples. -
- -
- -
- -
- -6. Recursos decorativos para o [live2d](https://github.com/fghrsh/live2d_demo) (desativados por padrão, é necessário modificar o arquivo `config.py`) -
- -
- -7. Suporte ao modelo de linguagem MOSS -
- -
- -8. Geração de imagens pelo OpenAI -
- -
- -9. Análise e resumo de áudio pelo OpenAI -
- -
- -10. Revisão e correção de erros de texto em Latex. -
- -
-
-## Versão:
-- Versão 3.5(Todo): Usar linguagem natural para chamar todas as funções do projeto (prioridade alta)
-- Versão 3.4(Todo): Melhorar o suporte à multithread para o chatglm local
-- Versão 3.3: +Funções integradas de internet
-- Versão 3.2: Suporte a mais interfaces de parâmetros de plug-in (função de salvar diálogo, interpretação de códigos de várias linguagens, perguntas de combinações LLM arbitrárias ao mesmo tempo)
-- Versão 3.1: Suporte a perguntas a vários modelos de gpt simultaneamente! Suporte para api2d e balanceamento de carga para várias chaves api
-- Versão 3.0: Suporte ao chatglm e outros LLMs de pequeno porte
-- Versão 2.6: Refatoração da estrutura de plug-in, melhoria da interatividade e adição de mais plug-ins
-- Versão 2.5: Autoatualização, resolvendo problemas de token de texto excessivamente longo e estouro ao compilar grandes projetos
-- Versão 2.4: (1) Adição de funcionalidade de tradução de texto completo em PDF; (2) Adição de funcionalidade de mudança de posição da área de entrada; (3) Adição de opção de layout vertical; (4) Otimização de plug-ins de multithread.
-- Versão 2.3: Melhoria da interatividade de multithread
-- Versão 2.2: Suporte à recarga a quente de plug-ins
-- Versão 2.1: Layout dobrável
-- Versão 2.0: Introdução de plug-ins de função modular
-- Versão 1.0: Funcionalidades básicas
-
-gpt_academic desenvolvedores QQ grupo-2: 610599535
-
-- Problemas conhecidos
-    - Extensões de tradução de alguns navegadores podem interferir na execução do front-end deste software
-    - Uma versão muito alta ou muito baixa do Gradio pode causar vários erros
-
-## Referências e Aprendizado
-
-```
-Foi feita referência a muitos projetos excelentes em código, principalmente:
-
-# Projeto1: ChatGLM-6B da Tsinghua:
-https://github.com/THUDM/ChatGLM-6B
-
-# Projeto2: JittorLLMs da Tsinghua:
-https://github.com/Jittor/JittorLLMs
-
-# Projeto3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Projeto4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Projeto5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Mais:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README_EN.md b/docs/README_EN.md
deleted file mode 100644
index 02b8588c38f1b52228840b882e509064daecb3f0..0000000000000000000000000000000000000000
--- a/docs/README_EN.md
+++ /dev/null
@@ -1,322 +0,0 @@
-> **Note**
->
-> This English README is automatically generated by the markdown translation plugin in this project, and may not be 100% correct.
->
-> When installing dependencies, **please strictly select the versions** specified in requirements.txt.
->
-> `pip install -r requirements.txt`
-
-# GPT Academic Optimization (GPT Academic)
-
-**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request.
-To translate this project into an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**
-
-> Note:
->
-> 1. Please note that only the function plugins (buttons) marked in **red** support reading files. Some plugins are in the **drop-down menu** in the plugin area. We welcome and process any new plugins with the **highest priority**!
-> 2. The function of each file in this project is detailed in the self-translation analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). 
With version iteration, you can also click on related function plugins at any time to call GPT to regenerate the project's self-analysis report. Common questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation). -> 3. This project is compatible with and encourages trying domestic large language models such as chatglm, RWKV, Pangu, etc. Multiple API keys are supported and can be filled in the configuration file like `API_KEY="openai-key1,openai-key2,api2d-key3"`. When temporarily changing `API_KEY`, enter the temporary `API_KEY` in the input area and press enter to submit, which will take effect. - -
- -Function | Description ---- | --- -One-click polishing | Supports one-click polishing and one-click searching for grammar errors in papers. -One-click Chinese-English translation | One-click Chinese-English translation. -One-click code interpretation | Displays, explains, generates, and adds comments to code. -[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys. -Modular design | Supports custom powerful [function plug-ins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), plug-ins support [hot update](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). -[Self-program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plug-in] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the source code of this project -[Program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plug-in] One-click profiling of other project trees in Python/C/C++/Java/Lua/... -Reading papers, [translating](https://www.bilibili.com/video/BV1KT411x7Wn) papers | [Function Plug-in] One-click interpretation of latex/pdf full-text papers and generation of abstracts. -Latex full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plug-in] One-click translation or polishing of latex papers. -Batch annotation generation | [Function plug-in] One-click batch generation of function annotations. -Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plug-in] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above? -Chat analysis report generation | [Function plug-in] Automatically generate summary reports after running. -[PDF full-text translation function](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plug-in] PDF paper extract title & summary + translate full text (multi-threaded) -[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plug-in] Enter the arxiv article url and you can translate abstracts and download PDFs with one click. -[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plug-in] Given any Google Scholar search page URL, let GPT help you [write relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/) -Internet information aggregation+GPT | [Function plug-in] One-click [let GPT get information from the Internet first](https://www.bilibili.com/video/BV1om4y127ck), then answer questions, and let the information never be outdated. -Formula/image/table display | Can display formulas in both [tex form and render form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), support formulas and code highlighting. -Multi-threaded function plug-in support | Supports multi-threaded calling of chatgpt, and can process [massive text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs with one click. -Start Dark Gradio [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Add ```/?__theme=dark``` after the browser URL to switch to the dark theme. 
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | The feeling of being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time must be great, right? -More LLM model access, support [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Add Newbing interface (New Bing), introduce Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) to support [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Panguα](https://openi.org.cn/pangu/) -More new feature displays (image generation, etc.)…… | See the end of this document for more... -
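-
-The multi-threaded plugin row in the table above is easiest to picture as a fan-out over independent chat requests. A minimal sketch, assuming a blocking single-request helper; `ask_gpt` is a hypothetical stand-in, not the project's actual API:
-
-```python
-from concurrent.futures import ThreadPoolExecutor
-
-def ask_gpt(fragment: str) -> str:
-    # Placeholder for one blocking chat-completion request.
-    return f"(reply to) {fragment}"
-
-def process_fragments(fragments, max_workers=8):
-    # Threads fit here because each call is network-bound, not CPU-bound.
-    with ThreadPoolExecutor(max_workers=max_workers) as pool:
-        return list(pool.map(ask_gpt, fragments))
-```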
- -- New interface (modify the LAYOUT option in `config.py` to switch between "left and right layout" and "up and down layout") -
- -
- All buttons are dynamically generated by reading `functional.py`, and you can add custom functions freely, freeing up the clipboard.
-
- -
- -- polishing/correction -
- -
- -- If the output contains formulas, they will be displayed in both `tex` and render form, making it easy to copy and read. -
- -
- -- Tired of reading the project code? ChatGPT can explain it all. -
- -
- -- Multiple large language models are mixed, such as ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4. -
- -
- ---- -# Installation -## Method 1: Directly running (Windows, Linux or MacOS) - -1. Download the project -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure the API_KEY - -Configure the API KEY in `config.py`, [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1). - -(P.S. When the program is running, it will first check if there is a private configuration file named `config_private.py` and use the configurations in it to override the same configurations in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and transfer (copy) the configurations in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your private information more secure. P.S. The project also supports configuring most options through `environment variables`. Please refer to the format of `docker-compose` file when writing. Reading priority: `environment variables` > `config_private.py` > `config.py`) - - -3. Install the dependencies -```sh -# (Option I: If familiar with python) (python version 3.9 or above, the newer the better), note: use official pip source or Ali pip source, temporary switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: If not familiar with python) Use anaconda, the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # create anaconda environment -conda activate gptac_venv # activate anaconda environment -python -m pip install -r requirements.txt # this step is the same as pip installation -``` - -
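-
-Before moving on, the reading priority described in step 2 (`environment variables` > `config_private.py` > `config.py`) can be pictured with the following sketch; `read_single_conf` is an illustrative name, not the project's actual loader:
-
-```python
-import importlib
-import os
-
-def read_single_conf(key: str):
-    if key in os.environ:                    # 1) environment variables win
-        return os.environ[key]
-    try:                                     # 2) then the git-ignored config_private.py
-        private = importlib.import_module("config_private")
-        if hasattr(private, key):
-            return getattr(private, key)
-    except ModuleNotFoundError:
-        pass
-    return getattr(importlib.import_module("config"), key)  # 3) tracked defaults
-```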
If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand -

- -[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + used Pytorch + computer configuration is strong enough): -```sh -# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True) -python -m pip install -r request_llm/requirements_chatglm.txt - -# [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the root directory of the project - -# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
-
-
-
-4. Run it
-```sh
-python main.py
-```
-
-5. Test Function Plugin
-```
-- Test function plugin template function (ask GPT what happened today in history), which you can use as a template for implementing more complex functions
-    Click "[Function Plugin Template Demo] Today in History"
-```
-
-## Installation - Method 2: Using Docker
-
-1. ChatGPT Only (Recommended for Most People)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git # Download project
-cd gpt_academic # Enter path
-nano config.py # Edit config.py with any text editor, configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
-docker build -t gpt-academic . # Install
-
-#(Last step - option 1) In a Linux environment, use `--net=host` for convenience and speed.
-docker run --rm -it --net=host gpt-academic
-#(Last step - option 2) In a macOS/Windows environment, only the -p option can be used to expose the container's port (e.g. 50923) to the host machine's port.
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (Requires Docker Knowledge)
-
-``` sh
-# Modify docker-compose.yml: delete Plan 1 and Plan 3, and keep Plan 2. Modify the configuration of Plan 2 in docker-compose.yml, referring to the comments in it.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (Requires Docker Knowledge)
-
-``` sh
-# Modify docker-compose.yml: delete Plan 1 and Plan 2, and keep Plan 3. Modify the configuration of Plan 3 in docker-compose.yml, referring to the comments in it.
-docker-compose up
-```
-
-## Installation - Method 3: Other Deployment Options
-
-1. How to Use a Reverse Proxy URL/Microsoft Azure Cloud API
-Configure API_URL_REDIRECT according to the instructions in `config.py`.
-
-2. Deploy to a Remote Server (Requires Knowledge and Experience with Cloud Servers)
-Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL2 (Windows Subsystem for Linux)
-Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to Run Under a Subpath (e.g. `http://localhost/subpath`)
-Please visit [FastAPI Running Instructions](docs/WithFastapi.md)
-
-5. Using docker-compose to Run
-Read the docker-compose.yml and follow the prompts.
-
----
-# Advanced Usage
-## Custom New Shortcut Buttons / Custom Function Plugins
-
-1. Custom New Shortcut Buttons (Academic Hotkeys)
-Open `core_functional.py` with any text editor, add an entry as follows, and restart the program. (If the button has been successfully added and is visible, the prefix and suffix can be hot-modified without having to restart the program.)
-For example,
-```
-"Super English-to-Chinese": {
-    # Prefix, which will be added before your input. For example, used to describe your request, such as translation, code explanation, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese and then use a markdown table to explain the proprietary terms that appear in the text:\n\n",
-
-    # Suffix, which is added after your input. For example, combined with the prefix, it can surround your input with quotes.
-    "Suffix": "",
-},
-```
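-
-Conceptually, such an entry simply brackets whatever is in the input area, exactly as the comments above describe. A sketch of that idea (not the project's exact code):
-
-```python
-def apply_shortcut(entry: dict, user_input: str) -> str:
-    # Prefix goes before the input, Suffix after it.
-    return entry["Prefix"] + user_input + entry["Suffix"]
-```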
- -
- -2. Custom Function Plugins - -Write powerful function plugins to perform any task you can think of, even those you cannot think of. -The difficulty of plugin writing and debugging in this project is very low. As long as you have a certain knowledge of Python, you can implement your own plug-in functions based on the template we provide. -For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). - ---- -# Latest Update -## New Feature Dynamics -1. Conversation saving function. Call `Save current conversation` in the function plugin area to save the current conversation as a readable and recoverable HTML file. In addition, call `Load conversation history archive` in the function plugin area (dropdown menu) to restore previous sessions. Tip: Clicking `Load conversation history archive` without specifying a file will display the cached history of HTML archives, and clicking `Delete all local conversation history` will delete all HTML archive caches. - -
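-
-The "readable and recoverable" HTML in feature 1 above can be pictured as rendered turns plus an embedded machine-readable copy of the history. A purely illustrative sketch; the plugin's real file format may differ:
-
-```python
-import html
-import json
-
-def save_chat_to_html(history, path):
-    # history: list of (question, answer) pairs
-    rows = "".join(
-        f"<p><b>user:</b> {html.escape(q)}</p><p><b>gpt:</b> {html.escape(a)}</p>"
-        for q, a in history
-    )
-    payload = html.escape(json.dumps(history))  # recoverable copy of the raw turns
-    with open(path, "w", encoding="utf-8") as f:
-        f.write(f"<html><body>{rows}<!--history:{payload}--></body></html>")
-```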
- -
- - -2. Report generation. Most plugins will generate work reports after execution. - -
- - - -
- - -3. Modular function design with simple interfaces that support powerful functions. - -
- - -
- - -4. This is an open-source project that can "self-translate". - -
- -
- -5. Translating other open-source projects is a piece of cake. - -
- -
- -
- -
- -6. A small feature decorated with [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default, need to modify `config.py`). - -
- -
- -7. Added MOSS large language model support. -
- -
- -8. OpenAI image generation. -
- -
- -9. OpenAI audio parsing and summarization. -
- -
- -10. Full-text proofreading and error correction of LaTeX. -
- -
-
-
-## Versions:
-- version 3.5(Todo): Use natural language to call all function plugins of this project (high priority).
-- version 3.4(Todo): Improve multi-threading support for chatglm local large models.
-- version 3.3: +Internet information integration function.
-- version 3.2: Function plugin supports more parameter interfaces (save conversation function, interpretation of code in any language + simultaneous inquiry of any LLM combination).
-- version 3.1: Support simultaneous inquiry of multiple GPT models! Support api2d, and support load balancing of multiple apikeys.
-- version 3.0: Support chatglm and other small LLM models.
-- version 2.6: Refactored plugin structure, improved interactivity, and added more plugins.
-- version 2.5: Self-updating, solving the problem of text overflow and token overflow when summarizing large engineering source codes.
-- version 2.4: (1) Added PDF full-text translation function; (2) Added the function of switching the position of the input area; (3) Added vertical layout option; (4) Optimized multi-threading function plugins.
-- version 2.3: Enhanced multi-threading interactivity.
-- version 2.2: Function plugin supports hot reloading.
-- version 2.1: Collapsible layout.
-- version 2.0: Introduction of modular function plugins.
-- version 1.0: Basic functions.
-
-gpt_academic Developer QQ Group-2: 610599535
-
-- Known Issues
-    - Some browser translation plugins interfere with the front-end operation of this software.
-    - Gradio versions that are too high or too low can lead to various exceptions.
-
-## Reference and Learning
-
-```
-Many other excellent designs have been referenced in the code, mainly including:
-
-# Project 1: THU ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: THU JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/docs/README_FR.md b/docs/README_FR.md
deleted file mode 100644
index af3bb42c7904361631ba0dff72e841a13047731b..0000000000000000000000000000000000000000
--- a/docs/README_FR.md
+++ /dev/null
@@ -1,323 +0,0 @@
-> **Note**
->
-> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut-être pas correct à 100 %.
->
-> During installation, please strictly select the versions **specified** in requirements.txt.
->
-> `pip install -r requirements.txt`
->
-
-# Optimisation académique GPT (GPT Academic)
-
-**Si vous aimez ce projet, veuillez lui donner une étoile. Si vous avez trouvé des raccourcis académiques ou des plugins fonctionnels plus utiles, n'hésitez pas à ouvrir une issue ou une pull request.
-Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental).**
-
-> **Note**
->
-> 1. Veuillez noter que seuls les plugins de fonctions (boutons) **en rouge** prennent en charge la lecture de fichiers. Certains plugins se trouvent dans le **menu déroulant** de la zone de plugins. De plus, nous accueillons et traitons les nouvelles pull requests pour les plugins avec **la plus haute priorité** !
->
-> 2. 
Les fonctions de chaque fichier de ce projet sont expliquées en détail dans l'auto-analyse [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins de fonctions pertinents et appeler GPT pour régénérer le rapport d'auto-analyse du projet à tout moment. Les FAQ sont résumées dans [le wiki](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Méthode d'installation](#installation). -> -> 3. Ce projet est compatible avec et encourage l'utilisation de grands modèles de langage nationaux tels que chatglm, RWKV, Pangu, etc. La coexistence de plusieurs clés API est prise en charge et peut être remplie dans le fichier de configuration, tel que `API_KEY="openai-key1,openai-key2,api2d-key3"`. Lorsque vous souhaitez remplacer temporairement `API_KEY`, saisissez temporairement `API_KEY` dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer. - -
- -Functionnalité | Description ---- | --- -Révision en un clic | prend en charge la révision en un clic et la recherche d'erreurs de syntaxe dans les articles -Traduction chinois-anglais en un clic | Traduction chinois-anglais en un clic -Explication de code en un clic | Affichage, explication, génération et ajout de commentaires de code -[Raccourcis personnalisés](https://www.bilibili.com/video/BV14s4y1E7jN) | prend en charge les raccourcis personnalisés -Conception modulaire | prend en charge de puissants plugins de fonction personnalisée, les plugins prennent en charge la [mise à jour à chaud](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Autoscanner](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] [Compréhension instantanée](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) du code source de ce projet -[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] Analyse en un clic de la structure d'autres projets Python / C / C ++ / Java / Lua / ... -Lecture d'articles, [traduction](https://www.bilibili.com/video/BV1KT411x7Wn) d'articles | [Plug-in de fonction] Compréhension instantanée de l'article latex / pdf complet et génération de résumés -[Traduction](https://www.bilibili.com/video/BV1nk4y1Y7Js/) et [révision](https://www.bilibili.com/video/BV1FT411H7c5/) complets en latex | [Plug-in de fonction] traduction ou révision en un clic d'articles en latex -Génération de commentaires en masse | [Plug-in de fonction] Génération en un clic de commentaires de fonction en masse -Traduction [chinois-anglais](https://www.bilibili.com/video/BV1yo4y157jV/) en Markdown | [Plug-in de fonction] avez-vous vu la [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) pour les 5 langues ci-dessus? 
-Génération de rapports d'analyse de chat | [Plug-in de fonction] Génère automatiquement un rapport de résumé après l'exécution -[Traduction intégrale en pdf](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plug-in de fonction] Extraction de titre et de résumé de l'article pdf + traduction intégrale (multi-thread) -[Aide à arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plug-in de fonction] Entrer l'url de l'article arxiv pour traduire et télécharger le résumé en un clic -[Aide à la recherche Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plug-in de fonction] Donnez l'URL de la page de recherche Google Scholar, laissez GPT vous aider à [écrire des ouvrages connexes](https://www.bilibili.com/video/BV1GP411U7Az/) -Aggrégation d'informations en ligne et GPT | [Plug-in de fonction] Permet à GPT de [récupérer des informations en ligne](https://www.bilibili.com/video/BV1om4y127ck), puis de répondre aux questions, afin que les informations ne soient jamais obsolètes -Affichage d'équations / images / tableaux | Fournit un affichage simultané de [la forme tex et de la forme rendue](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), prend en charge les formules mathématiques et la coloration syntaxique du code -Prise en charge des plugins à plusieurs threads | prend en charge l'appel multithread de chatgpt, un clic pour traiter [un grand nombre d'articles](https://www.bilibili.com/video/BV1FT411H7c5/) ou de programmes -Thème gradio sombre en option de démarrage | Ajoutez```/?__theme=dark``` à la fin de l'URL du navigateur pour basculer vers le thème sombre -[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Sera probablement très agréable d'être servi simultanément par GPT3.5, GPT4, [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B), [MOSS de Fudan](https://github.com/OpenLMLab/MOSS) -Plus de modèles LLM, déploiement de [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Ajout prise en charge de l'interface Newbing (nouvelle bing), introduction du support de [Jittorllms de Tsinghua](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) et [Panguα](https://openi.org.cn/pangu/) -Plus de nouvelles fonctionnalités (génération d'images, etc.) ... | Voir la fin de ce document pour plus de détails ... - -
- - -- Nouvelle interface (modifier l'option LAYOUT de `config.py` pour passer d'une disposition ``gauche-droite`` à une disposition ``haut-bas``) -
- -
- Tous les boutons sont générés dynamiquement en lisant functional.py et peuvent être facilement personnalisés pour ajouter des fonctionnalités personnalisées, ce qui facilite l'utilisation du presse-papiers. -
- -
- -- Correction d'erreurs/lissage du texte. -
- -
- -- Si la sortie contient des équations, elles sont affichées à la fois sous forme de tex et sous forme rendue pour faciliter la lecture et la copie. -
- -
- -- Pas envie de lire les codes de ce projet? Tout le projet est directement exposé par ChatGPT. -
- -
- Appel à une variété de modèles de langage de grande envergure (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4).
-
- -
-
---
-# Installation
-## Installation-Method 1: running directly (Windows, Linux or MacOS)
-
-1. Télécharger le projet
-```sh
-git clone https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configuration de la clé API
-
-Dans `config.py`, configurez la clé API et d'autres paramètres. Consultez [Special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. Lorsque le programme est exécuté, il vérifie en premier s'il existe un fichier de configuration privé nommé `config_private.py` et remplace les paramètres portant le même nom dans `config.py` par les paramètres correspondants dans `config_private.py`. Par conséquent, si vous comprenez la logique de lecture de nos configurations, nous vous recommandons vivement de créer un nouveau fichier de configuration nommé `config_private.py` à côté de `config.py` et d'y transférer (copier) les configurations de `config.py`. `config_private.py` n'est pas contrôlé par Git et peut garantir la sécurité de vos informations privées. P.S. Le projet prend également en charge la configuration de la plupart des options via "variables d'environnement" ; le format d'écriture des variables d'environnement est celui du fichier `docker-compose`. Priorité de lecture : "variables d'environnement" > `config_private.py` > `config.py`)
-
-
-3. Installer les dépendances
-```sh
-# (Option I: python users installation) (Python version 3.9 or higher, the newer the better). Note: use official pip source or ali pip source. To temporarily change the source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: non-python users installation) Use Anaconda, the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11 # Create anaconda env
-conda activate gptac_venv # Activate anaconda env
-python -m pip install -r requirements.txt # Same step as pip installation
-```
-
Cliquez ici pour afficher le texte si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend. -

- -【Optional】 Si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend, des dépendances supplémentaires doivent être installées (prérequis: compétent en Python + utilisez Pytorch + configuration suffisante de l'ordinateur): -```sh -# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llm/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt - -# 【Optional Step II】 Support FDU MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When running this line of code, you must be in the project root path. - -# 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
-
-
-
-4. Exécution
-```sh
-python main.py
-```
-
-5. Plugin de fonction de test
-```
-- Fonction de modèle de plugin de test (requiert que GPT réponde à ce qui s'est passé aujourd'hui dans l'histoire), vous pouvez utiliser cette fonction comme modèle pour mettre en œuvre des fonctionnalités plus complexes.
-    Cliquez sur "[Démo de modèle de plugin de fonction] Aujourd'hui dans l'histoire"
-```
-
-## Installation - Méthode 2 : Utilisation de Docker
-
-1. ChatGPT uniquement (recommandé pour la plupart des gens)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git # Télécharger le projet
-cd gpt_academic # Accéder au chemin
-nano config.py # Éditez config.py avec n'importe quel éditeur de texte en configurant "Proxy", "API_KEY" et "WEB_PORT" (p. ex. 50923)
-docker build -t gpt-academic . # Installer
-
-# (Dernière étape - choix 1) Dans un environnement Linux, l'utilisation de `--net=host` est plus facile et rapide
-docker run --rm -it --net=host gpt-academic
-# (Dernière étape - choix 2) Dans un environnement macOS/Windows, seule l'option -p permet d'exposer le port du conteneur (p. ex. 50923) au port de l'hôte.
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (il faut connaître Docker)
-
-``` sh
-# Modifiez docker-compose.yml : supprimez la solution 1 et la solution 3, conservez la solution 2. Modifiez la configuration de la solution 2 dans docker-compose.yml en suivant les commentaires.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + PanGu + RWKV (il faut connaître Docker)
-``` sh
-# Modifiez docker-compose.yml : supprimez la solution 1 et la solution 2, conservez la solution 3. Modifiez la configuration de la solution 3 dans docker-compose.yml en suivant les commentaires.
-docker-compose up
-```
-
-
-## Installation - Méthode 3 : Autres méthodes de déploiement
-
-1. Comment utiliser une URL de proxy inversé / l'API Microsoft Azure
-Configurez simplement API_URL_REDIRECT selon les instructions de `config.py`.
-
-2. Déploiement distant sur un serveur cloud (connaissance et expérience des serveurs cloud requises)
-Veuillez consulter le [Wiki de déploiement-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97).
-
-3. Utilisation de WSL2 (sous-système Windows pour Linux)
-Veuillez consulter le [Wiki de déploiement-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2).
-
-4. Comment exécuter sous un sous-répertoire (tel que `http://localhost/subpath`)
-Veuillez consulter les [instructions d'exécution de FastAPI](docs/WithFastapi.md).
-
-5. Utilisation de docker-compose
-Veuillez lire docker-compose.yml, puis suivre les instructions fournies.
-
----
-# Utilisation avancée
-## Personnalisation de nouveaux boutons pratiques / Plugins de fonctions personnalisées
-
-1. Personnalisation de nouveaux boutons pratiques (raccourcis académiques)
-Ouvrez `core_functional.py` avec n'importe quel éditeur de texte, ajoutez une entrée comme suit, puis redémarrez le programme. (Si le bouton a été ajouté avec succès et est visible, le préfixe et le suffixe prennent en charge les modifications à chaud et ne nécessitent pas le redémarrage du programme pour prendre effet.)
-Par exemple
-```
-"Super coller sens": {
-    # Préfixe, sera ajouté avant votre entrée. 
Par exemple, pour décrire votre demande, telle que traduire, expliquer du code, faire la mise en forme, etc. - "Prefix": "Veuillez traduire le contenu suivant en chinois, puis expliquer chaque terme proprement nommé qui y apparaît avec un tableau markdown:\n\n", - - # Suffixe, sera ajouté après votre entrée. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu d'entrée de guillemets. - "Suffix": "", -}, -``` -
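-
-Point 1 of "Installation - Méthode 3" above relies on API_URL_REDIRECT. The snippet below is a hypothetical illustration only; check the comments in `config.py` for the exact expected format, and note that the target URL is a placeholder:
-
-```python
-# Maps the official endpoint to a reverse-proxy / Azure-style URL.
-API_URL_REDIRECT = {
-    "https://api.openai.com/v1/chat/completions":
-        "https://your-reverse-proxy.example.com/v1/chat/completions",
-}
-```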
- -
- -2. Plugins de fonctions personnalisées - -Écrivez des plugins de fonctions puissants pour effectuer toutes les tâches que vous souhaitez ou que vous ne pouvez pas imaginer. -Les plugins de ce projet ont une difficulté de programmation et de débogage très faible. Si vous avez des connaissances de base en Python, vous pouvez simuler la fonctionnalité de votre propre plugin en suivant le modèle que nous avons fourni. -Veuillez consulter le [Guide du plugin de fonction] (https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) pour plus de détails. - ---- -# Latest Update - -## Nouvelles fonctionnalités en cours de déploiement. - -1. Fonction de sauvegarde de la conversation. -Appelez simplement "Enregistrer la conversation actuelle" dans la zone de plugin de fonction pour enregistrer la conversation actuelle en tant que fichier html lisible et récupérable. De plus, dans la zone de plugin de fonction (menu déroulant), appelez "Charger une archive de l'historique de la conversation" pour restaurer la conversation précédente. Astuce : cliquer directement sur "Charger une archive de l'historique de la conversation" sans spécifier de fichier permet de consulter le cache d'archive html précédent. Cliquez sur "Supprimer tous les enregistrements locaux de l'historique de la conversation" pour supprimer le cache d'archive html. - -
- -
- - - -2. Générer un rapport. La plupart des plugins génèrent un rapport de travail après l'exécution. -
- - - -
- -3. Conception de fonctionnalités modulaires avec une interface simple mais capable d'une fonctionnalité puissante. -
- - -
- -4. C'est un projet open source qui peut "se traduire de lui-même". -
- -
- -5. Traduire d'autres projets open source n'est pas un problème. -
- -
- -
- -
- -6. Fonction de décoration de live2d (désactivée par défaut, nécessite une modification de config.py). -
- -
- -7. Prise en charge du modèle de langue MOSS. -
- -
- -8. Génération d'images OpenAI. -
- -
- -9. Analyse et synthèse vocales OpenAI. -
- -
- -10. Correction de la totalité des erreurs de Latex. -
- -
- - -## Versions : -- version 3.5 (À faire) : appel de toutes les fonctions de plugin de ce projet en langage naturel (priorité élevée) -- version 3.4 (À faire) : amélioration du support multi-thread de chatglm en local -- version 3.3 : Fonctionnalité intégrée d'informations d'internet -- version 3.2 : La fonction du plugin de fonction prend désormais en charge des interfaces de paramètres plus nombreuses (fonction de sauvegarde, décodage de n'importe quel langage de code + interrogation simultanée de n'importe quelle combinaison de LLM) -- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles GPT ! Support api2d, équilibrage de charge multi-clé api. -- version 3.0 : Prise en charge de chatglm et autres LLM de petite taille. -- version 2.6 : Refonte de la structure des plugins, amélioration de l'interactivité, ajout de plus de plugins. -- version 2.5 : Auto-mise à jour, résolution des problèmes de texte trop long et de dépassement de jetons lors de la compilation du projet global. -- version 2.4 : (1) Nouvelle fonction de traduction de texte intégral PDF ; (2) Nouvelle fonction de permutation de position de la zone d'entrée ; (3) Nouvelle option de mise en page verticale ; (4) Amélioration des fonctions multi-thread de plug-in. -- version 2.3 : Amélioration de l'interactivité multithread. -- version 2.2 : Les plugins de fonctions peuvent désormais être rechargés à chaud. -- version 2.1 : Disposition pliable -- version 2.0 : Introduction de plugins de fonctions modulaires -- version 1.0 : Fonctionnalités de base - -gpt_academic développeur QQ groupe-2:610599535 - -- Problèmes connus - - Certains plugins de traduction de navigateur perturbent le fonctionnement de l'interface frontend de ce logiciel - - Des versions gradio trop hautes ou trop basses provoquent de nombreuses anomalies - -## Référence et apprentissage - -``` -De nombreux autres excellents projets ont été référencés dans le code, notamment : - -# Projet 1 : ChatGLM-6B de Tsinghua : -https://github.com/THUDM/ChatGLM-6B - -# Projet 2 : JittorLLMs de Tsinghua : -https://github.com/Jittor/JittorLLMs - -# Projet 3 : Edge-GPT : -https://github.com/acheong08/EdgeGPT - -# Projet 4 : ChuanhuChatGPT : -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Projet 5 : ChatPaper : -https://github.com/kaixindelele/ChatPaper - -# Plus : -https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file diff --git a/docs/README_JP.md b/docs/README_JP.md deleted file mode 100644 index 46145e1f9cb70c5f7a0fba4e845338b186159778..0000000000000000000000000000000000000000 --- a/docs/README_JP.md +++ /dev/null @@ -1,329 +0,0 @@ -> **Note** -> -> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。 -> -> When installing dependencies, please strictly choose the versions specified in `requirements.txt`. -> -> `pip install -r requirements.txt` -> - -# GPT 学术优化 (GPT Academic) - -**もしこのプロジェクトが好きなら、星をつけてください。もしあなたがより良いアカデミックショートカットまたは機能プラグインを思いついた場合、Issueをオープンするか pull request を送信してください。私たちはこのプロジェクト自体によって翻訳された[英語 |](README_EN.md)[日本語 |](README_JP.md)[한국어 |](https://github.com/mldljyh/ko_gpt_academic)[Русский |](README_RS.md)[Français](README_FR.md)のREADMEも用意しています。 -GPTを使った任意の言語にこのプロジェクトを翻訳するには、[`multi_language.py`](multi_language.py)を読んで実行してください。 (experimental)。 - -> **注意** -> -> 1. **赤色**で表示された関数プラグイン(ボタン)のみ、ファイルの読み取りをサポートしています。一部のプラグインは、プラグインエリアの**ドロップダウンメニュー**内にあります。また、私たちはどんな新しいプラグインのPRでも、**最優先**で歓迎し、処理します! -> -> 2. 
このプロジェクトの各ファイルの機能は、自己解析の詳細説明書である[`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)で説明されています。バージョンが進化するにつれて、関連する関数プラグインをいつでもクリックし、GPTを呼び出してプロジェクトの自己解析レポートを再生成することができます。よくある問題は[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)にまとめられています。[インストール方法](#installation)。
-
-> 3. このプロジェクトは、chatglmやRWKV、盘古 (Pangu) など、国内の大規模自然言語モデルの利用をサポートし、その試用を奨励します。複数のAPIキーを共存させることができ、設定ファイルに`API_KEY="openai-key1,openai-key2,api2d-key3"`のように記入できます。`API_KEY`を一時的に変更する場合は、入力エリアに一時的な`API_KEY`を入力してEnterキーを押せば、それが有効になります。
- -機能 | 説明 ---- | --- -一键校正 | 一键で校正可能、論文の文法エラーを検索することができる -一键中英翻訳 | 一键で中英翻訳可能 -一键コード解説 | コードを表示し、解説し、生成し、コードに注釈をつけることができる -[自分でカスタマイズ可能なショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | 自分でカスタマイズ可能なショートカットキーをサポートする -モジュール化された設計 | カスタマイズ可能な[強力な関数プラグイン](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions)をサポートし、プラグインは[ホットアップデート](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)に対応している -[自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] [一键読解](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)このプロジェクトのソースコード -プログラム解析 | [関数プラグイン] 一鍵で他のPython/C/C++/Java/Lua/...プロジェクトを分析できる -論文の読み、[翻訳](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] LaTex/ PDF論文の全文を一鍵で読み解き、要約を生成することができる -LaTex全文[翻訳](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[校正](https://www.bilibili.com/video/BV1FT411H7c5/) | [関数プラグイン] LaTex論文の翻訳または校正を一鍵で行うことができる -一括で注釈を生成 | [関数プラグイン] 一鍵で関数に注釈をつけることができる -Markdown[中英翻訳](https://www.bilibili.com/video/BV1yo4y157jV/) | [関数プラグイン] 上記の5種類の言語の[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)を見たことがありますか? -チャット分析レポート生成 | [関数プラグイン] 実行後、自動的に概要報告書を生成する -[PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文からタイトルと要約を抽出し、全文を翻訳する(マルチスレッド) -[Arxivアシスタント](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] arxiv記事のURLを入力するだけで、要約を一鍵翻訳し、PDFをダウンロードできる -[Google Scholar 総合アシスタント](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが[related works](https://www.bilibili.com/video/BV1GP411U7Az/)を作成する -インターネット情報収集+GPT | [関数プラグイン] まずGPTに[インターネットから情報を収集](https://www.bilibili.com/video/BV1om4y127ck)してから質問に回答させ、情報が常に最新であるようにする -数式/画像/表表示 | 数式の[tex形式とレンダリング形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)を同時に表示し、数式、コードハイライトをサポートしている -マルチスレッド関数プラグインがサポートされている | chatgptをマルチスレッドで呼び出し、[大量のテキスト](https://www.bilibili.com/video/BV1FT411H7c5/)またはプログラムを一鍵で処理できる -ダークグラジオ[テーマの起動](https://github.com/binary-husky/gpt_academic/issues/173) | ブラウザのURLの後ろに```/?__theme=dark```を追加すると、ダークテーマを切り替えることができます。 -[多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)がサポートされ、[API2D](https://api2d.com/)がサポートされている | 同時にGPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[復旦MOSS](https://github.com/OpenLMLab/MOSS)に対応 -より多くのLLMモデルが接続され、[huggingfaceデプロイ](https://huggingface.co/spaces/qingxu98/gpt-academic)がサポートされている | Newbingインターフェイス(Newbing)、清華大学の[Jittorllm](https://github.com/Jittor/JittorLLMs)のサポート[LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV)と[盘古α](https://openi.org.cn/pangu/) -さらに多くの新機能(画像生成など)を紹介する... | この文書の最後に示す... -
- New interface (switch between the "left-right layout" and the "top-bottom layout" by changing the LAYOUT option in `config.py`)

- All buttons are generated dynamically by reading functional.py; custom functions can be added freely, liberating the clipboard.

- Polishing/correction

- If the output contains formulas, they are displayed in both TeX and rendered form, making them easy to copy and read.

- Don't feel like reading the project code? Just ask chatgpt directly.

- Mixed calls to multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
---

# Installation

## Installation-Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project.

```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure the API_KEY.

Configure the API KEY and other settings in `config.py`; see also the [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).

(P.S. When the program runs, it first checks for a private configuration file named `config_private.py` and uses its values to override the same-named values in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information more secure. The project also supports configuring most options through environment variables; the environment-variable format follows the `docker-compose` file. Reading priority: environment variables > `config_private.py` > `config.py`.)

3. Install dependencies.

```sh
# (Choice I: if familiar with Python) (Python 3.9 or above; the newer the better). Note: use the official pip source or the Aliyun pip source. To switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Choice II: if not familiar with Python) Use Anaconda; the steps are the same (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11  # create the anaconda environment
conda activate gptac_venv               # activate the anaconda environment
python -m pip install -r requirements.txt  # this step is identical to the pip installation step
```
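For reference, a minimal `config_private.py` could look like the sketch below. This is only an illustration: the option names must match those defined in `config.py`, and the proxy protocol, address, and port shown here are placeholder values for a hypothetical local proxy, not recommendations.

```python
# config_private.py -- minimal sketch (not tracked by git).
# Every value set here overrides the value of the same name in config.py.

API_KEY = "openai-key1,openai-key2,api2d-key3"  # multiple keys may coexist, comma-separated

USE_PROXY = True
if USE_PROXY:
    # placeholder proxy settings: protocol (socks5/http), address, port
    proxies = {
        "http":  "socks5h://localhost:11284",
        "https": "socks5h://localhost:11284",
    }
else:
    proxies = None
```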
[Optional Steps] If you need Tsinghua ChatGLM / Fudan MOSS as a backend, you need to install additional dependencies (prerequisites: familiarity with Python, prior PyTorch experience, and a sufficiently powerful machine):

```sh
# Optional Step I: support Tsinghua ChatGLM. Note: if you encounter the error "Call ChatGLM fail, cannot load ChatGLM parameters normally", refer to the following: 1. The version installed above is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda. 2. If the model cannot be loaded due to insufficient local hardware, lower the model precision in request_llm/bridge_chatglm.py by changing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
python -m pip install -r request_llm/requirements_chatglm.txt

# Optional Step II: support Fudan MOSS.
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # note: this line must be executed from the project root

# Optional Step III: make sure AVAIL_LLM_MODELS in config.py contains the expected models. Currently supported models (the jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```
4. Run.

```sh
python main.py
```

5. Test the function plugins.
- Test the function plugin template (requires gpt to answer what happened in history today); you can use this function as a template to implement more complex features.
    Click "[Function Plugin Template Demo] Today in History".

## Installation-Method 2: Using Docker

1. ChatGPT only (recommended for most people)

```sh
git clone https://github.com/binary-husky/gpt_academic.git  # download the project
cd gpt_academic                                             # enter the path
nano config.py  # edit config.py with any text editor to configure "Proxy", "API_KEY", "WEB_PORT" (e.g. 50923), and more
docker build -t gpt-academic .  # install

# (Last step, option 1) In a Linux environment, `--net=host` is more convenient and quick
docker run --rm -it --net=host gpt-academic
# (Last step, option 2) In a macOS/windows environment, the -p option must be used to expose the container port (e.g. 50923) on the host
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)

```sh
# Modify docker-compose.yml: delete plans 1 and 3 and retain plan 2. Then modify the configuration of plan 2 in docker-compose.yml; see the comments in the file for instructions.
docker-compose up
```

3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
```sh
# Modify docker-compose.yml: delete plans 1 and 2 and retain plan 3. Then modify the configuration of plan 3 in docker-compose.yml; see the comments in the file for instructions.
docker-compose up
```

## Installation-Method 3: Other Deployment Methods

1. How to use a proxy URL / the Microsoft Azure API
Configure API_URL_REDIRECT according to the instructions in `config.py`.

2. Remote cloud server deployment (requires cloud-server knowledge and experience)
Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL2 (Windows Subsystem for Linux)
Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run under a secondary URL (such as `http://localhost/subpath`)
Please visit the [FastAPI running instructions](docs/WithFastapi.md)

5. Run with docker-compose
Please read docker-compose.yml and follow the instructions provided therein.

---
# Advanced Usage
## Customize new convenience buttons / custom function plugins

1. Customize new convenience buttons (academic shortcut keys)
Open `core_functional.py` with any text editor, add an item as follows, and restart the program. (If the button has been added successfully and is visible, the prefix and suffix support hot modification without restarting.)
For example:
```
"Super English to Chinese Translation": {
    # Prefix, which will be added before your input. For example, it can describe your request, such as translation, code interpretation, or polishing.
    "Prefix": "Please translate the following content into Chinese, and explain the proper nouns in the text in a markdown table one by one:\n\n",

    # Suffix, which will be added after your input. For example, combined with the prefix, it can surround your input with quotation marks.
    "Suffix": "",
},
```
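For intuition, a button entry like the one above is applied by simple string concatenation before the text is sent to the model. The sketch below illustrates that composition; the function name `compose_prompt` and the dictionary handling are illustrative only, not the project's actual internals.

```python
def compose_prompt(user_input: str, button: dict) -> str:
    # The prefix states the request (translate, polish, ...) and the
    # suffix can, e.g., close a quotation opened by the prefix.
    return button.get("Prefix", "") + user_input + button.get("Suffix", "")

button = {
    "Prefix": ("Please translate the following content into Chinese, and explain "
               "the proper nouns in the text in a markdown table one by one:\n\n"),
    "Suffix": "",
}
print(compose_prompt("Attention is all you need.", button))
```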
2. Custom function plugins

Write powerful function plugins to perform any task you can or cannot imagine.
Writing and debugging plugins in this project is easy: with basic Python knowledge, you can implement your own plugin features by following the template we provide.
For details, see the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).

---
# Latest Update
## New features

1. Conversation saving. Call "Save current conversation" in the function plugin area to save the current conversation as a readable, restorable HTML file. Call "Load conversation history archive" in the function plugin area (dropdown menu) to restore a previous session. Tip: clicking "Load conversation history archive" without specifying a file shows the cache of past HTML archives; clicking "Delete all local conversation history" deletes all cached HTML archives.

2. Report generation. Most plugins generate a work report after they finish running.

3. Modular function design: simple interfaces support powerful functionality.

4. An open-source project capable of translating itself.

5. Interpreting other open-source projects is easy.

6. A small [Live2D](https://github.com/fghrsh/live2d_demo) decoration feature (disabled by default; requires changing `config.py`).

7. Added support for the MOSS large language model.

8. OpenAI image generation.

9. OpenAI audio analysis and summarization.

10. Full-text LaTeX proofreading.
## Versions:
- version 3.5 (in progress): call all of this project's function plugins using natural language (high priority)
- version 3.4 (in progress): improve multi-thread support for the local chatglm model
- version 3.3: integrated web-information feature
- version 3.2: function plugins support more parameter interfaces (conversation saving, decoding code in any language + querying any combination of LLMs simultaneously)
- version 3.1: several GPT models can now be queried at the same time! Supports api2d and balanced load distribution across multiple API keys.
- version 3.0: support for chatglm and other small LLMs.
- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins.
- version 2.5: self-updating; fixed long-document and token-overflow problems.
- version 2.4: (1) added full-text PDF translation; (2) added input-area position switching; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
- version 2.3: improved multi-threaded performance.
- version 2.2: function plugins support hot reloading.
- version 2.1: collapsible layout.
- version 2.0: introduced modular function plugins.
- version 1.0: basic features

gpt_academic developer QQ group 2: 610599535

- Known issues
  - Some browser translation plugins interfere with the front-end of this software.
  - A gradio version that is too high or too low causes numerous failures.

## References and learning

```
The code draws on the designs of many other excellent projects, including:

# Project 1: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
\ No newline at end of file
diff --git a/docs/README_RS.md b/docs/README_RS.md deleted file mode 100644 index d4888a0522ff6731ec74a4782b15d49bc6c6dd2d..0000000000000000000000000000000000000000
--- a/docs/README_RS.md
+++ /dev/null
@@ -1,278 +0,0 @@
> **Note**
>
> This README was generated automatically by this project's markdown translation module and may not be 100% correct.
>
# GPT Academic Optimization (GPT Academic)

**If you like this project, please give it a star. If you have come up with more useful academic shortcuts or function plugins, feel free to open an issue or pull request.**
To translate this project into an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).

> **Note**
>
> 1. Note that only function plugins (buttons) marked in **red** support reading files; some plugins are in the **dropdown menu** of the plugin area. We also welcome and handle pull requests for any new plugin with the highest priority!
>
> 2. The functionality of each file in this project is described in the self-analysis document [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As the version evolves, you can regenerate the project's self-analysis report at any time by clicking the relevant function plugin and calling GPT. Build questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
>
> 3. This project is compatible with, and encourages trying, Chinese large language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys may coexist and can be specified in the configuration file, e.g. `API_KEY="openai-key1,openai-key2,api2d-key3"`. To change `API_KEY` temporarily, enter a temporary `API_KEY` in the input area and press Enter to apply it.
> **Note**
>
> When installing dependencies, strictly select the versions **specified in requirements.txt**.
>
> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`

Feature | Description
--- | ---
One-click polishing | Supports one-click polishing and searching for grammar errors in academic papers
One-click Chinese-English translation | One-click Chinese-English translation
One-click code explanation | Shows, explains, generates, and comments code
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot swapping](https://github.com/binary-husky/gpt_academic/wiki/Function-Plug-in-Guide)
[Self-analysis of the program](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click review](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academicProject-Self-analysis-Report) of this project's source code
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of the tree of another Python/C/C++/Java/Lua/... project
Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click reading of a full paper and generation of an abstract
Full [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) translation and polishing | [Function plugin] One-click translation or polishing of a LaTeX paper
Automatic comment generation | [Function plugin] One-click automatic generation of function comments
Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in both versions for these five languages?
Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
[PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) full-text translation | [Function plugin] Extracts the title and abstract of a [PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) and translates the whole document (multi-threaded)
[Arxiv Helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter the URL of an arxiv article to translate the abstract and download the PDF in one click
[Google Scholar Integration Helper](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let gpt help you [write a review](https://www.bilibili.com/video/BV1GP411U7Az/)
Internet information gathering + GPT | [Function plugin] One-click [GPT retrieval of information from the internet](https://www.bilibili.com/video/BV1om4y127ck) before answering a question, so the information never becomes outdated
Formula/image/table display | Shows formulas in both [TeX form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) simultaneously; supports formulas and code highlighting
Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt; one-click processing of [large volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
Dark gradio theme | Append ```/?__theme=dark``` after the URL in the browser to switch to the dark theme
[Multiple LLM models supported](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS)
More new LLM models connected, [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) deployment supported | Newbing interface (new Bing); support for [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV), and [PanGu-α](https://openi.org.cn/pangu/)
More new features (image generation, etc.) | See the end of this file...

- All buttons are generated dynamically by reading functional.py; custom functions can be added freely, liberating the clipboard.

- Revision/correction

- If the output contains formulas, they are displayed in both TeX and rendered form for easy copying and reading.

- Don't feel like reading the project code? Show the entire project to chatgpt directly.

- Mixing multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
---
# Installation
## Installation-Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure API_KEY

In `config.py`, configure the API KEY and other settings; see also the [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).

(P.S. When the program runs, it first checks for a secret configuration file named `config_private.py` and uses its values to override the same-named values in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information more secure. The project also supports configuring most options through environment variables; the environment-variable format follows the `docker-compose` file. Reading priority: environment variables > `config_private.py` > `config.py`.)

3. Install dependencies
```sh
# (Option I: if familiar with Python) (Python 3.9 or above; the newer the better). Note: use the official pip source or the Aliyun pip source. To switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: if unfamiliar with Python) Use Anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11  # create an Anaconda environment
conda activate gptac_venv               # activate the Anaconda environment
python -m pip install -r requirements.txt  # this step is the same as the pip installation
```
[Optional step] If you need Tsinghua ChatGLM / Fudan MOSS as a backend, you need to install additional dependencies (prerequisites: familiarity with Python, prior PyTorch experience, and a sufficiently powerful machine):
```sh
# [Optional step I] Support Tsinghua ChatGLM. Note: if you encounter the "Call ChatGLM fail, cannot load ChatGLM parameters normally" error, refer to the following: 1. The default installation above is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda. 2. If the model cannot be loaded due to insufficient local hardware, modify the model precision in request_llm/bridge_chatglm.py: change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # note: this line must be executed from the project root

# [Optional step III] Make sure AVAIL_LLM_MODELS in config.py contains the expected models. Currently supported models (the jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```
4. Run
```sh
python main.py
```

5. Test the function plugins
- Test the function plugin template (requires GPT to answer what happened in history today); you can use this function as a template to implement more complex features.
    Click "[Function plugin Template Demo] On this day in history".

## Installation-Method 2: Using Docker

1. ChatGPT only (recommended for most people)

```sh
git clone https://github.com/binary-husky/gpt_academic.git  # download the project
cd gpt_academic                                             # enter the path
nano config.py  # edit config.py with any text editor to configure "Proxy", "API_KEY", and "WEB_PORT" (e.g. 50923)
docker build -t gpt-academic .  # install

# (Last step, option 1) In a Linux environment, using `--net=host` is more convenient and faster
docker run --rm -it --net=host gpt-academic
# (Last step, option 2) In a macOS/windows environment, only the -p option can be used to expose the container's port (e.g. 50923) on the host
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)

```sh
# Edit docker-compose.yml: delete solutions 1 and 3 and keep solution 2. Then modify the configuration of solution 2 in docker-compose.yml; refer to the comments in the file.
docker-compose up
```

3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with Docker)
```sh
# Edit docker-compose.yml: delete solutions 1 and 2 and keep solution 3. Then modify the configuration of solution 3 in docker-compose.yml; refer to the comments in the file.
docker-compose up
```

## Installation-Method 3: Other Deployment Methods

1. How to use a reverse proxy URL / the Microsoft Azure API
Configure API_URL_REDIRECT according to the instructions in `config.py`.

2. Remote cloud server deployment (requires cloud-server knowledge and experience)
Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL2 (Windows Subsystem for Linux)
Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run under a secondary URL (such as `http://localhost/subpath`)
Please visit the [FastAPI operation instructions](docs/WithFastapi.md)

5. Running with docker-compose
Please read docker-compose.yml and follow its prompts.

---
# Advanced Usage
## Customize new convenient buttons / custom function plugins

1. Customize new convenient buttons (academic shortcuts)
Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has been added successfully and is visible, both the prefix and suffix can be hot-modified without restarting the program.)
For example:
```
"Super English to Chinese": {
    # Prefix: added before your input. For example, it can describe your request, such as translation, code interpretation, or polishing.
    "Prefix": "Please translate the following content into Chinese, and then explain each proper noun that appears in the text with a markdown table:\n\n",

    # Suffix: added after your input. For example, combined with the prefix, it can enclose your input in quotation marks.
    "Suffix": "",
},
```
2. Custom function plugins

Write powerful function plugins to perform any task you can or cannot imagine.
Writing and debugging plugins in this project is easy: with some Python knowledge, you can implement your own plugin features by imitating the template we provide.
Please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) for details.

---
# Latest Update
## New features

1. Conversation saving. Call "Save current conversation" in the function plugin area to save the current conversation as a readable, restorable HTML file. Call "Load conversation history archive" in the function plugin menu to restore a previous session. Tip: clicking "Load conversation history archive" without specifying a file shows the cache of historical HTML archives; clicking "Delete all local conversation history" deletes all cached HTML archives.

2. Report generation. Most plugins produce a work report after they finish running.

3. Modular function design: simple interfaces with powerful functionality.

4. An open-source project that can "translate itself".

5. Translating other open-source projects is no problem.

6. Small [live2d](https://github.com/fghrsh/live2d_demo) decoration features (disabled by default; requires changing `config.py`).

7. Support for the MOSS large language model.

8. OpenAI image generation.

9. OpenAI audio analysis and summarization.

10. Full-cycle LaTeX proofreading.

## Versions:
- version 3.5 (todo): call the project's function plugins using natural language (high priority)
- version 3.4 (todo): improve multi-thread support for local large chat models.
- version 3.3: added an integrated internet-information feature.
- version 3.2: function plugins support many more parameter interfaces (conversation saving, parsing code in any language + querying any combination of LLMs simultaneously).
- version 3.1: support for querying multiple GPT models at once! Support for api2d, balanced load distribution across multiple api keys.
- version 3.0: support for chatglm and other small LLMs.
- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins.
- version 2.5: self-updating; fixed long-text and token-overflow problems when processing large projects.
- version 2.4: (1) added full-text PDF translation; (2) added input-area position switching; (3) added a vertical layout option; (4) optimized multi-threaded plugins.
- version 2.3: improved multi-threaded interactivity.
- version 2.2: function plugins support hot reloading.
- version 2.1: collapsible layout.
- version 2.0: modular function plugins introduced.
- version 1.0: basic features.
gpt_academic developer QQ group 2: 610599535

- Known issues
  - Some browser translation plugins interfere with the front-end of this software.
  - A gradio version that is too high or too low causes numerous failures.

## References and learning

```
We used many code concepts from other excellent projects, including:

# Project 1: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: Chuanhu ChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
\ No newline at end of file
diff --git a/docs/WithFastapi.md b/docs/WithFastapi.md deleted file mode 100644 index 270375076a6d0cdae568180c48a619a40f3c0ccd..0000000000000000000000000000000000000000
--- a/docs/WithFastapi.md
+++ /dev/null
@@ -1,43 +0,0 @@
# Running with fastapi

We currently support fastapi to solve the sub-path deployment issue.

1. Change the CUSTOM_PATH setting in `config.py`:

```sh
nano config.py
```

2. Edit main.py:

```diff
  auto_opentab_delay()
- demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
+ demo.queue(concurrency_count=CONCURRENT_COUNT)

- # To run under a secondary path
- # CUSTOM_PATH = get_conf('CUSTOM_PATH')
- # if CUSTOM_PATH != "/":
- #     from toolbox import run_gradio_in_subpath
- #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
- # else:
- #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")

+ # To run under a secondary path
+ CUSTOM_PATH = get_conf('CUSTOM_PATH')
+ if CUSTOM_PATH != "/":
+     from toolbox import run_gradio_in_subpath
+     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
+ else:
+     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")

if __name__ == "__main__":
    main()
```

3. Go!
```sh
python main.py
```
diff --git a/docs/demo.jpg b/docs/demo.jpg deleted file mode 100644 index fcffc19a0379d23291fd1da92c543df961206472..0000000000000000000000000000000000000000 Binary files a/docs/demo.jpg and /dev/null differ
diff --git a/docs/demo2.jpg b/docs/demo2.jpg deleted file mode 100644 index 38fae48edc19253fea48ee235db8c7a0ccd697b6..0000000000000000000000000000000000000000 Binary files a/docs/demo2.jpg and /dev/null differ
diff --git a/docs/logo.png b/docs/logo.png deleted file mode 100644 index 567dee1a957833b9a925cf709c6ebedce482bd43..0000000000000000000000000000000000000000 Binary files a/docs/logo.png and /dev/null differ
diff --git a/docs/self_analysis.md b/docs/self_analysis.md deleted file mode 100644 index e34b905d7bb93ee6af9c730aef23052323c0489e..0000000000000000000000000000000000000000
--- a/docs/self_analysis.md
+++ /dev/null
@@ -1,378 +0,0 @@
# chatgpt-academic project self-translation report
(Author's note: the following analysis was generated in one click by this project calling ChatGPT; blame GPT for any inaccuracies 😄)

| File | Description |
| ------ | ------ |
| check_proxy.py | Checks proxy validity and geographic location |
| colorful.py | Prints colored text to the console |
| config.py | Configuration and parameter settings |
| config_private.py | Private configuration and parameter settings |
| core_functional.py | Core functions and parameter settings |
| crazy_functional.py | Collection of advanced function plugins |
| main.py | A chatbot program offering academic translation, text processing, and other query services |
| multi_language.py | Recognizes and translates different languages |
| theme.py | Customizes the gradio application theme |
| toolbox.py | Utility library assisting various features |
| crazy_functions\crazy_functions_test.py | Tests the functions in crazy_functions |
| crazy_functions\crazy_utils.py | Utility functions for string processing, exception detection, Markdown conversion, etc. |
| crazy_functions\Latex全文润色.py | Polishes and corrects an entire LaTeX project |
| crazy_functions\Latex全文翻译.py | Translates an entire LaTeX project |
| crazy_functions\\_\_init\_\_.py | Module init file marking `crazy_functions` as a package |
| crazy_functions\下载arxiv论文翻译摘要.py | Downloads the PDF of an `arxiv` paper and extracts/translates its abstract |
| crazy_functions\代码重写为全英文_多线程.py | Converts Chinese content in Python source files to English |
| crazy_functions\图片生成.py | Generates images from prompt text with a GPT model |
| crazy_functions\对话历史存档.py | Writes each conversation record to a Markdown file |
| crazy_functions\总结word文档.py | Summarizes input Word documents |
| crazy_functions\总结音视频.py | Summarizes input audio/video files |
| crazy_functions\批量Markdown翻译.py | Translates Markdown files in a given directory between Chinese and English |
| crazy_functions\批量总结PDF文档.py | Splits and summarizes PDF files |
| crazy_functions\批量总结PDF文档pdfminer.py | Extracts text from PDF files and summarizes them |
| crazy_functions\批量翻译PDF文档_多线程.py | Translates PDF files in a given directory between Chinese and English |
| crazy_functions\理解PDF文档内容.py | Summarizes PDF files and answers questions about them |
| crazy_functions\生成函数注释.py | Automatically generates comments for Python functions |
| crazy_functions\联网的ChatGPT.py | Answers chat questions using a web crawler plus the ChatGPT model |
| crazy_functions\解析JupyterNotebook.py | Parses the code of Jupyter Notebooks |
| crazy_functions\解析项目源代码.py | Parses source code of specified programming languages |
| crazy_functions\询问多个大语言模型.py | Processes input and replies with multiple large language models |
| crazy_functions\读文章写摘要.py | Parses papers and generates full-text abstracts |
| crazy_functions\谷歌检索小助手.py | Provides metadata for articles on a Google Scholar search page. |
| crazy_functions\高级功能函数模板.py | Replies to user input with related images via the Unsplash API. |
| request_llms\bridge_all.py | Conducts conversations based on different LLM models. |
| request_llms\bridge_chatglm.py | Generates replies with the ChatGLM model; supports single- and multi-threaded modes. |
| request_llms\bridge_chatgpt.py | Conducts conversations based on the GPT model. |
| request_llms\bridge_jittorllms_llama.py | Conducts conversations with the JittorLLMs model; supports single- and multi-threaded modes. |
| request_llms\bridge_jittorllms_pangualpha.py | Conducts conversations with the JittorLLMs model using multi-process and multi-threaded modes. |
| request_llms\bridge_jittorllms_rwkv.py | Implements chat with the JittorLLMs model, offering options such as history and parameter tuning. |
| request_llms\bridge_moss.py | Loads the MOSS model for conversations. |
| request_llms\bridge_newbing.py | Conducts conversations with the Newbing chatbot; supports single- and multi-threaded modes. |
| request_llms\bridge_newbingfree.py | Implements chatbot text generation based on the Bing chatbot API. |
| request_llms\bridge_stackclaude.py | Implements Claude-user interaction based on the Slack API. |
| request_llms\bridge_tgui.py | Connects the chatbot to the UI via websocket. |
| request_llms\edge_gpt.py | Calls the Bing chatbot API to provide chatbot services. |
| request_llms\edge_gpt_free.py | Implements a chatbot API using the aiohttp and httpx libraries. |
| request_llms\test_llms.py | Unit tests for the llm models. |

## Next, analyze the project file by file. [0/48] Overview of check_proxy.py:

This file mainly contains five functions:

1. `check_proxy`: checks the proxy's validity and geographic location, printing the proxy configuration and its location.

2. `backup_and_download`: backs up the current version and downloads the new one.

3. `patch_and_restart`: applies the update over the current version and restarts the program.

4. `get_current_version`: gets the current program version.

5. `auto_update`: automatically checks for a new version and prompts the user to update. If the user agrees, it backs up, downloads, applies the new version, and restarts; if the update fails, it prints an error without prompting the user further.

There is also an unnamed statement, `os.environ['no_proxy'] = '*'`, which sets an environment variable to avoid unexpected contamination from proxy networks.

The file imports three modules/functions: `requests`, `shutil`, `os`.

## [1/48] Overview of colorful.py:

A Python script for printing colored text in the console. It defines functions that print in different colors: red, green, yellow, blue, purple, and indigo via print红, print绿, print黄, print蓝, print紫, print靛, and bright variants via print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛. They use ANSI escape codes to highlight colored output in the console. On Linux nothing extra is done; otherwise the file imports the colorama library and calls init(). Finally, a series of conditionals reassigns all colored-print function names to plain print to avoid color artifacts when output goes to a file.

## [2/48] Overview of config.py:

This file handles configuration and parameter settings. It contains many settings, such as the API key, proxy usage, thread count, default model, and timeouts, plus advanced features such as URL redirection. These settings affect the program's behavior and performance.

## [3/48] Overview of config_private.py:

A Python script, config_private.py, assigning the following variables:

1. API_KEY: the API key.
2. USE_PROXY: whether to use a proxy.
3. proxies: if a proxy is used, the proxy network's protocol (socks5/http), address (localhost), and port (11284).
4. DEFAULT_WORKER_NUM: the default number of worker threads.
5. SLACK_CLAUDE_BOT_ID: the Slack bot ID.
6. SLACK_CLAUDE_USER_TOKEN: the Slack user token.

## [4/48] Overview of core_functional.py:

Defines a function get_core_functions() that returns a dictionary describing various academic translation and polishing tasks and their parameters, such as color, prefix, and suffix. Tasks include English academic polishing, Chinese academic polishing, grammar checking, Chinese-to-English, academic Chinese-English translation, English-to-Chinese, image finding, and reference-to-Bib conversion. Some tasks also define preprocessing functions for the input text.

## [5/48] Overview of crazy_functional.py:

A collection of function plugins, containing the definitions and invocations of many plugins that provide advanced features such as parsing project source code, batch-translating PDF documents, and polishing full LaTeX texts. Some plugins support hot updates and take effect without restarting. The plugins are grouped by function (first and second group) and invoked in different ways (as buttons or dropdown entries).

## [6/48] Overview of main.py:

Contains a main function that runs automatically. The program requires gradio, os, and other modules, and loads the proxy, model, API key, and other information from the configuration file. It provides a chatbot dialog interface where users enter questions and the chatbot answers or offers related features. It also provides a basic function area, a function plugin area, model/SysPrompt/layout switching, and an alternative input area, plus helper modules such as logging.

## [7/48] Overview of multi_language.py:

A program for translating the project into different languages. It contains the functions and variables lru_file_cache, contains_chinese, split_list, map_to_json, read_map_from_json, advanced_split, trans, trans_json, step_1_core_key_translate, CACHE_FOLDER, blacklist, LANG, TransPrompt, cached_translation, etc. Comments and docstrings explain how to use the program and how to modify the "LANG" and "TransPrompt" variables.

## [8/48] Overview of theme.py:

Defines a function adjust_theme that customizes the gradio application theme, adjusting colors, fonts, shadows, etc., and, if allowed, adds a Live2D mascot. It also defines advanced_css, containing CSS styles for code highlighting and custom chat-box styling, and imports get_conf and the gradio library.

## [9/48] Overview of toolbox.py:

A utility library of function decorators and small helper functions that support the chatbot's features: text processing, plugin loading, exception detection, Markdown conversion, file I/O, and so on. It also contains dependency and parameter configuration information and is easy to understand and maintain.

## [10/48] Overview of crazy_functions\crazy_functions_test.py:

A Python test module for the various plugins in crazy_functions, covering: parsing Python project source code, parsing Cpp project source code, LaTeX full-text polishing, Markdown Chinese-to-English, batch PDF translation, the Google Scholar assistant, Word document summarization, arxiv download plus abstract translation, internet-connected Q&A, and Jupyter Notebook parsing. Each plugin has a corresponding test function.

## [11/48] Overview of crazy_functions\crazy_utils.py:

This Python file includes two functions:

1. `input_clipping`: clips the input text length so that it does not exceed a given limit.
2. `request_gpt_model_in_new_thread_with_ui_alive`: requests the GPT model while keeping the user interface responsive; supports multi-threading and live UI updates.

Both functions rely on utility functions imported from `toolbox` and `request_llms`. Their inputs and outputs are documented in detail.

## [12/48] Overview of crazy_functions\Latex全文润色.py:

Contains a PaperFileGroup class and three functions: Latex英文润色 (English polishing), Latex中文润色 (Chinese polishing), and Latex英文纠错 (English correction). It uses string processing, regular expressions, file I/O, and multi-threading to polish and correct an entire LaTeX project, improving grammar, clarity, and overall readability. It also references third-party libraries and wraps some utility functions.

## [13/48] Overview of crazy_functions\Latex全文翻译.py:

Contains two functions, `Latex英译中` (English-to-Chinese) and `Latex中译英` (Chinese-to-English), which translate an entire LaTeX project, plus a class `PaperFileGroup` with a `run_file_split` method for splitting long text files into shorter ones. It uses some functions from the `toolbox` library and imports `model_info` from `request_llms`. The functions read the files in, strip their comments, split them, and translate them, with some exception handling and UI-update operations.

## [14/48] Overview of crazy_functions\__init__.py:

The initialization file (__init__.py) of the Python module "crazy_functions". The module contains a number of crazy functions, but this file does not implement them; it exists as a package marker so that other Python modules can be imported to implement those functions. No classes or functions are defined in it; its sole purpose is to mark "crazy_functions" as a package.

## [15/48] Overview of crazy_functions\下载arxiv论文翻译摘要.py:

Contains several functions; the main one, `下载arxiv论文并翻译摘要`, downloads the PDF of an `arxiv` paper, extracts the abstract, and translates it with GPT. Other functions include `download_arxiv_` for downloading `arxiv` papers and `get_name` for fetching article information, using third-party libraries such as requests and BeautifulSoup. The file also contains some code for debugging and file storage.

## [16/48] Overview of crazy_functions\代码重写为全英文_多线程.py:

A multi-threaded program that converts the Chinese content of all Python source files in a given directory to English and stores the converted code in new files. It uses GPT-3-class models for the Chinese-to-English conversion and applies token-limit handling to prevent errors. It prints progress messages during execution, stores all converted files in the target directory, and generates a task report after execution finishes.

## [17/48] Overview of crazy_functions\图片生成.py:

Provides an image-generation function, `图片生成`, which calls a `gen_image` helper to generate an image and returns the image URL and the local file path. Parameters include `prompt` (the prompt text), `llm_kwargs` (GPT model parameters), and `plugin_kwargs` (plugin parameters). The core code requests images from the OpenAI API via the `requests` library and performs simple processing and saving. The function also updates the UI: clearing the chat history, showing a "generating" message, and then the final image URL and preview.

## [18/48] Overview of crazy_functions\对话历史存档.py:

A Python file containing four functions:

1. write_chat_to_file(chatbot, history=None, file_name=None): writes the conversation record to a Markdown file, generating a file name from the current time if none is given, and prints the file path afterwards.

2. gen_file_preview(file_name): reads a file, parses the conversation history, and returns the first 100 characters for a file preview.

3. read_file_to_chat(chatbot, history, file_name): reads a file, parses the conversation history, and updates the chat display.

4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): the main function, which saves the current conversation record and notifies the user. If the user wants to load a history record, it calls read_file_to_chat() to update the chat display; if the user wants to delete history records, it calls 删除所有本地对话历史记录() to perform the deletion.

## [19/48] Overview of crazy_functions\总结word文档.py:

Implements Word-document summarization, reading docx files with Python's docx library and doc files with pywin32. The program searches for files matching the txt parameter, parses their content one by one, splits the content into segments of a given length, and produces Chinese summaries with request_gpt_model_in_new_thread_with_ui_alive from another module. All summaries are finally written to one file and displayed in the UI.

## [20/48] Overview of crazy_functions\总结音视频.py:

Contains two functions, split_audio_file() and AnalyAudio(), plus necessary imports and helper functions. split_audio_file splits an audio file into multiple equal-duration segments and returns the list of segment file paths; AnalyAudio analyzes an audio file by transcribing it with the whisper model and summarizing the content with a GPT model, writing all summaries to a results file.

## [21/48] Overview of crazy_functions\批量Markdown翻译.py:

Reads Markdown files, splits long text, translates the files (English-to-Chinese and Chinese-to-English), assembles the results, and exits. It uses multi-threading for efficiency and depends on the `tiktoken` library, which may need to be installed separately. The file also contains other functions and classes unrelated to what its name describes.

## [22/48] Overview of crazy_functions\批量总结PDF文档.py:

After importing a series of libraries and utility functions, this script defines five functions, including an error-handling decorator (@CatchException), for batch-summarizing PDF documents. The main function parses PDF documents and calls the model to generate Chinese and English abstracts.

## [23/48] Overview of crazy_functions\批量总结PDF文档pdfminer.py:

A function plugin for batch-summarizing PDF documents that extracts their text content with the pdfminer plugin and the BeautifulSoup library, processes each PDF separately, and generates Chinese and English abstracts. It also includes some helper utility functions and an exception-handling decorator.

## [24/48] Overview of crazy_functions\批量翻译PDF文档_多线程.py:

Uses toolbox, request_gpt_model_in_new_thread_with_ui_alive, request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency, colorful, and functions from the custom crazy_utils module. It batch-translates PDF documents: automatically parsing basic information from the PDFs, recursively splitting them, translating and processing all the content, and generating the corresponding result files (md and html). The feature is fairly complex, calling multiple functions and dependencies and involving multi-threading and UI updates; the code is well commented and clearly named, making it easy to read.

## [25/48] Overview of crazy_functions\理解PDF文档内容.py:

Implements a function named "understand PDF document content" that extracts the abstract and the main content of each section of an input PDF and answers academic questions based on the context during extraction. The function relies on several helper functions and third-party libraries and handles possible exceptions during execution.

## [26/48] Overview of crazy_functions\生成函数注释.py:

A Python module file defining two functions: the main 生成函数注释 (generate function comments) and 批量生成函数注释 (batch generation), which uses a decorator for exception catching. The file depends on toolbox and the local crazy_utils module and uses multi-threading and a GPT model at runtime to generate the comments. The generated comments are output as a Markdown table and written to the history file.

## [27/48] Overview of crazy_functions\联网的ChatGPT.py:

Defines a function `连接网络回答问题` that answers a given question by crawling search-engine results and visiting web pages, completing the answer with the ChatGPT model. The file also includes utility functions, for example for scraping text from web pages and accessing pages through a proxy.

## [28/48] Overview of crazy_functions\解析JupyterNotebook.py:

Contains two functions, `parseNotebook()` and `解析ipynb文件()`, plus some imported utility functions and classes. `parseNotebook()` parses a Jupyter Notebook file into text code blocks; `解析ipynb文件()` parses multiple Jupyter Notebook files, calling `parseNotebook()` on each and performing some additional processing. The functions use multi-threading for input and output and write the results to a file.

## [29/48] Overview of crazy_functions\解析项目源代码.py:

A source-code analysis file defining several functions, including parsing a Python project, a C project, a C project's header files, and a Java project. 解析源代码新 is the function that actually performs the analysis and generates the report: it reads each source file, builds the request content, sends it to chatgpt over multiple threads, writes the results to files, produces a consolidated analysis, and finally refreshes the interface by calling update_ui.

## [30/48] Overview of crazy_functions\询问多个大语言模型.py:

Contains two functions, 同时问询() and 同时问询_指定模型(), which process user input with multiple large language models simultaneously and return each model's reply. 同时问询() defaults to the ChatGPT and ChatGLM models, while 同时问询_指定模型() lets you specify which models to use. The file also references other modules and libraries.

## [31/48] Overview of crazy_functions\读文章写摘要.py:

A Python module containing two functions. The main function, 读文章写摘要, parses the tex files in a given folder, summarizes the content of each file, and generates a full-text abstract from the per-fragment summaries. The second function, 解析Paper, parses a single paper file. Utility functions and libraries such as update_ui, CatchException, report_exception, and write_results_to_file are used.

## [32/48] Overview of crazy_functions\谷歌检索小助手.py:
A Python module containing two functions: get_meta_information(), which extracts the metadata of all related academic publications from a given URL, and the main function 谷歌检索小助手(), which analyzes the articles appearing on a user-supplied Google Scholar search page and extracts the relevant information. 谷歌检索小助手() depends on get_meta_information() and calls other Python modules such as arxiv, math, and bs4.

## [33/48] Overview of crazy_functions\高级功能函数模板.py:

Defines a function named 高阶功能模板函数 (high-order function template), which takes several parameters, including the input text, GPT model parameters, plugin parameters, the chat-display handle, and the chat history, and sends requests that fetch related images via the Unsplash API. To avoid input overflow, the function clears the history at the start; it also contains some UI-update statements. The file depends on two other modules, CatchException and update_ui, and on request_gpt_model_in_new_thread_with_ui_alive from the crazy_utils module (apparently a custom utility package).

## [34/48] Overview of request_llms\bridge_all.py:

Contains two functions, predict and predict_no_ui_long_connection, for conversations based on different LLM models, plus a lazyloadTiktoken class and an LLM_CATCH_EXCEPTION decorator. lazyloadTiktoken lazily loads a model's tokenizer; LLM_CATCH_EXCEPTION handles errors. The file also defines global variables and a model-info dictionary used to reference and configure the LLM models.

## [35/48] Overview of request_llms\bridge_chatglm.py:

A Python file, `bridge_chatglm.py`, defining a class `GetGLMHandle` and three methods: `predict_no_ui_long_connection`, `predict`, and `stream_chat`. It depends on several Python libraries, such as `transformers` and `sentencepiece`, and implements a chatbot that generates replies with the ChatGLM model, supporting single- and multi-threaded modes. Loading the ChatGLM model and tokenizer at startup takes a while; settings in `config.py` affect memory and VRAM usage, so the program may freeze low-spec machines.

## [36/48] Overview of request_llms\bridge_chatgpt.py:

Provides three main functions, predict, predict_no_ui, and predict_no_ui_long_connection, for sending requests to chatGPT, waiting for the reply, and retrieving the output. The file also contains helper functions for handling connection errors, building HTTP requests, and so on. The code architecture is clear, with multiple custom functions and modules.

## [37/48] Overview of request_llms\bridge_jittorllms_llama.py:

Implements a chatbot using the JittorLLMs model, consisting of:
1. the GetGLMHandle class: a process class that loads the JittorLLMs model and receives and handles requests;
2. the predict_no_ui_long_connection function: a multi-threaded method that runs the chatbot in the background;
3. the predict function: a single-threaded method for interactive front-end calls, taking user input and returning the corresponding replies.

The file also contains helper functions and globals, such as importlib, time, and threading.

## [38/48] Overview of request_llms\bridge_jittorllms_pangualpha.py:

Implements chat functionality using jittorllms (a machine-learning model), covering model loading, model-parameter loading, and message exchange. Multi-processing and multi-threading are used to improve performance and efficiency, alongside dependency-handling and preprocessing functions.

## [39/48] Overview of request_llms\bridge_jittorllms_rwkv.py:

A Python program, request_llm\bridge_jittorllms_rwkv.py, depending on transformers, time, threading, importlib, multiprocessing, and other libraries. It interacts with the jittorllms model through a GetGLMHandle class that loads the model parameters and defines a stream_chat method, and it defines predict_no_ui_long_connection and predict methods to handle the history, call the jittorllms model, receive the reply, and output the result.

## [40/48] Overview of request_llms\bridge_moss.py:

A Python source file, request_llms\bridge_moss.py, defining a GetGLMHandle class and two functions, predict_no_ui_long_connection and predict.

GetGLMHandle inherits from the Process class (multi-processing); its main job is to start a child process that loads the MOSS model parameters, communicating with the parent process through a Pipe. The class also defines the check_dependency, moss_init, run, and stream_chat methods: check_dependency and moss_init initialize the child process, run is the child-process main loop, and stream_chat implements the parent-child interaction.

predict_no_ui_long_connection is a multi-threaded method that loads the MOSS parameters via GetGLMHandle and then interacts through stream_chat.

predict is a single-threaded method that live-updates MOSS's replies to the UI (User Interface) via update_ui and runs a named function (additional_fn) to preprocess the input.

## [41/48] Overview of request_llms\bridge_newbing.py:

A file named `bridge_newbing.py` with three parts:

Part one imports the `NewbingChatbot` class from the `edge_gpt` module with a from statement.

Part two defines `NewBingHandle`, a Process subclass that checks dependencies and starts the process, plus a multi-threaded `predict_no_ui_long_connection` method and a single-threaded `predict` method for communicating with NewBing.

Part three defines a global `newbing_handle` variable and exports the `predict_no_ui_long_connection` and `predict` methods for other programs to call.

## [42/48] Overview of request_llms\bridge_newbingfree.py:
This Python file has three parts. Part one is the chatbot program from the edge_gpt_free.py file. Part two is a Worker child process used to call the main body. Part three provides two functions, predict_no_ui_long_connection and predict, for calling the NewBing chatbot and returning responses; predict also provides parameters to control the chatbot's replies and update the UI.

## [43/48] Overview of request_llms\bridge_stackclaude.py:

A Python source file, request_llm\bridge_stackclaude.py, whose code falls into three main parts:

Part one defines a Slack API Client class implementing the sending, receiving, and polling of Slack messages for interacting with the Slack API.

Part two defines the ClaudeHandle class, inheriting from Process, which creates a Worker child process and calls the main body to implement Claude-user interaction.

Part three defines the predict_no_ui_long_connection and predict functions, which obtain Claude's replies mainly by calling the stream_chat method of a ClaudeHandle object and update the UI with the relevant information. predict is single-threaded, while predict_no_ui_long_connection is multi-threaded.

## [44/48] Overview of request_llms\bridge_tgui.py:

A Python code file, request_llm\bridge_tgui.py, containing functions for interacting with the chatbot UI and communicating with a remote LLM model over the WebSocket protocol to perform text generation; the most important functions are predict() and predict_no_ui_long_connection(). There are other helper functions, such as random_hash(). The file was modified once on a collaborative basis.

## [45/48] Overview of request_llms\edge_gpt.py:

A Python program for calling the Bing chatbot API, composed of several classes and helper functions. Given a conversation connection, it can ask questions within the dialogue, communicating with the remote service over websocket. It implements a chatbot that provides AI chat to the user.

## [46/48] Overview of request_llms\edge_gpt_free.py:

A conversation API that sends messages through Chathub and returns responses, making concurrent network requests with the aiohttp and httpx libraries. The code contains functions and constants mostly used to build request data and request headers, plus a Conversation class that implements the dialog interaction.

## [47/48] Overview of request_llms\test_llms.py:

A Python program for unit-testing the llm models. It imports the module "request_llms.bridge_newbingfree", calls its predict_no_ui_long_connection() function three times for prediction, and prints the results. Some commented-out code sections about model prediction remain.

## Briefly describe the functions of the following files in one Markdown table:
check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, multi_language.py, theme.py, toolbox.py, crazy_functions\crazy_functions_test.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py. Based on the analysis above, summarize the overall function of the program in one sentence.

| File | Description |
| ------ | ------ |
| check_proxy.py | Checks proxy validity and geographic location |
| colorful.py | Prints colored text to the console |
| config.py | Configuration and parameter settings |
| config_private.py | Private configuration and parameter settings |
| core_functional.py | Core functions and parameter settings |
| crazy_functional.py | Collection of advanced function plugins |
| main.py | A chatbot program offering academic translation, text processing, and other query services |
| multi_language.py | Recognizes and translates different languages |
| theme.py | Customizes the gradio application theme |
| toolbox.py | Utility library assisting various features |
| crazy_functions\crazy_functions_test.py | Tests the functions in crazy_functions |
| crazy_functions\crazy_utils.py | Utility functions for string processing, exception detection, Markdown conversion, etc. |
| crazy_functions\Latex全文润色.py | Polishes and corrects an entire LaTeX project |
| crazy_functions\Latex全文翻译.py | Translates an entire LaTeX project |
| crazy_functions\__init__.py | Module init file marking `crazy_functions` as a package |
| crazy_functions\下载arxiv论文翻译摘要.py | Downloads the PDF of an `arxiv` paper and extracts/translates its abstract |

These source files provide basic text and language processing, utility functions, and advanced plugins that let the chatbot handle complex academic text tasks, including polishing, translation, search, download, and parsing.

## Briefly describe the functions of the following files in one Markdown table:
crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\对话历史存档.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py. Based on the analysis above, summarize the overall function of the program in one sentence.

| File | Description |
| --- | --- |
| 代码重写为全英文_多线程.py | Converts Chinese content in Python source files to English |
| 图片生成.py | Generates images from prompt text with a GPT model |
| 对话历史存档.py | Writes each conversation record to a Markdown file |
| 总结word文档.py | Summarizes input Word documents |
| 总结音视频.py | Summarizes input audio/video files |
| 批量Markdown翻译.py | Translates Markdown files in a given directory between Chinese and English |
| 批量总结PDF文档.py | Splits and summarizes PDF files |
| 批量总结PDF文档pdfminer.py | Extracts text from PDF files and summarizes them |
| 批量翻译PDF文档_多线程.py | Translates PDF files in a given directory between Chinese and English |
| 理解PDF文档内容.py | Summarizes PDF files and answers questions about them |
| 生成函数注释.py | Automatically generates comments for Python functions |
| 联网的ChatGPT.py | Answers chat questions using a web crawler plus the ChatGPT model |
| 解析JupyterNotebook.py | Parses the code of Jupyter Notebooks |
| 解析项目源代码.py | Parses source code of specified programming languages |
| 询问多个大语言模型.py | Processes input and replies with multiple large language models |
| 读文章写摘要.py | Parses papers and generates full-text abstracts |

Overall function: provides a suite of text-, file-, and code-processing features, using various language models, multi-threading, network requests, and data parsing to improve efficiency and accuracy.

## Briefly describe the functions of the following files in one Markdown table:
crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llms\bridge_all.py, request_llms\bridge_chatglm.py, request_llms\bridge_chatgpt.py, request_llms\bridge_jittorllms_llama.py, request_llms\bridge_jittorllms_pangualpha.py, request_llms\bridge_jittorllms_rwkv.py, request_llms\bridge_moss.py, request_llms\bridge_newbing.py, request_llms\bridge_newbingfree.py, request_llms\bridge_stackclaude.py, request_llms\bridge_tgui.py, request_llms\edge_gpt.py, request_llms\edge_gpt_free.py, request_llms\test_llms.py. Based on the analysis above, summarize the overall function of the program in one sentence.

| File | Description |
| --- | --- |
| crazy_functions\谷歌检索小助手.py | Provides metadata for articles on a Google Scholar search page. |
| crazy_functions\高级功能函数模板.py | Replies to user input with related images via the Unsplash API. |
| request_llms\bridge_all.py | Conducts conversations based on different LLM models. |
| request_llms\bridge_chatglm.py | Generates replies with the ChatGLM model; supports single- and multi-threaded modes. |
| request_llms\bridge_chatgpt.py | Conducts conversations based on the GPT model. |
| request_llms\bridge_jittorllms_llama.py | Conducts conversations with the JittorLLMs model; supports single- and multi-threaded modes. |
| request_llms\bridge_jittorllms_pangualpha.py | Conducts conversations with the JittorLLMs model using multi-process and multi-threaded modes. |
| request_llms\bridge_jittorllms_rwkv.py | Implements chat with the JittorLLMs model, offering options such as history and parameter tuning. |
| request_llms\bridge_moss.py | Loads the MOSS model for conversations. |
| request_llms\bridge_newbing.py | Conducts conversations with the Newbing chatbot; supports single- and multi-threaded modes. |
| request_llms\bridge_newbingfree.py | Implements chatbot text generation based on the Bing chatbot API. |
| request_llms\bridge_stackclaude.py | Implements Claude-user interaction based on the Slack API. |
| request_llms\bridge_tgui.py | Connects the chatbot to the UI via websocket. |
| request_llms\edge_gpt.py | Calls the Bing chatbot API to provide chatbot services. |
| request_llms\edge_gpt_free.py | Implements a chatbot API using the aiohttp and httpx libraries. |
| request_llms\test_llms.py | Unit tests for the llm models. |
| Overall function | Implements different kinds of chatbots that generate text from input. |

diff --git a/docs/test_markdown_format.py b/docs/test_markdown_format.py deleted file mode 100644 index 8255478474a379fafd43a40278e6a8e802aae4f0..0000000000000000000000000000000000000000
--- a/docs/test_markdown_format.py
+++ /dev/null
@@ -1,167 +0,0 @@
sample = """
[1]: https://baike.baidu.com/item/%E8%B4%A8%E8%83%BD%E6%96%B9%E7%A8%8B/1884527 "质能方程(质能方程式)_百度百科"
[2]: https://www.zhihu.com/question/348249281 "如何理解质能方程 E=mc²? - 知乎"
[3]: https://zhuanlan.zhihu.com/p/32597385 "质能方程的推导与理解 - 知乎 - 知乎专栏"

你好,这是必应。质能方程是描述质量与能量之间的当量关系的方程[^1^][1]。用tex格式,质能方程可以写成$$E=mc^2$$,其中$E$是能量,$m$是质量,$c$是光速[^2^][2] [^3^][3]。
"""
import re


def preprocess_newbing_out(s):
    pattern = r"\^(\d+)\^"  # match ^number^
    pattern2 = r"\[(\d+)\]"  # match [number]

    def sub(m):
        return "\\[" + m.group(1) + "\\]"  # use the matched number as the replacement

    result = re.sub(pattern, sub, s)  # perform the substitution
    if "[1]" in result:
        # append the reference lines (those starting with "[") below a separator
        result += (
            "<br/><br/><hr/><small>"
            + "<br/>".join(
                [
                    re.sub(pattern2, sub, r)
                    for r in result.split("\n")
                    if r.startswith("[")
                ]
            )
            + "</small>"
        )
    return result


def close_up_code_segment_during_stream(gpt_reply):
    """
    While GPT is still streaming a code block (the opening ``` has been emitted
    but the closing ``` has not), append the missing closing ```.

    Args:
        gpt_reply (str): the reply string returned by the GPT model.

    Returns:
        str: a new string with the closing ``` of an unfinished code block appended.
    """
    if "```" not in gpt_reply:
        return gpt_reply
    if gpt_reply.endswith("```"):
        return gpt_reply

    # having excluded the two cases above, count the ``` marks
    segments = gpt_reply.split("```")
    n_mark = len(segments) - 1
    if n_mark % 2 == 1:
        # an odd count means we are inside a code block
        return gpt_reply + "\n```"
    else:
        return gpt_reply


import markdown
from latex2mathml.converter import convert as tex2mathml


def markdown_convertion(txt):
    """
    Convert Markdown-formatted text to HTML. If it contains math formulas, convert the formulas to HTML first.
    """
    pre = '<div class="markdown-body">'
    suf = "</div>"
    if txt.startswith(pre) and txt.endswith(suf):
        # warning: the input has already been converted; converting twice could cause problems
        return txt

    markdown_extension_configs = {
        "mdx_math": {
            "enable_dollar_delimiter": True,
            "use_gitlab_delimiters": False,
        },
    }
    find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'

    def tex2mathml_catch_exception(content, *args, **kwargs):
        # convert TeX to MathML, falling back to the raw TeX on failure
        try:
            content = tex2mathml(content, *args, **kwargs)
        except Exception:
            pass
        return content

    def replace_math_no_render(match):
        # keep the formula as easy-to-copy TeX instead of rendering it
        content = match.group(1)
        if "mode=display" in match.group(0):
            content = content.replace("\n", "</br>")
            return f'<font color="#00FF00">$$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$$</font>'
        else:
            return f'<font color="#00FF00">$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$</font>'

    def replace_math_render(match):
        # render the formula with latex2mathml
        content = match.group(1)
        if "mode=display" in match.group(0):
            if "\\begin{aligned}" in content:
                content = content.replace("\\begin{aligned}", "\\begin{array}")
                content = content.replace("\\end{aligned}", "\\end{array}")
                content = content.replace("&", " ")
            return tex2mathml_catch_exception(content, display="block")
        else:
            return tex2mathml_catch_exception(content)

    def markdown_bug_hunt(content):
        # fix an mdx_math quirk: a redundant nested <script> around display math
        content = content.replace(
            '<script type="math/tex">\n<script type="math/tex; mode=display">',
            '<script type="math/tex; mode=display">',
        )
        content = content.replace("</script>\n</script>", "</script>")
        return content

    if ("$" in txt) and ("```" not in txt):  # has $-delimited formulas and no ``` code fences
        # convert everything to html format
        split = markdown.markdown(text="---")
        convert_stage_1 = markdown.markdown(
            text=txt,
            extensions=["mdx_math", "fenced_code", "tables", "sane_lists"],
            extension_configs=markdown_extension_configs,
        )
        convert_stage_1 = markdown_bug_hunt(convert_stage_1)
        # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
        # 1. convert to easy-to-copy tex (do not render math)
        convert_stage_2_1, n = re.subn(
            find_equation_pattern,
            replace_math_no_render,
            convert_stage_1,
            flags=re.DOTALL,
        )
        # 2. convert to rendered equation
        convert_stage_2_2, n = re.subn(
            find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL
        )
        # cat them together
        return pre + convert_stage_2_1 + f"{split}" + convert_stage_2_2 + suf
    else:
        return (
            pre
            + markdown.markdown(
                txt, extensions=["fenced_code", "codehilite", "tables", "sane_lists"]
            )
            + suf
        )


sample = preprocess_newbing_out(sample)
sample = close_up_code_segment_during_stream(sample)
sample = markdown_convertion(sample)
with open("tmp.html", "w", encoding="utf8") as f:
    f.write(
        """
<!DOCTYPE html>
<head>
    <title>My Website</title>
</head>
<body>
"""
    )
    f.write(sample)
diff --git a/docs/translate_english.json b/docs/translate_english.json deleted file mode 100644 index c7e0e66d74a992aa38d1b36bc2eb2e5934200ab9..0000000000000000000000000000000000000000
--- a/docs/translate_english.json
+++ /dev/null
@@ -1,3010 +0,0 @@
{
    "print亮黄": "PrintBrightYellow",
    "print亮绿": "PrintBrightGreen",
    "print亮红": "PrintBrightRed",
    "print红": "PrintRed",
    "print绿": "PrintGreen",
    "print黄": "PrintYellow",
    "print蓝": "PrintBlue",
    "print紫": "PrintPurple",
    "print靛": "PrintIndigo",
    "print亮蓝": "PrintBrightBlue",
    "print亮紫": "PrintBrightPurple",
    "print亮靛": "PrintBrightIndigo",
    "读文章写摘要": "ReadArticleWriteSummary",
    "批量生成函数注释": "BatchGenerateFunctionComments",
    "生成函数注释": "GenerateFunctionComments",
    "解析项目本身": "ParseProjectItself",
    "解析项目源代码": "ParseProjectSourceCode",
    "解析一个Python项目": "ParsePythonProject",
    "解析一个C项目的头文件": "ParseCProjectHeaderFiles",
    "解析一个C项目": "ParseCProject",
    "解析一个Golang项目": "ParseGolangProject",
    "解析一个Rust项目": "ParseRustProject",
    "解析一个Java项目": "ParseJavaProject",
    "解析一个前端项目": "ParseFrontendProject",
    "高阶功能模板函数": "HighOrderFunctionTemplateFunctions",
    "高级功能函数模板": "AdvancedFunctionTemplate",
    "全项目切换英文": "SwitchToEnglishForTheWholeProject",
    "代码重写为全英文_多线程": "RewriteCodeToEnglish_MultiThreaded",
    "Latex英文润色": "EnglishProofreadingForLatex",
    "Latex全文润色": "FullTextProofreadingForLatex",
    "同时问询": "SimultaneousInquiry",
    "询问多个大语言模型": "InquiryMultipleLargeLanguageModels",
    "解析一个Lua项目": "ParsingLuaProject",
    "解析一个CSharp项目": "ParsingCSharpProject",
    "总结word文档": "SummarizingWordDocuments",
    "解析ipynb文件": "ParsingIpynbFiles",
    "解析JupyterNotebook": "ParsingJupyterNotebook",
    "对话历史存档": "ConversationHistoryArchive",
    "载入对话历史存档": "LoadConversationHistoryArchive",
    "删除所有本地对话历史记录": "DeleteAllLocalConversationHistoryRecords",
    "Markdown英译中": 
"TranslateMarkdownFromEnglishToChinese", - "批量Markdown翻译": "BatchTranslateMarkdown", - "批量总结PDF文档": "BatchSummarizePDFDocuments", - "批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsUsingPdfminer", - "批量翻译PDF文档": "BatchTranslatePDFDocuments", - "批量翻译PDF文档_多线程": "BatchTranslatePDFDocuments_MultiThreaded", - "谷歌检索小助手": "GoogleSearchAssistant", - "理解PDF文档内容标准文件输入": "UnderstandPdfDocumentContentStandardFileInput", - "理解PDF文档内容": "UnderstandPdfDocumentContent", - "Latex中文润色": "LatexChineseProofreading", - "Latex中译英": "LatexChineseToEnglish", - "Latex全文翻译": "LatexFullTextTranslation", - "Latex英译中": "LatexEnglishToChinese", - "Markdown中译英": "MarkdownChineseToEnglish", - "下载arxiv论文并翻译摘要": "DownloadArxivPaperAndTranslateAbstract", - "下载arxiv论文翻译摘要": "DownloadArxivPaperTranslateAbstract", - "连接网络回答问题": "ConnectToNetworkToAnswerQuestions", - "联网的ChatGPT": "ChatGPTConnectedToNetwork", - "解析任意code项目": "ParseAnyCodeProject", - "读取知识库作答": "ReadKnowledgeArchiveAnswerQuestions", - "知识库问答": "UpdateKnowledgeArchive", - "同时问询_指定模型": "InquireSimultaneously_SpecifiedModel", - "图片生成": "ImageGeneration", - "test_解析ipynb文件": "Test_ParseIpynbFile", - "把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline", - "清理多余的空行": "CleanUpExcessBlankLines", - "合并小写开头的段落块": "MergeLowercaseStartingParagraphBlocks", - "多文件润色": "ProofreadMultipleFiles", - "多文件翻译": "TranslateMultipleFiles", - "解析docx": "ParseDocx", - "解析PDF": "ParsePDF", - "解析Paper": "ParsePaper", - "ipynb解释": "IpynbExplanation", - "解析源代码新": "ParsingSourceCodeNew", - "避免代理网络产生意外污染": "Avoid unexpected pollution caused by proxy networks", - "无": "None", - "查询代理的地理位置": "Query the geographic location of the proxy", - "返回的结果是": "The returned result is", - "代理配置": "Proxy configuration", - "代理所在地": "Location of the proxy", - "未知": "Unknown", - "IP查询频率受限": "IP query frequency is limited", - "代理所在地查询超时": "Timeout when querying the location of the proxy", - "代理可能无效": "Proxy may be invalid", - "一键更新协议": "One-click protocol update", - "备份和下载": "Backup and download", - "覆盖和重启": "Overwrite and restart", - "由于您没有设置config_private.py私密配置": "Since you have not set the config_private.py private configuration", - "现将您的现有配置移动至config_private.py以防止配置丢失": "Now move your existing configuration to config_private.py to prevent configuration loss", - "另外您可以随时在history子文件夹下找回旧版的程序": "In addition, you can always retrieve the old version of the program in the history subfolder", - "代码已经更新": "Code has been updated", - "即将更新pip包依赖……": "Will update pip package dependencies soon...", - "pip包依赖安装出现问题": "Problem occurred during installation of pip package dependencies", - "需要手动安装新增的依赖库": "Need to manually install the newly added dependency library", - "然后在用常规的": "Then use the regular", - "的方式启动": "way to start", - "更新完成": "Update completed", - "您可以随时在history子文件夹下找回旧版的程序": "You can always retrieve the old version of the program in the history subfolder", - "5s之后重启": "Restart after 5 seconds", - "假如重启失败": "If restart fails", - "您可能需要手动安装新增的依赖库": "You may need to manually install new dependencies", - "查询版本和用户意见": "Check version and user feedback", - "新功能": "New features", - "新版本可用": "New version available", - "新版本": "New version", - "当前版本": "Current version", - "Github更新地址": "Github update address", - "是否一键更新代码": "Update code with one click?", - "Y+回车=确认": "Y+Enter=Confirm", - "输入其他/无输入+回车=不更新": "Enter other/No input+Enter=No update", - "更新失败": "Update failed", - "自动更新程序": "Automatic update program", - "已禁用": "Disabled", - "正在执行一些模块的预热": "Some modules are being preheated", - "模块预热": "Module preheating", 
- "例如": "For example", - "此key无效": "This key is invalid", - "可同时填写多个API-KEY": "Multiple API-KEYs can be filled in at the same time", - "用英文逗号分割": "Separated by commas", - "改为True应用代理": "Change to True to apply proxy", - "如果直接在海外服务器部署": "If deployed directly on overseas servers", - "此处不修改": "Do not modify here", - "填写格式是": "Format for filling in is", - "协议": "Protocol", - "地址": "Address", - "端口": "Port", - "填写之前不要忘记把USE_PROXY改成True": "Don't forget to change USE_PROXY to True before filling in", - "常见协议无非socks5h/http": "Common protocols are nothing but socks5h/http", - "例如 v2**y 和 ss* 的默认本地协议是socks5h": "For example, the default local protocol for v2**y and ss* is socks5h", - "而cl**h 的默认本地协议是http": "While the default local protocol for cl**h is http", - "懂的都懂": "Those who understand, understand", - "不懂就填localhost或者127.0.0.1肯定错不了": "If you don't understand, just fill in localhost or 127.0.0.1 and you won't go wrong", - "localhost意思是代理软件安装在本机上": "localhost means that the proxy software is installed on the local machine", - "在代理软件的设置里找": "Look for it in the settings of the proxy software", - "虽然不同的代理软件界面不一样": "Although the interface of different proxy software is different", - "但端口号都应该在最显眼的位置上": "But the port number should be in the most prominent position", - "代理网络的地址": "Address of the proxy network", - "打开你的*学*网软件查看代理的协议": "Open your *learning* software to view the proxy protocol", - "、地址": "and address", - "和端口": "and port", - "多线程函数插件中": "In the multi-threaded function plugin", - "默认允许多少路线程同时访问OpenAI": "How many threads are allowed to access OpenAI at the same time by default", - "Free trial users的限制是每分钟3次": "The limit for free trial users is 3 times per minute", - "Pay-as-you-go users的限制是每分钟3500次": "The limit for Pay-as-you-go users is 3500 times per minute", - "一言以蔽之": "In short", - "免费用户填3": "Free users should fill in 3", - "设置用户名和密码": "Set username and password", - "相关功能不稳定": "Related functions are unstable", - "与gradio版本和网络都相关": "Related to gradio version and network", - "如果本地使用不建议加这个": "Not recommended to add this for local use", - "重新URL重新定向": "Redirect URL", - "实现更换API_URL的作用": "Realize the function of changing API_URL", - "常规情况下": "Under normal circumstances", - "不要修改!!": "Do not modify!!", - "高危设置!通过修改此设置": "High-risk setting! 
By modifying this setting", - "您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!": "You will completely expose your API-KEY and conversation privacy to the middleman you set!", - "如果需要在二级路径下运行": "If you need to run under the second-level path", - "需要配合修改main.py才能生效!": "Need to be modified in conjunction with main.py to take effect!", - "如果需要使用newbing": "If you need to use newbing", - "把newbing的长长的cookie放到这里": "Put the long cookie of newbing here", - "sk-此处填API密钥": "sk-Fill in API key here", - "默认按钮颜色是 secondary": "The default button color is secondary", - "前言": "Preface", - "后语": "Postscript", - "按钮颜色": "Button color", - "预处理": "Preprocessing", - "清除换行符": "Remove line breaks", - "英语学术润色": "English academic polishing", - "中文学术润色": "Chinese academic polishing", - "查找语法错误": "Find syntax errors", - "中译英": "Chinese to English translation", - "学术中英互译": "Academic Chinese-English Translation", - "英译中": "English to Chinese translation", - "找图片": "Find image", - "解释代码": "Explain code", - "作为一名中文学术论文写作改进助理": "As a Chinese academic paper writing improvement assistant", - "你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性": "Your task is to improve the spelling, grammar, clarity, conciseness and overall readability of the provided text", - "同时分解长句": "Also, break down long sentences", - "减少重复": "Reduce repetition", - "并提供改进建议": "And provide improvement suggestions", - "请只提供文本的更正版本": "Please only provide corrected versions of the text", - "避免包括解释": "Avoid including explanations", - "请编辑以下文本": "Please edit the following text", - "翻译成地道的中文": "Translate into authentic Chinese", - "我需要你找一张网络图片": "I need you to find a web image", - "使用Unsplash API": "Use Unsplash API", - "英语关键词": "English keywords", - "获取图片URL": "Get image URL", - "然后请使用Markdown格式封装": "Then please wrap it in Markdown format", - "并且不要有反斜线": "And do not use backslashes", - "不要用代码块": "Do not use code blocks", - "现在": "Now", - "请按以下描述给我发送图片": "Please send me the image following the description below", - "请解释以下代码": "Please explain the following code", - "HotReload 的意思是热更新": "HotReload means hot update", - "修改函数插件后": "After modifying the function plugin", - "不需要重启程序": "No need to restart the program", - "代码直接生效": "The code takes effect directly", - "第一组插件": "First group of plugins", - "调用时": "When calling", - "唤起高级参数输入区": "Invoke the advanced parameter input area", - "默认False": "Default is False", - "高级参数输入区的显示提示": "Display prompt in the advanced parameter input area", - "加入下拉菜单中": "Add to the drop-down menu", - "修改函数插件代码后": "After modifying the function plugin code", - "第二组插件": "Second group of plugins", - "经过充分测试": "Fully tested", - "第三组插件": "Third group of plugins", - "尚未充分测试的函数插件": "Function plugins that have not been fully tested yet", - "放在这里": "Put it here", - "第n组插件": "Nth group of plugins", - "解析整个Python项目": "Parse the entire Python project", - "先上传存档或输入路径": "Upload archive or enter path first", - "请谨慎操作": "Please operate with caution", - "测试功能": "Test function", - "解析Jupyter Notebook文件": "Parse Jupyter Notebook files", - "批量总结Word文档": "Batch summarize Word documents", - "解析整个C++项目头文件": "Parse the entire C++ project header file", - "解析整个C++项目": "Parse the entire C++ project", - "解析整个Go项目": "Parse the entire Go project", - "解析整个Rust项目": "Parse the entire Rust project", - "解析整个Java项目": "Parse the entire Java project", - "解析整个前端项目": "Parse the entire front-end project", - "css等": "CSS, etc.", - "解析整个Lua项目": "Parse the entire Lua project", - "解析整个CSharp项目": "Parse the entire C# project", - "读Tex论文写摘要": "Read Tex paper and write abstract", - "Markdown/Readme英译中": "Translate
Markdown/Readme from English to Chinese", - "保存当前的对话": "Save the current conversation", - "多线程Demo": "Multithreading demo", - "解析此项目本身": "Parse this project itself", - "源码自译解": "Translate the source code", - "老旧的Demo": "Old demo", - "把本项目源代码切换成全英文": "Switch the source code of this project to English", - "插件demo": "Plugin demo", - "历史上的今天": "Today in history", - "若输入0": "If 0 is entered", - "则不解析notebook中的Markdown块": "Do not parse Markdown blocks in the notebook", - "多线程": "Multithreading", - "询问多个GPT模型": "Inquire multiple GPT models", - "谷歌学术检索助手": "Google Scholar search assistant", - "输入谷歌学术搜索页url": "Enter the URL of Google Scholar search page", - "模仿ChatPDF": "Imitate ChatPDF", - "英文Latex项目全文润色": "English Latex project full text proofreading", - "输入路径或上传压缩包": "Input path or upload compressed package", - "中文Latex项目全文润色": "Chinese Latex project full text proofreading", - "Latex项目全文中译英": "Latex project full text translation from Chinese to English", - "Latex项目全文英译中": "Latex project full text translation from English to Chinese", - "批量MarkdownChineseToEnglish": "Batch Markdown Chinese to English", - "一键DownloadArxivPaperAndTranslateAbstract": "One-click Download Arxiv Paper and Translate Abstract", - "先在input输入编号": "Enter the number in input first", - "如1812.10695": "e.g. 1812.10695", - "先输入问题": "Enter the question first", - "再点击按钮": "Then click the button", - "需要访问谷歌": "Access to Google is required", - "手动指定和筛选源代码文件类型": "Manually specify and filter the source code file type", - "输入时用逗号隔开": "Separate with commas when entering", - "*代表通配符": "* stands for wildcard", - "加了^代表不匹配": "Adding ^ means not matching", - "不输入代表全部匹配": "Not entering means matching all", - "手动指定询问哪些模型": "Manually specify which models to ask", - "支持任意数量的llm接口": "Support any number of llm interfaces", - "用&符号分隔": "Separate with & symbol", - "例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "e.g. chatglm&gpt-3.5-turbo&api2d-gpt-4", - "先切换模型到openai或api2d": "Switch the model to openai or api2d first", - "在这里输入分辨率": "Enter the resolution here", - "如1024x1024": "e.g. 
1024x1024", - "默认": "Default", - "建议您复制一个config_private.py放自己的秘密": "We suggest you copy a config_private.py file to keep your secrets, such as API and proxy URLs, from being accidentally uploaded to Github and seen by others.", - "如API和代理网址": "Such as API and proxy URLs", - "避免不小心传github被别人看到": "Avoid being accidentally uploaded to Github and seen by others", - "如果WEB_PORT是-1": "If WEB_PORT is -1", - "则随机选取WEB端口": "then a random port will be selected for WEB", - "问询记录": "Inquiry record", - "python 版本建议3.9+": "Python version recommended 3.9+", - "越新越好": "The newer the better", - "一些普通功能模块": "Some common functional modules", - "高级函数插件": "Advanced function plugins", - "处理markdown文本格式的转变": "Transformation of markdown text format", - "做一些外观色彩上的调整": "Make some adjustments in appearance and color", - "代理与自动更新": "Proxy and automatic update", - "功能区显示开关与功能区的互动": "Interaction between display switch and function area", - "整理反复出现的控件句柄组合": "Organize repeated control handle combinations", - "提交按钮、重置按钮": "Submit button, reset button", - "基础功能区的回调函数注册": "Registration of callback functions in basic function area", - "文件上传区": "File upload area", - "接收文件后与chatbot的互动": "Interaction with chatbot after receiving files", - "函数插件-固定按钮区": "Function plugin - fixed button area", - "函数插件-下拉菜单与随变按钮的互动": "Interaction between dropdown menu and dynamic button in function plugin", - "是否唤起高级插件参数区": "Whether to call the advanced plugin parameter area", - "随变按钮的回调函数注册": "Registration of callback functions for dynamic buttons", - "终止按钮的回调函数注册": "Callback function registration for the stop button", - "gradio的inbrowser触发不太稳定": "In-browser triggering of gradio is not very stable", - "回滚代码到原始的浏览器打开函数": "Roll back code to the original browser open function", - "打开浏览器": "Open browser", - "ChatGPT 学术优化": "ChatGPT academic optimization", - "代码开源和更新": "Code open source and updates", - "地址🚀": "Address 🚀", - "感谢热情的": "Thanks to the enthusiastic", - "开发者们❤️": "Developers ❤️", - "请注意自我隐私保护哦!": "Please pay attention to self-privacy protection!", - "当前模型": "Current model", - "输入区": "Input area", - "提交": "Submit", - "重置": "Reset", - "停止": "Stop", - "清除": "Clear", - "按Enter提交": "Submit by pressing Enter", - "按Shift+Enter换行": "Press Shift+Enter to line break", - "基础功能区": "Basic function area", - "函数插件区": "Function plugin area", - "注意": "Attention", - "以下“红颜色”标识的函数插件需从输入区读取路径作为参数": "The function plugins marked in 'red' below need to read the path from the input area as a parameter", - "更多函数插件": "More function plugins", - "打开插件列表": "Open plugin list", - "高级参数输入区": "Advanced parameter input area", - "这里是特殊函数插件的高级参数输入区": "Here is the advanced parameter input area for special function plugins", - "请先从插件列表中选择": "Please select from the plugin list first", - "点击展开“文件上传区”": "Click to expand the 'file upload area'", - "上传本地文件可供红色函数插件调用": "Upload local files for red function plugins to use", - "任何文件": "Any file", - "但推荐上传压缩文件": "But it is recommended to upload compressed files", - "更换模型 & SysPrompt & 交互界面布局": "Change model & SysPrompt & interactive interface layout", - "浮动输入区": "Floating input area", - "输入清除键": "Input clear key", - "插件参数区": "Plugin parameter area", - "显示/隐藏功能区": "Show/hide function area", - "更换LLM模型/请求源": "Change LLM model/request source", - "备选输入区": "Alternative input area", - "输入区2": "Input area 2", - "已重置": "Reset", - "插件": "Plugin", - "的高级参数说明": "Advanced parameter description for plugin", - "没有提供高级参数功能说明": "No advanced parameter function description provided", - "不需要高级参数": "No advanced parameters needed", - "如果浏览器没有自动打开": "If the
browser does not open automatically", - "请复制并转到以下URL": "Please copy and go to the following URL", - "亮色主题": "Light theme", - "暗色主题": "Dark theme", - "一-鿿": "一-鿿", - "GPT输出格式错误": "GPT output format error", - "稍后可能需要再试一次": "May need to try again later", - "gradio可用颜色列表": "Gradio available color list", - "石板色": "Slate color", - "灰色": "Gray", - "锌色": "Zinc color", - "中性色": "Neutral color", - "石头色": "Stone color", - "红色": "Red", - "橙色": "Orange", - "琥珀色": "Amber", - "黄色": "Yellow", - "酸橙色": "Lime color", - "绿色": "Green", - "祖母绿": "Emerald", - "青蓝色": "Cyan blue", - "青色": "Cyan", - "天蓝色": "Sky blue", - "蓝色": "Blue", - "靛蓝色": "Indigo", - "紫罗兰色": "Violet", - "紫色": "Purple", - "洋红色": "Magenta", - "粉红色": "Pink", - "玫瑰色": "Rose", - "添加一个萌萌的看板娘": "Add a cute mascot", - "gradio版本较旧": "Gradio version is outdated", - "不能自定义字体和颜色": "Cannot customize font and color", - "引入一个有cookie的chatbot": "Introduce a chatbot with cookies", - "刷新界面": "Refresh the page", - "稍微留一点余地": "Leave a little room", - "否则在回复时会因余量太少出问题": "Otherwise, there will be problems with insufficient space when replying", - "这个bug没找到触发条件": "The trigger condition for this bug has not been found", - "暂时先这样顶一下": "Temporarily handle it this way", - "使用 lru缓存 加快转换速度": "Use LRU cache to speed up conversion", - "输入了已经经过转化的字符串": "Input a string that has already been converted", - "已经被转化过": "Has already been converted", - "不需要再次转化": "No need to convert again", - "有$标识的公式符号": "Formula symbol with $ sign", - "且没有代码段": "And there is no code section", - "的标识": "Identifier of", - "排除了以上两个情况": "Exclude the above two cases", - "我们": "We", - "输入部分太自由": "The input part is too free", - "预处理一波": "Preprocess it", - "当代码输出半截的时候": "When the code output is halfway", - "试着补上后个": "Try to fill in the latter", - "第三方库": "Third-party library", - "需要预先pip install rarfile": "Need to pip install rarfile in advance", - "此外": "In addition", - "Windows上还需要安装winrar软件": "WinRAR software needs to be installed on Windows", - "配置其Path环境变量": "Configure its Path environment variable", - "需要预先pip install py7zr": "Need to pip install py7zr in advance", - "随机负载均衡": "Random load balancing", - "优先级1. 获取环境变量作为配置": "Priority 1. Get environment variables as configuration", - "读取默认值作为数据类型转换的参考": "Read the default value as a reference for data type conversion", - "优先级2. 获取config_private中的配置": "Priority 2. Get the configuration in config_private", - "优先级3. 获取config中的配置": "Priority 3. Get the configuration in config", - "在读取API_KEY时": "When reading API_KEY", - "检查一下是不是忘了改config": "Check if you forgot to change the config", - "当输入部分的token占比小于限制的3/4时": "When the token proportion of the input part is less than 3/4 of the limit", - "裁剪时": "When trimming", - "1. 把input的余量留出来": "1. Leave the surplus of input", - "2. 把输出用的余量留出来": "2. Leave the surplus used for output", - "3. 如果余量太小了": "3.
If the surplus is too small", - "直接清除历史": "Clear the history directly", - "当输入部分的token占比": "When the token proportion of the input part", - "限制的3/4时": "is 3/4 of the limit", - "截断时的颗粒度": "Granularity when truncating", - "第一部分": "First part", - "函数插件输入输出接驳区": "Function plugin input and output docking area", - "带Cookies的Chatbot类": "Chatbot class with cookies", - "为实现更多强大的功能做基础": "Laying the foundation for implementing more powerful functions", - "装饰器函数": "Decorator function", - "用于重组输入参数": "Used to restructure input parameters", - "改变输入参数的顺序与结构": "Change the order and structure of input parameters", - "刷新界面用 yield from update_ui": "Refresh the interface using yield from update_ui", - "将插件中出的所有问题显示在界面上": "Display all questions from the plugin on the interface", - "实现插件的热更新": "Implement hot update of the plugin", - "打印traceback": "Print traceback", - "为了安全而隐藏绝对地址": "Hide absolute address for security reasons", - "正常": "Normal", - "刷新用户界面": "Refresh the user interface", - "在传递chatbot的过程中不要将其丢弃": "Do not discard it when passing the chatbot", - "必要时": "If necessary", - "可用clear将其清空": "It can be cleared with clear if necessary", - "然后用for+append循环重新赋值": "Then reassign with for+append loop", - "捕捉函数f中的异常并封装到一个生成器中返回": "Capture exceptions in function f and encapsulate them into a generator to return", - "并显示到聊天当中": "And display it in the chat", - "插件调度异常": "Plugin scheduling exception", - "异常原因": "Exception reason", - "当前代理可用性": "Current proxy availability", - "异常": "Exception", - "将文本按照段落分隔符分割开": "Split the text into paragraphs according to the paragraph separator", - "生成带有段落标签的HTML代码": "Generate HTML code with paragraph tags", - "用多种方式组合": "Combine in various ways", - "将markdown转化为好看的html": "Convert markdown to nice-looking HTML", - "接管gradio默认的markdown处理方式": "Take over the default markdown handling of gradio", - "处理文件的上传": "Handle file uploads", - "自动解压": "Automatically decompress", - "将生成的报告自动投射到文件上传区": "Automatically project the generated report to the file upload area", - "当历史上下文过长时": "Automatically truncate when the historical context is too long", - "自动截断": "Automatic truncation", - "获取设置": "Get settings", - "根据当前的模型类别": "According to the current model category", - "抽取可用的api-key": "Extract available API keys", - "* 此函数未来将被弃用": "* This function will be deprecated in the future", - "不详": "Unknown", - "将对话记录history以Markdown格式写入文件中": "Write the conversation record history to a file in Markdown format", - "如果没有指定文件名": "If no file name is specified", - "则使用当前时间生成文件名": "Generate a file name using the current time", - "chatGPT分析报告": "chatGPT analysis report", - "chatGPT 分析报告": "chatGPT analysis report", - "以上材料已经被写入": "The above materials have been written", - "向chatbot中添加错误信息": "Add error information to the chatbot", - "将Markdown格式的文本转换为HTML格式": "Convert Markdown format text to HTML format", - "如果包含数学公式": "If it contains mathematical formulas", - "则先将公式转换为HTML格式": "Convert the formula to HTML format first", - "解决一个mdx_math的bug": "Fix a bug in mdx_math", - "单$包裹begin命令时多余": "Redundant when wrapping begin command with single $", - "在gpt输出代码的中途": "In the middle of outputting code with GPT", - "输出了前面的": "Output the front part", - "但还没输出完后面的": "But haven't output the back part yet", - "补上后面的": "Complete the back part", - "GPT模型返回的回复字符串": "Reply string returned by GPT model", - "返回一个新的字符串": "Return a new string", - "将输出代码片段的“后面的": "Append the back part of output code snippet", - "”补上": "to it", - "将输入和输出解析为HTML格式": "Parse input and output as HTML format", - "将y中最后一项的输入部分段落化": "Paragraphize the input part 
of the last item in y", - "并将输出部分的Markdown和数学公式转换为HTML格式": "And convert the output part of Markdown and math formulas to HTML format", - "返回当前系统中可用的未使用端口": "Return an available unused port in the current system", - "需要安装pip install rarfile来解压rar文件": "Need to install pip install rarfile to extract rar files", - "需要安装pip install py7zr来解压7z文件": "Need to install pip install py7zr to extract 7z files", - "当文件被上传时的回调函数": "Callback function when a file is uploaded", - "我上传了文件": "I uploaded a file", - "请查收": "Please check", - "收到以下文件": "Received the following files", - "调用路径参数已自动修正到": "The call path parameter has been automatically corrected to", - "现在您点击任意“红颜色”标识的函数插件时": "Now when you click any function plugin with a 'red' label", - "以上文件将被作为输入参数": "The above files will be used as input parameters", - "汇总报告如何远程获取": "How to remotely access the summary report", - "汇总报告已经添加到右侧“文件上传区”": "The summary report has been added to the 'file upload area' on the right", - "可能处于折叠状态": "It may be in a collapsed state", - "检测到": "Detected", - "个": "items", - "您提供的api-key不满足要求": "The api-key you provided does not meet the requirements", - "不包含任何可用于": "Does not contain any that can be used for", - "的api-key": "api-key", - "您可能选择了错误的模型或请求源": "You may have selected the wrong model or request source", - "环境变量可以是": "Environment variables can be", - "优先": "preferred", - "也可以直接是": "or can be directly", - "例如在windows cmd中": "For example, in windows cmd", - "既可以写": "it can be written as", - "也可以写": "or as", - "尝试加载": "Attempting to load", - "默认值": "Default value", - "修正值": "Corrected value", - "环境变量": "Environment variable", - "不支持通过环境变量设置!": "Setting through environment variables is not supported!", - "加载失败!": "Loading failed!", - "如": " e.g., ", - "成功读取环境变量": "Successfully read environment variable: ", - "本项目现已支持OpenAI和API2D的api-key": "This project now supports api-keys for OpenAI and API2D", - "也支持同时填写多个api-key": "It also supports filling in multiple api-keys at the same time", - "您既可以在config.py中修改api-key": "You can modify the api-key in config.py", - "也可以在问题输入区输入临时的api-key": "You can also enter a temporary api-key in the question input area", - "然后回车键提交后即可生效": "After submitting with the enter key, it will take effect", - "您的 API_KEY 是": "Your API_KEY is", - "*** API_KEY 导入成功": "*** API_KEY imported successfully", - "请在config文件中修改API密钥之后再运行": "Please modify the API key in the config file before running", - "网络代理状态": "Network proxy status", - "未配置": "Not configured", - "无代理状态下很可能无法访问OpenAI家族的模型": "Without a proxy, you will most likely be unable to access OpenAI-family models", - "建议": "Suggestion", - "检查USE_PROXY选项是否修改": "Check if the USE_PROXY option has been modified", - "已配置": "Configured", - "配置信息如下": "Configuration information is as follows", - "proxies格式错误": "Proxies format error", - "请注意proxies选项的格式": "Please note the format of the proxies option", - "不要遗漏括号": "Do not miss the parentheses", - "这段代码定义了一个名为DummyWith的空上下文管理器": "This code defines an empty context manager named DummyWith", - "它的作用是……额……就是不起作用": "Its purpose is...um...to not do anything", - "即在代码结构不变得情况下取代其他的上下文管理器": "That is, to replace other context managers without changing the code structure", - "上下文管理器是一种Python对象": "Context managers are a type of Python object", - "用于与with语句一起使用": "Used in conjunction with the with statement", - "以确保一些资源在代码块执行期间得到正确的初始化和清理": "To ensure that some resources are properly initialized and cleaned up during code block execution", - "上下文管理器必须实现两个方法": "Context managers must implement two methods", - "分别为 __enter__": "They are __enter__", - "和 __exit__": "and __exit__", - "在上下文执行开始的情况下": "At the
beginning of the context execution", - "方法会在代码块被执行前被调用": "The method is called before the code block is executed", - "而在上下文执行结束时": "While at the end of the context execution", - "方法则会被调用": "The method is called", - "把gradio的运行地址更改到指定的二次路径上": "Change the running address of Gradio to the specified secondary path", - "通过裁剪来缩短历史记录的长度": "Shorten the length of the history by trimming", - "此函数逐渐地搜索最长的条目进行剪辑": "This function gradually searches for the longest entry to clip", - "直到历史记录的标记数量降低到阈值以下": "Until the number of history markers is reduced to below the threshold", - "应急食品是“原神”游戏中的角色派蒙的外号": "Emergency Food is the nickname of the character Paimon in the game Genshin Impact", - "安全第一条": "Safety first", - "后面两句是": "The next two sentences are", - "亲人两行泪": "Two lines of tears for loved ones", - "test_解析一个Cpp项目": "test_Parse a Cpp project", - "test_联网回答问题": "test_Answer questions online", - "这是什么": "What is this?", - "这个文件用于函数插件的单元测试": "This file is used for unit testing of function plugins", - "运行方法 python crazy_functions/crazy_functions_test.py": "Run the command 'python crazy_functions/crazy_functions_test.py'", - "AutoGPT是什么": "What is AutoGPT?", - "当前问答": "Current Q&A", - "程序完成": "Program completed", - "回车退出": "Press Enter to exit", - "退出": "Exit", - "当 输入部分的token占比 小于 全文的一半时": "When the proportion of tokens in the input part is less than half of the entire text", - "只裁剪历史": "Trim only history", - "用户反馈": "User feedback", - "第一种情况": "First scenario", - "顺利完成": "Completed smoothly", - "第二种情况": "Second scenario", - "Token溢出": "Token overflow", - "选择处理": "Choose processing", - "尝试计算比例": "Attempt to calculate ratio", - "尽可能多地保留文本": "Retain text as much as possible", - "返回重试": "Return and retry", - "选择放弃": "Choose to give up", - "放弃": "Give up", - "第三种情况": "Third scenario", - "其他错误": "Other errors", - "重试几次": "Retry several times", - "提交任务": "Submit task", - "yield一次以刷新前端页面": "Yield once to refresh the front-end page", - "“喂狗”": "Feed the dog", - "看门狗": "Watchdog", - "如果最后成功了": "If successful in the end", - "则删除报错信息": "Delete error message", - "读取配置文件": "Read configuration file", - "屏蔽掉 chatglm的多线程": "Disable chatglm's multi-threading", - "可能会导致严重卡顿": "May cause serious lag", - "跨线程传递": "Cross-thread communication", - "子线程任务": "Sub-thread task", - "也许等待十几秒后": "Perhaps after waiting for more than ten seconds", - "情况会好转": "The situation will improve", - "开始重试": "Start retrying", - "异步任务开始": "Asynchronous task starts", - "更好的UI视觉效果": "Better UI visual effects", - "每个线程都要“喂狗”": "Each thread needs to \"feed the dog\"", - "在前端打印些好玩的东西": "Print some fun things in the front end", - "异步任务结束": "Asynchronous task ends", - "是否在结束时": "Whether to display the result on the interface when ending", - "在界面上显示结果": "Display the result on the interface", - "递归": "Recursion", - "列表递归接龙": "List recursion chaining", - "第1次尝试": "1st attempt", - "将双空行": "Use double blank lines as splitting points", - "作为切分点": "As a splitting point", - "第2次尝试": "2nd attempt", - "将单空行": "Use single blank lines", - "第3次尝试": "3rd attempt", - "将英文句号": "Use English periods", - "这个中文的句号是故意的": "This Chinese period is intentional", - "作为一个标识而存在": "Exists as an identifier", - "第4次尝试": "4th attempt", - "将中文句号": "Chinese period", - "第5次尝试": "5th attempt", - "没办法了": "No other way", - "随便切一下敷衍吧": "Cut it randomly and perfunctorily", - "Index 0 文本": "Index 0 Text", - "Index 1 字体": "Index 1 Font", - "Index 2 框框": "Index 2 Box", - "是否丢弃掉 不是正文的内容": "Whether to discard non-main text content", - "比正文字体小": "Smaller than main text font", - "如参考文献、脚注、图注等": "Such 
as references, footnotes, captions, etc.", - "小于正文的": "Less than main text", - "时": "When", - "判定为不是正文": "Determined as non-main text", - "有些文章的正文部分字体大小不是100%统一的": "In some articles, the font size of the main text is not 100% consistent", - "有肉眼不可见的小变化": "Small changes invisible to the naked eye", - "第 1 步": "Step 1", - "搜集初始信息": "Collect initial information", - "获取页面上的文本信息": "Get text information on the page", - "块元提取": "Block element extraction", - "第 2 步": "Step 2", - "获取正文主字体": "Get main text font", - "第 3 步": "Step 3", - "切分和重新整合": "Split and reassemble", - "尝试识别段落": "Attempt to identify paragraphs", - "单行 + 字体大": "Single line + Large font", - "尝试识别section": "Attempt to recognize section", - "第 4 步": "Step 4", - "乱七八糟的后处理": "Messy post-processing", - "清除重复的换行": "Remove duplicate line breaks", - "换行 -": "Line break -", - "双换行": "Double line break", - "第 5 步": "Step 5", - "展示分割效果": "Display segmentation effect", - "网络的远程文件": "Remote file on the network", - "直接给定文件": "Directly given file", - "本地路径": "Local path", - "递归搜索": "Recursive search", - "请求GPT模型同时维持用户界面活跃": "Request GPT model while keeping the user interface active", - "输入参数 Args": "Input parameter Args", - "以_array结尾的输入变量都是列表": "Input variables ending in _array are all lists", - "列表长度为子任务的数量": "The length of the list is the number of sub-tasks", - "执行时": "When executing", - "会把列表拆解": "The list will be broken down", - "放到每个子线程中分别执行": "And executed separately in each sub-thread", - "输入": "Input", - "展现在报告中的输入": "Input displayed in the report", - "借助此参数": "With the help of this parameter", - "在汇总报告中隐藏啰嗦的真实输入": "Hide verbose real input in the summary report", - "增强报告的可读性": "Enhance the readability of the report", - "GPT参数": "GPT parameters", - "浮点数": "Floating point number", - "用户界面对话窗口句柄": "Handle of the user interface dialog window", - "用于数据流可视化": "Used for data flow visualization", - "历史": "History", - "对话历史列表": "List of conversation history", - "系统输入": "System input", - "列表": "List", - "用于输入给GPT的前提提示": "Prompt for input to GPT", - "比如你是翻译官怎样怎样": "For example, if you are a translator, how to...", - "刷新时间间隔频率": "Refresh time interval frequency", - "建议低于1": "Suggested to be less than 1", - "不可高于3": "Cannot be higher than 3", - "仅仅服务于视觉效果": "Only serves for visual effects", - "是否自动处理token溢出的情况": "Whether to automatically handle token overflow", - "如果选择自动处理": "If selected to handle automatically", - "则会在溢出时暴力截断": "It will be forcefully truncated when overflow occurs", - "默认开启": "Default enabled", - "失败时的重试次数": "Number of retries when failed", - "输出 Returns": "Output Returns", - "输出": "Output", - "GPT返回的结果": "Result returned by GPT", - "检测到程序终止": "Program termination detected", - "警告": "Warning", - "文本过长将进行截断": "Text will be truncated if too long", - "Token溢出数": "Token overflow count", - "在执行过程中遭遇问题": "Encountered a problem during execution", - "重试中": "Retrying", - "请稍等": "Please wait", - "请求GPT模型的": "Requesting GPT model", - "版": "version", - "具备以下功能": "Features include", - "实时在UI上反馈远程数据流": "Real-time feedback of remote data streams on UI", - "使用线程池": "Using thread pool", - "可调节线程池的大小避免openai的流量限制错误": "The size of the thread pool can be adjusted to avoid openai traffic limit errors", - "处理中途中止的情况": "Handling mid-process interruptions", - "网络等出问题时": "When there are network issues", - "会把traceback和已经接收的数据转入输出": "Traceback and received data will be outputted", - "每个子任务的输入": "Input for each subtask", - "每个子任务展现在报告中的输入": "Input displayed in the report for each subtask", - "llm_kwargs参数": "llm_kwargs parameter", - "历史对话输入": "Historical 
conversation input", - "双层列表": "Double-layer list", - "第一层列表是子任务分解": "The first layer of the list is the decomposition of subtasks", - "第二层列表是对话历史": "The second layer of the list is the conversation history", - "最大线程数": "Maximum number of threads", - "如果子任务非常多": "If there are many subtasks", - "需要用此选项防止高频地请求openai导致错误": "Use this option to prevent frequent requests to OpenAI that may cause errors", - "数据流的显示最后收到的多少个字符": "Display the last few characters received in the data stream", - "是否在输入过长时": "Automatically truncate text when input is too long", - "自动缩减文本": "Automatically shorten the text", - "在结束时": "At the end", - "把完整输入-输出结果显示在聊天框": "Display the complete input-output results in the chat box", - "子任务失败时的重试次数": "Number of retries when a subtask fails", - "每个子任务的输出汇总": "Summary of output for each subtask", - "如果某个子任务出错": "If a subtask encounters an error", - "response中会携带traceback报错信息": "Traceback error information will be included in the response", - "方便调试和定位问题": "Facilitate debugging and problem locating", - "请开始多线程操作": "Please start multi-threaded operation", - "等待中": "Waiting", - "执行中": "Executing", - "已成功": "Successful", - "截断重试": "Truncated retry", - "线程": "Thread", - "此线程失败前收到的回答": "Answer received by this thread before failure", - "输入过长已放弃": "Input is too long and has been abandoned", - "OpenAI绑定信用卡可解除频率限制": "Binding a credit card to OpenAI can remove frequency restrictions", - "等待重试": "Waiting for retry", - "已失败": "Failed", - "多线程操作已经开始": "Multi-threaded operation has started", - "完成情况": "Completion status", - "存在一行极长的文本!": "There is an extremely long line of text!", - "当无法用标点、空行分割时": "When punctuation and blank lines cannot be used for separation", - "我们用最暴力的方法切割": "We use the most brutal method to cut", - "Tiktoken未知错误": "Tiktoken unknown error", - "这个函数用于分割pdf": "This function is used to split PDF", - "用了很多trick": "Used a lot of tricks", - "逻辑较乱": "The logic is messy", - "效果奇好": "The effect is very good", - "**输入参数说明**": "**Input Parameter Description**", - "需要读取和清理文本的pdf文件路径": "The path of the PDF file that needs to be read and cleaned", - "**输出参数说明**": "**Output Parameter Description**", - "清理后的文本内容字符串": "Cleaned text content string", - "第一页清理后的文本内容列表": "List of cleaned text content on the first page", - "**函数功能**": "**Functionality**", - "读取pdf文件并清理其中的文本内容": "Read the PDF file and clean its text content", - "清理规则包括": "Cleaning rules include", - "提取所有块元的文本信息": "Extract text information from all block elements", - "并合并为一个字符串": "And merge into one string", - "去除短块": "Remove short blocks", - "字符数小于100": "Character count is less than 100", - "并替换为回车符": "And replace with a carriage return", - "合并小写字母开头的段落块并替换为空格": "Merge paragraph blocks that start with lowercase letters and replace with spaces", - "将每个换行符替换为两个换行符": "Replace each line break with two line breaks", - "使每个段落之间有两个换行符分隔": "Separate each paragraph with two line breaks", - "提取文本块主字体": "Main font of extracted text block", - "提取字体大小是否近似相等": "Whether the font sizes of extracted text are approximately equal", - "这个函数是用来获取指定目录下所有指定类型": "This function is used to get all files of a specified type in a specified directory", - "如.md": "such as .md", - "的文件": "files", - "并且对于网络上的文件": "and for files on the internet", - "也可以获取它": "it can also be obtained", - "下面是对每个参数和返回值的说明": "Below are explanations for each parameter and return value", - "参数": "Parameters", - "路径或网址": "Path or URL", - "表示要搜索的文件或者文件夹路径或网络上的文件": "Indicates the file or folder path to be searched or the file on the internet", - "字符串": "String", - "表示要搜索的文件类型":
"Indicates the file type to be searched", - "默认是.md": "default is .md", - "返回值": "Return value", - "布尔值": "Boolean value", - "表示函数是否成功执行": "Indicates whether the function is executed successfully", - "文件路径列表": "List of file paths", - "里面包含以指定类型为后缀名的所有文件的绝对路径": "Contains the absolute paths of all files with the specified type as the suffix", - "表示文件所在的文件夹路径": "Indicates the folder path where the file is located", - "如果是网络上的文件": "If it is a file on the internet", - "就是临时文件夹的路径": "it is the path of the temporary folder", - "该函数详细注释已添加": "Detailed comments for this function have been added", - "请确认是否满足您的需要": "Please confirm if it meets your needs", - "读取Latex文件": "Read Latex file", - "删除其中的所有注释": "Remove all comments from it", - "定义注释的正则表达式": "Define the regular expression of comments", - "使用正则表达式查找注释": "Use regular expressions to find comments", - "并替换为空字符串": "And replace them with an empty string", - "记录删除注释后的文本": "Record the text after removing comments", - "拆分过长的latex文件": "Split long latex files", - "抽取摘要": "Extract abstract", - "单线": "Single line", - "获取文章meta信息": "Get article meta information", - "多线程润色开始": "Multithreading polishing begins", - "并行任务数量限制": "Parallel task number limit", - "最多同时执行5个": "Up to 5 can be executed at the same time", - "其他的排队等待": "Others are queued and waiting", - "整理结果": "Organize the results", - "基本信息": "Basic information", - "功能、贡献者": "Function, contributor", - "尝试导入依赖": "Attempt to import dependencies", - "如果缺少依赖": "If dependencies are missing", - "则给出安装建议": "Give installation suggestions", - "清空历史": "Clear history", - "以免输入溢出": "To avoid input overflow", - "将长文本分离开来": "Separate long text", - "以下是一篇学术论文中的一段内容": "The following is a paragraph from an academic paper", - "请将此部分润色以满足学术标准": "Please polish this section to meet academic standards", - "提高语法、清晰度和整体可读性": "Improve grammar, clarity, and overall readability", - "不要修改任何LaTeX命令": "Do not modify any LaTeX commands", - "例如\\section": "such as \\section", - "\\cite和方程式": "\\cite and equations", - "润色": "Polishing", - "你是一位专业的中文学术论文作家": "You are a professional Chinese academic paper writer", - "完成了吗": "Are you done?", - "函数插件功能": "Function plugin feature", - "对整个Latex项目进行润色": "Polish the entire Latex project", - "函数插件贡献者": "Function plugin contributor", - "解析项目": "Parsing project", - "导入软件依赖失败": "Failed to import software dependencies", - "使用该模块需要额外依赖": "Using this module requires additional dependencies", - "安装方法": "Installation method", - "空空如也的输入栏": "Empty input field", - "找不到本地项目或无权访问": "Cannot find local project or do not have access", - "找不到任何.tex文件": "Cannot find any .tex files", - "OpenAI所允许的最大并行过载": "Maximum parallel overload allowed by OpenAI", - "翻译": "Translation", - "对整个Latex项目进行翻译": "Translate the entire Latex project", - "提取摘要": "Extract abstract", - "下载PDF文档": "Download PDF document", - "翻译摘要等": "Translate abstract, etc.", - "写入文件": "Writing to file", - "重置文件的创建时间": "Resetting file creation time", - "下载编号": "Download number", - "自动定位": "Auto-locating", - "不能识别的URL!": "Unrecognized URL!", - "下载中": "Downloading", - "下载完成": "Download complete", - "正在获取文献名!": "Getting article name!", - "年份获取失败": "Failed to get year", - "authors获取失败": "Failed to get authors", - "获取成功": "Successfully retrieved", - "函数插件作者": "Function plugin author", - "正在提取摘要并下载PDF文档……": "Extracting abstract and downloading PDF document...", - "下载pdf文件未成功": "PDF file download unsuccessful", - "请你阅读以下学术论文相关的材料": "Please read the following academic paper related materials", - "翻译为中文": "Translate to Chinese", - "材料如下": "Materials are as 
follows", - "论文": "Paper", - "PDF文件也已经下载": "PDF file has also been downloaded", - "剩下的情况都开头除去": "Remove the beginning of the remaining situation", - "结尾除去一次": "Remove the end once", - "第1步": "Step 1", - "第2步": "Step 2", - "第3步": "Step 3", - "集合文件": "Collection file", - "第4步": "Step 4", - "随便显示点什么防止卡顿的感觉": "Display something randomly to prevent lagging", - "第5步": "Step 5", - "Token限制下的截断与处理": "Truncation and processing under Token restriction", - "第6步": "Step 6", - "任务函数": "Task function", - "分解代码文件": "Decompose code files", - "第7步": "Step 7", - "所有线程同时开始执行任务函数": "All threads start executing task functions simultaneously", - "第8步": "Step 8", - "循环轮询各个线程是否执行完毕": "Loop and poll whether each thread has finished executing", - "第9步": "Step 9", - "把结果写入文件": "Write the results to a file", - "这里其实不需要join了": "Join is not needed here", - "肯定已经都结束了": "They must have all finished", - "失败": "Failure", - "第10步": "Step 10", - "备份一个文件": "Backup a file", - "接下来请将以下代码中包含的所有中文转化为英文": "Please translate all Chinese in the following code into English", - "只输出转化后的英文代码": "Output only the translated English code", - "请用代码块输出代码": "Please output the code using code blocks", - "等待多线程操作": "Waiting for multi-threaded operations", - "中间过程不予显示": "Intermediate processes will not be displayed", - "聊天显示框的句柄": "Chat display box handle", - "用于显示给用户": "Displayed to the user", - "聊天历史": "Chat history", - "前情提要": "Context summary", - "给gpt的静默提醒": "Silent reminder to GPT", - "当前软件运行的端口号": "Current software running port number", - "这是什么功能": "What is this function", - "生成图像": "Generate image", - "请先把模型切换至gpt-xxxx或者api2d-xxxx": "Please switch the model to gpt-xxxx or api2d-xxxx first", - "如果中文效果不理想": "If the Chinese effect is not ideal", - "尝试Prompt": "Try Prompt", - "正在处理中": "Processing", - "图像中转网址": "Image transfer URL", - "中转网址预览": "Transfer URL preview", - "本地文件地址": "Local file address", - "本地文件预览": "Local file preview", - "chatGPT对话历史": "ChatGPT conversation history", - "对话历史": "Conversation history", - "对话历史写入": "Conversation history written", - "存档文件详情": "Archive file details", - "载入对话": "Load conversation", - "条": "条", - "上下文": "Context", - "保存当前对话": "Save current conversation", - "您可以调用“LoadConversationHistoryArchive”还原当下的对话": "You can call 'LoadConversationHistoryArchive' to restore the current conversation", - "警告!被保存的对话历史可以被使用该系统的任何人查阅": "Warning! 
The saved conversation history can be viewed by anyone using this system", - "正在查找对话历史文件": "Looking for conversation history file", - "html格式": "HTML format", - "找不到任何html文件": "No HTML files found", - "但本地存储了以下历史文件": "But the following history files are stored locally", - "您可以将任意一个文件路径粘贴到输入区": "You can paste any file path into the input area", - "然后重试": "and try again", - "载入对话历史文件": "Load conversation history file", - "对话历史文件损坏!": "Conversation history file is corrupted!", - "删除所有历史对话文件": "Delete all history conversation files", - "已删除": "Deleted", - "pip install python-docx 用于docx格式": "pip install python-docx for docx format", - "跨平台": "Cross-platform", - "pip install pywin32 用于doc格式": "pip install pywin32 for doc format", - "仅支持Win平台": "Only supports Win platform", - "打开文件": "Open file", - "rar和7z格式正常": "RAR and 7z formats are normal", - "故可以只分析文章内容": "So you can only analyze the content of the article", - "不输入文件名": "Do not enter the file name", - "已经对该文章的所有片段总结完毕": "All segments of the article have been summarized", - "如果文章被切分了": "If the article is cut into pieces", - "检测输入参数": "Checking input parameters", - "如没有给定输入参数": "If no input parameters are given", - "直接退出": "Exit directly", - "搜索需要处理的文件清单": "Search for the list of files to be processed", - "如果没找到任何文件": "If no files are found", - "开始正式执行任务": "Start executing the task formally", - "请对下面的文章片段用中文做概述": "Please summarize the following article fragment in Chinese", - "文章内容是": "The content of the article is", - "请对下面的文章片段做概述": "Please summarize the following article fragment", - "的第": "The", - "个片段": "fragment", - "总结文章": "Summarize the article", - "根据以上的对话": "According to the conversation above", - "的主要内容": "The main content of", - "所有文件都总结完成了吗": "Are all files summarized?", - "如果是.doc文件": "If it is a .doc file", - "请先转化为.docx格式": "Please convert it to .docx format first", - "找不到任何.docx或doc文件": "Cannot find any .docx or .doc files", - "读取Markdown文件": "Read Markdown file", - "拆分过长的Markdown文件": "Split overlong Markdown file", - "什么都没有": "Nothing at all", - "对整个Markdown项目进行翻译": "Translate the entire Markdown project", - "找不到任何.md文件": "Cannot find any .md files", - "句子结束标志": "End of sentence marker", - "尽量是完整的一个section": "Try to use a complete section", - "比如introduction": "such as introduction", - "experiment等": "experiment, etc.", - "必要时再进行切割": "cut if necessary", - "的长度必须小于 2500 个 Token": "its length must be less than 2500 tokens", - "尝试": "try", - "按照章节切割PDF": "cut PDF by sections", - "从摘要中提取高价值信息": "extract high-value information from the abstract", - "放到history中": "put it in history", - "迭代地历遍整个文章": "iterate through the entire article", - "提取精炼信息": "extract concise information", - "用户提示": "user prompt", - "初始值是摘要": "initial value is the abstract", - "i_say=真正给chatgpt的提问": "i_say=questions actually asked to chatgpt", - "i_say_show_user=给用户看的提问": "i_say_show_user=questions shown to the user", - "迭代上一次的结果": "iterate over the previous result", - "提示": "prompt", - "整理history": "organize history", - "接下来两句话只显示在界面上": "the next two sentences are only displayed on the interface", - "不起实际作用": "do not have an actual effect", - "设置一个token上限": "set a token limit", - "防止回答时Token溢出": "prevent token overflow when answering", - "注意这里的历史记录被替代了": "note that the history record here has been replaced", - "首先你在英文语境下通读整篇论文": "First, read the entire paper in an English context", - "收到": "Received", - "文章极长": "Article is too long", - "不能达到预期效果": "Cannot achieve expected results", - "接下来": "Next", - "你是一名专业的学术教授": "You are a professional academic 
professor", - "利用以上信息": "Utilize the above information", - "使用中文回答我的问题": "Answer my questions in Chinese", - "理解PDF论文内容": "Understand the content of a PDF paper", - "并且将结合上下文内容": "And will combine with the context", - "进行学术解答": "Provide academic answers", - "请对下面的程序文件做一个概述": "Please provide an overview of the program file below", - "并对文件中的所有函数生成注释": "And generate comments for all functions in the file", - "使用markdown表格输出结果": "Output the results using markdown tables", - "文件内容是": "The file content is", - "在此处替换您要搜索的关键词": "Replace the keywords you want to search here", - "爬取搜索引擎的结果": "Crawl the results of search engines", - "依次访问网页": "Visit web pages in order", - "最多收纳多少个网页的结果": "Include results from how many web pages at most", - "ChatGPT综合": "ChatGPT synthesis", - "裁剪输入": "Trim the input", - "从最长的条目开始裁剪": "Start trimming from the longest entry", - "防止爆token": "Prevent token explosion", - "无法连接到该网页": "Cannot connect to the webpage", - "请结合互联网信息回答以下问题": "Please answer the following questions based on internet information", - "请注意": "Please note", - "您正在调用一个": "You are calling a", - "函数插件": "function plugin", - "的模板": "template", - "该模板可以实现ChatGPT联网信息综合": "This template can achieve ChatGPT network information integration", - "该函数面向希望实现更多有趣功能的开发者": "This function is aimed at developers who want to implement more interesting features", - "它可以作为创建新功能函数的模板": "It can be used as a template for creating new feature functions", - "您若希望分享新的功能模组": "If you want to share new feature modules", - "请不吝PR!": "Please don't hesitate to PR!", - "第": "The", - "份搜索结果": "search results", - "从以上搜索结果中抽取信息": "Extract information from the above search results", - "然后回答问题": "Then answer the question", - "请从给定的若干条搜索结果中抽取信息": "Please extract information from the given search results", - "对最相关的两个搜索结果进行总结": "Summarize the two most relevant search results", - "拆分过长的IPynb文件": "Splitting overly long IPynb files", - "的分析如下": "analysis is as follows", - "解析的结果如下": "The parsing result is as follows", - "对IPynb文件进行解析": "Parse the IPynb file", - "找不到任何.ipynb文件": "Cannot find any .ipynb files", - "第一步": "Step one", - "逐个文件分析": "Analyze each file", - "读取文件": "Read the file", - "装载请求内容": "Load the request content", - "文件读取完成": "File reading completed", - "对每一个源代码文件": "For each source code file", - "生成一个请求线程": "Generate a request thread", - "发送到chatgpt进行分析": "Send to chatgpt for analysis", - "全部文件解析完成": "All files parsed", - "结果写入文件": "Write results to file", - "准备对工程源代码进行汇总分析": "Prepare to summarize and analyze project source code", - "第二步": "Step two", - "综合": "Synthesis", - "单线程": "Single thread", - "分组+迭代处理": "Grouping + iterative processing", - "10个文件为一组": "10 files per group", - "只保留文件名节省token": "Keep only file names to save tokens", - "裁剪input": "Trim input", - "迭代之前的分析": "Analysis before iteration", - "将要匹配的模式": "Pattern to match", - "不输入即全部匹配": "Match all if not input", - "将要忽略匹配的文件后缀": "File suffixes to ignore in matching", - "避免解析压缩文件": "Avoid parsing compressed files", - "将要忽略匹配的文件名": "File names to ignore in matching", - "生成正则表达式": "Generate regular expression", - "若上传压缩文件": "If uploading compressed files", - "先寻找到解压的文件夹路径": "First find the path of the decompressed folder", - "从而避免解析压缩文件": "Thus avoid parsing compressed files", - "按输入的匹配模式寻找上传的非压缩文件和已解压的文件": "Find uncompressed and decompressed files uploaded according to the input matching pattern", - "源文件太多": "Too many source files", - "超过512个": "Exceeds 512", - "请缩减输入文件的数量": "Please reduce the number of input files", - "或者": "Or", - "您也可以选择删除此行警告": "You can also choose 
to delete this line of warning", - "并修改代码拆分file_manifest列表": "And modify the code to split the file_manifest list", - "从而实现分批次处理": "To achieve batch processing", - "接下来请你逐文件分析下面的工程": "Next, please analyze the following project file by file", - "请对下面的程序文件做一个概述文件名是": "Please give an overview of the following program files, the file name is", - "你是一个程序架构分析师": "You are a program architecture analyst", - "正在分析一个源代码项目": "Analyzing a source code project", - "你的回答必须简单明了": "Your answer must be concise and clear", - "完成": "Completed", - "逐个文件分析已完成": "Analysis of each file has been completed", - "正在开始汇总": "Starting to summarize", - "用一张Markdown表格简要描述以下文件的功能": "Briefly describe the functions of the following files in a Markdown table", - "根据以上分析": "Based on the above analysis", - "用一句话概括程序的整体功能": "Summarize the overall function of the program in one sentence", - "对程序的整体功能和构架重新做出概括": "Redescribe the overall function and architecture of the program", - "由于输入长度限制": "Due to input length limitations", - "可能需要分组处理": "Group processing may be required", - "本组文件为": "This group of files is", - "+ 已经汇总的文件组": "+ Files group already summarized", - "正在分析一个项目的源代码": "Analyzing source code of a project", - "找不到任何python文件": "No Python files found", - "找不到任何.h头文件": "No .h header files found", - "找不到任何java文件": "No Java files found", - "找不到任何前端相关文件": "No front-end related files found", - "找不到任何golang文件": "No Golang files found", - "找不到任何rust文件": "No Rust files found", - "找不到任何lua文件": "No Lua files found", - "找不到任何CSharp文件": "No CSharp files found", - "找不到任何文件": "No files found", - "正在同时咨询ChatGPT和ChatGLM……": "Consulting ChatGPT and ChatGLM simultaneously...", - "发送 GET 请求": "Sending GET request", - "解析网页内容": "Parsing webpage content", - "获取所有文章的标题和作者": "Getting titles and authors of all articles", - "引用次数是链接中的文本": "The number of citations is in the link text", - "直接取出来": "Take it out directly", - "摘要在 .gs_rs 中的文本": "The summary is in the .gs_rs text", - "需要清除首尾空格": "Need to remove leading and trailing spaces", - "是否在arxiv中": "Is it in arxiv?", - "不在arxiv中无法获取完整摘要": "Cannot get complete summary if it is not in arxiv", - "分析用户提供的谷歌学术": "Analyzing Google Scholar provided by the user", - "搜索页面中": "In the search page", - "出现的所有文章": "All articles that appear", - "插件初始化中": "Plugin initializing", - "下面是一些学术文献的数据": "Below are some academic literature data", - "当你想发送一张照片时": "When you want to send a photo", - "使用 Unsplash API": "Use Unsplash API", - "匹配^数字^": "Match ^number^", - "将匹配到的数字作为替换值": "Replace the matched number as the replacement value", - "替换操作": "Replacement operation", - "质能方程式": "Mass-energy equivalence equation", - "知乎": "Zhihu", - "你好": "Hello", - "这是必应": "This is Bing", - "质能方程是描述质量与能量之间的当量关系的方程": "The mass-energy equivalence equation describes the equivalent relationship between mass and energy", - "用tex格式": "In tex format", - "质能方程可以写成$$E=mc^2$$": "The mass-energy equivalence equation can be written as $$E=mc^2$$", - "其中$E$是能量": "Where $E$ is energy", - "$m$是质量": "$m$ is mass", - "$c$是光速": "$c$ is the speed of light", - "Endpoint 重定向": "Endpoint redirection", - "兼容旧版的配置": "Compatible with old version configuration", - "新版配置": "New version configuration", - "获取tokenizer": "Get tokenizer", - "如果只询问1个大语言模型": "If only one large language model is queried", - "如果同时InquiryMultipleLargeLanguageModels": "If InquiryMultipleLargeLanguageModels is queried at the same time", - "观察窗": "Observation window", - "该文件中主要包含2个函数": "There are mainly 2 functions in this file", - "是所有LLM的通用接口": "It is a common interface for all LLMs", - 
"它们会继续向下调用更底层的LLM模型": "They will continue to call lower-level LLM models", - "处理多模型并行等细节": "Handling details such as multi-model parallelism", - "不具备多线程能力的函数": "Functions without multi-threading capability", - "正常对话时使用": "Used in normal conversation", - "具备完备的交互功能": "Fully interactive", - "不可多线程": "Not multi-threaded", - "具备多线程调用能力的函数": "Functions with multi-threading capability", - "在函数插件中被调用": "Called in function plugins", - "灵活而简洁": "Flexible and concise", - "正在加载tokenizer": "Loading tokenizer", - "如果是第一次运行": "If it is the first time running", - "可能需要一点时间下载参数": "It may take some time to download parameters", - "加载tokenizer完毕": "Loading tokenizer completed", - "警告!API_URL配置选项将被弃用": "Warning! The API_URL configuration option will be deprecated", - "请更换为API_URL_REDIRECT配置": "Please replace it with the API_URL_REDIRECT configuration", - "将错误显示出来": "Display errors", - "发送至LLM": "Send to LLM", - "等待回复": "Waiting for reply", - "一次性完成": "Completed in one go", - "不显示中间过程": "Do not display intermediate processes", - "但内部用stream的方法避免中途网线被掐": "But internally use the stream method to avoid the network being cut off midway", - "是本次问询的输入": "This is the input of this inquiry", - "系统静默prompt": "System silent prompt", - "LLM的内部调优参数": "LLM's internal tuning parameters", - "是之前的对话列表": "history is the list of previous conversations", - "用于负责跨越线程传递已经输出的部分": "Used to transfer the already output part across threads", - "大部分时候仅仅为了fancy的视觉效果": "Most of the time it's just for fancy visual effects", - "留空即可": "Leave it blank", - "观测窗": "Observation window", - "TGUI不支持函数插件的实现": "TGUI does not support the implementation of function plugins", - "说": "Say", - "流式获取输出": "Get output in a streaming way", - "用于基础的对话功能": "Used for basic conversation functions", - "inputs 是本次问询的输入": "inputs are the inputs for this inquiry", - "temperature是LLM的内部调优参数": "Temperature is an internal tuning parameter of LLM", - "history 是之前的对话列表": "history is the list of previous conversations", - "注意无论是inputs还是history": "Note that both inputs and history", - "内容太长了都会触发token数量溢出的错误": "An error of token overflow will be triggered if the content is too long", - "chatbot 为WebUI中显示的对话列表": "chatbot is the conversation list displayed in WebUI", - "修改它": "Modify it", - "然后yeild出去": "Then yield it out", - "可以直接修改对话界面内容": "You can directly modify the conversation interface content", - "additional_fn代表点击的哪个按钮": "additional_fn represents which button is clicked", - "按钮见functional.py": "See functional.py for buttons", - "子进程执行": "Subprocess execution", - "第一次运行": "First run", - "加载参数": "Load parameters", - "进入任务等待状态": "Enter task waiting state", - "收到消息": "Received message", - "开始请求": "Start requesting", - "中途接收可能的终止指令": "Receive possible termination command in the middle", - "如果有的话": "If any", - "请求处理结束": "Request processing ends", - "开始下一个循环": "Start the next loop", - "主进程执行": "Main process execution", - "chatglm 没有 sys_prompt 接口": "ChatGLM has no sys_prompt interface", - "因此把prompt加入 history": "Therefore, add prompt to history", - "的耐心": "Patience", - "设置5秒即可": "Set 5 seconds", - "热更新prompt": "Hot update prompt", - "获取预处理函数": "Get preprocessing function", - "处理历史信息": "Process historical information", - "开始接收chatglm的回复": "Start receiving replies from ChatGLM", - "总结输出": "Summary output", - "ChatGLM尚未加载": "ChatGLM has not been loaded", - "加载需要一段时间": "Loading takes some time", - "取决于": "Depending on", - "的配置": "Configuration", - "ChatGLM消耗大量的内存": "ChatGLM consumes a lot of memory", - "或显存": "Or video memory", - "也许会导致低配计算机卡死 ……": "May cause low-end 
computers to freeze...", - "依赖检测通过": "Dependency check passed", - "缺少ChatGLM的依赖": "Missing dependency for ChatGLM", - "如果要使用ChatGLM": "If you want to use ChatGLM", - "除了基础的pip依赖以外": "In addition to the basic pip dependencies", - "您还需要运行": "You also need to run", - "安装ChatGLM的依赖": "Install dependencies for ChatGLM", - "Call ChatGLM fail 不能正常加载ChatGLM的参数": "Call ChatGLM fail, unable to load parameters for ChatGLM", - "不能正常加载ChatGLM的参数!": "Unable to load parameters for ChatGLM!", - "多线程方法": "Multithreading method", - "函数的说明请见 request_llms/bridge_all.py": "For function details, please see request_llms/bridge_all.py", - "程序终止": "Program terminated", - "单线程方法": "Single-threaded method", - "等待ChatGLM响应中": "Waiting for response from ChatGLM", - "ChatGLM响应异常": "ChatGLM response exception", - "借鉴了 https": "Referenced from https", - "config_private.py放自己的秘密如API和代理网址": "Put your own secrets such as API and proxy address in config_private.py", - "读取时首先看是否存在私密的config_private配置文件": "When reading, first check if there is a private config_private configuration file", - "不受git管控": "Not controlled by git", - "则覆盖原config文件": "Then overwrite the original config file", - "看门狗的耐心": "The patience of the watchdog", - "失败了": "Failed", - "重试一次": "Retry once", - "再失败就没办法了": "If it fails again, there is no way", - "api2d 正常完成": "api2d completed normally", - "把已经获取的数据显示出去": "Display the data already obtained", - "如果超过期限没有喂狗": "If the dog is not fed beyond the deadline", - "则终止": "then terminate", - "非OpenAI官方接口的出现这样的报错": "such errors occur in non-OpenAI official interfaces", - "OpenAI和API2D不会走这里": "OpenAI and API2D will not go here", - "数据流的第一帧不携带content": "The first frame of the data stream does not carry content", - "前者API2D的": "The former is API2D", - "判定为数据流的结束": "Judged as the end of the data stream", - "gpt_replying_buffer也写完了": "gpt_replying_buffer is also written", - "处理数据流的主体": "Processing the body of the data stream", - "如果这里抛出异常": "If an exception is thrown here", - "一般是文本过长": "It is usually because the text is too long", - "详情见get_full_error的输出": "See the output of get_full_error for details", - "清除当前溢出的输入": "Clear the current overflow input", - "是本次输入": "It is the input of this time", - "是本次输出": "It is the output of this time", - "history至少释放二分之一": "Release at least half of the history", - "清除历史": "Clear the history", - "该文件中主要包含三个函数": "This file mainly contains three functions", - "高级实验性功能模块调用": "Calling advanced experimental function modules", - "不会实时显示在界面上": "Will not be displayed on the interface in real time", - "参数简单": "The parameters are simple", - "可以多线程并行": "Can be multi-threaded and parallel", - "方便实现复杂的功能逻辑": "Convenient for implementing complex functional logic", - "在实验过程中发现调用predict_no_ui处理长文档时": "It was found during the experiment that when calling predict_no_ui to process long documents,", - "和openai的连接容易断掉": "Connection to OpenAI is prone to disconnection", - "这个函数用stream的方式解决这个问题": "This function solves the problem using stream", - "同样支持多线程": "Also supports multi-threading", - "网络错误": "Network error", - "检查代理服务器是否可用": "Check if the proxy server is available", - "以及代理设置的格式是否正确": "And if the format of the proxy settings is correct", - "格式须是": "The format must be", - "缺一不可": "All parts are necessary", - "获取完整的从Openai返回的报错": "Get the complete error message returned from OpenAI", - "发送至chatGPT": "Send to chatGPT", - "chatGPT的内部调优参数": "Internal tuning parameters of chatGPT", - "请求超时": "Request timed out", - "正在重试": "Retrying", - "OpenAI拒绝了请求": "OpenAI rejected the request", - "用户取消了程序": "User 
canceled the program", - "意外Json结构": "Unexpected JSON structure", - "正常结束": "Normal termination", - "但显示Token不足": "But shows insufficient token", - "导致输出不完整": "Resulting in incomplete output", - "请削减单次输入的文本量": "Please reduce the amount of text input per request", - "temperature是chatGPT的内部调优参数": "Temperature is an internal tuning parameter of chatGPT", - "输入已识别为openai的api_key": "The input has been recognized as OpenAI's api_key", - "api_key已导入": "api_key has been imported", - "缺少api_key": "Missing api_key", - "MOSS尚未加载": "MOSS has not been loaded yet", - "MOSS消耗大量的内存": "MOSS consumes a lot of memory", - "缺少MOSS的依赖": "Lack of dependencies for MOSS", - "如果要使用MOSS": "If you want to use MOSS", - "安装MOSS的依赖": "Install dependencies for MOSS", - "Call MOSS fail 不能正常加载MOSS的参数": "Call MOSS fail, unable to load MOSS parameters normally", - "不能正常加载MOSS的参数!": "Unable to load MOSS parameters normally!", - "等待MOSS响应中": "Waiting for MOSS response", - "MOSS响应异常": "MOSS response exception", - "读取配置": "Read configuration", - "等待": "Waiting", - "开始问问题": "Start asking questions", - "追加历史": "Append history", - "问题": "Question", - "代理设置": "Proxy settings", - "发送请求到子进程": "Send request to child process", - "等待newbing回复的片段": "Waiting for the fragment of newbing reply", - "结束": "End", - "newbing回复的片段": "Fragment of newbing reply", - "没有 sys_prompt 接口": "No sys_prompt interface", - "来自EdgeGPT.py": "From EdgeGPT.py", - "等待NewBing响应": "Waiting for NewBing response", - "子进程Worker": "Child process Worker", - "调用主体": "Call subject", - "注意目前不能多人同时调用NewBing接口": "Note that currently multiple people cannot call the NewBing interface at the same time", - "有线程锁": "There is a thread lock", - "否则将导致每个人的NewBing问询历史互相渗透": "Otherwise, each person's NewBing inquiry history will penetrate each other", - "调用NewBing时": "When calling NewBing", - "会自动使用已配置的代理": "the configured proxy will be automatically used", - "缺少的依赖": "Missing dependencies", - "如果要使用Newbing": "If you want to use Newbing", - "安装Newbing的依赖": "Install the dependencies for Newbing", - "这个函数运行在子进程": "This function runs in a child process", - "不能加载Newbing组件": "Cannot load Newbing components", - "NEWBING_COOKIES未填写或有格式错误": "NEWBING_COOKIES is not filled in or has a format error", - "Newbing失败": "Newbing failed", - "这个函数运行在主进程": "This function runs in the main process", - "第三部分": "Part III", - "主进程统一调用函数接口": "The main process calls the function interface uniformly", - "等待NewBing响应中": "Waiting for NewBing response", - "NewBing响应缓慢": "NewBing response is slow", - "尚未完成全部响应": "Not all responses have been completed yet", - "请耐心完成后再提交新问题": "Please be patient and submit a new question after completing all responses", - "NewBing响应异常": "NewBing response is abnormal", - "请刷新界面重试": "Please refresh the page and try again", - "完成全部响应": "All responses have been completed", - "请提交新问题": "Please submit a new question", - "LLM_MODEL 格式不正确!": "LLM_MODEL format is incorrect!", - "对各个llm模型进行单元测试": "Unit testing for each LLM model", - "如何理解传奇?": "How to understand legends?", - "设定一个最小段落长度阈值": "Set a minimum paragraph length threshold", - "对文本进行归一化处理": "Normalize the text", - "分解连字": "Break ligatures", - "替换其他特殊字符": "Replace other special characters", - "替换跨行的连词": "Replace hyphens across lines", - "根据前后相邻字符的特点": "Based on the characteristics of adjacent characters", - "找到原文本中的换行符": "Find line breaks in the original text", - "根据 heuristic 规则": "Based on heuristic rules", - "用空格或段落分隔符替换原换行符": "Replace line breaks with spaces or paragraph separators", - "带超时倒计时": "With timeout countdown", - 
"根据给定的匹配结果来判断换行符是否表示段落分隔": "Determine whether line breaks indicate paragraph breaks based on given matching results", - "如果换行符前为句子结束标志": "If the line break is preceded by a sentence-ending punctuation mark", - "句号": "period", - "感叹号": "exclamation mark", - "问号": "question mark", - "且下一个字符为大写字母": "and the next character is a capital letter", - "则换行符更有可能表示段落分隔": "the line break is more likely to indicate a paragraph break", - "也可以根据之前的内容长度来判断段落是否已经足够长": "Paragraph length can also be judged based on previous content length", - "通过把连字": "By converting ligatures and other text special characters to their basic forms", - "等文本特殊符号转换为其基本形式来对文本进行归一化处理": "normalize the text by converting special characters to their basic forms", - "对从 PDF 提取出的原始文本进行清洗和格式化处理": "Clean and format the raw text extracted from PDF", - "1. 对原始文本进行归一化处理": "1. Normalize the original text", - "2. 替换跨行的连词": "2. Replace hyphens across lines", - "3. 根据 heuristic 规则判断换行符是否是段落分隔": "3. Determine whether line breaks indicate paragraph breaks based on heuristic rules", - "并相应地进行替换": "And replace accordingly", - "接下来请你逐文件分析下面的论文文件": "Next, please analyze the following paper files one by one", - "概括其内容": "Summarize its content", - "请对下面的文章片段用中文做一个概述": "Please summarize the following article in Chinese", - "请对下面的文章片段做一个概述": "Please summarize the following article", - "根据以上你自己的分析": "According to your own analysis above", - "对全文进行概括": "Summarize the entire text", - "用学术性语言写一段中文摘要": "Write a Chinese abstract in academic language", - "然后再写一段英文摘要": "Then write an English abstract", - "包括": "Including", - "找不到任何.tex或.pdf文件": "Cannot find any .tex or .pdf files", - "读取pdf文件": "Read the pdf file", - "返回文本内容": "Return the text content", - "此版本使用pdfminer插件": "This version uses the pdfminer plugin", - "带token约简功能": "With token reduction function", - "递归地切割PDF文件": "Recursively split the PDF file", - "为了更好的效果": "For better results", - "我们剥离Introduction之后的部分": "We strip the part after Introduction", - "如果有": "If there is", - "多线": "Multi-threaded", - "\\n 翻译": "\\n Translation", - "整理报告的格式": "Organize the format of the report", - "原文": "Original text", - "更新UI": "Update UI", - "准备文件的下载": "Prepare for file download", - "重命名文件": "Rename file", - "以下是一篇学术论文的基础信息": "The following is the basic information of an academic paper", - "请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分": "Please extract the following six parts: \"Title\", \"Conference or Journal\", \"Author\", \"Abstract\", \"Number\", \"Author's Email\"", - "请用markdown格式输出": "Please output in markdown format", - "最后用中文翻译摘要部分": "Finally, translate the abstract into Chinese", - "请提取": "Please extract", - "请从": "Please extract from", - "中提取出“标题”、“收录会议或期刊”等基本信息": "Please extract basic information such as \"Title\" and \"Conference or Journal\" from", - "你需要翻译以下内容": "You need to translate the following content", - "请你作为一个学术翻译": "As an academic translator, please", - "负责把学术论文准确翻译成中文": "be responsible for accurately translating academic papers into Chinese", - "注意文章中的每一句话都要翻译": "Please translate every sentence in the article", - "一、论文概况": "I. Overview of the paper", - "二、论文翻译": "II. Translation of the paper", - "给出输出文件清单": "Provide a list of output files", - "第 0 步": "Step 0", - "切割PDF": "Split PDF", - "每一块": "Each block", - "提取出以下内容": "Extract the following content", - "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开": "1. English title; 2. Translation of Chinese title; 3. Author; 4. 
arxiv open access", - ";4、引用数量": "Number of Citations", - ";5、中文摘要翻译": "Translation of Chinese Abstract", - "以下是信息源": "Here are the Information Sources", - "请分析此页面中出现的所有文章": "Please Analyze all the Articles Appearing on this Page", - "这是第": "This is Batch Number", - "批": "", - "你是一个学术翻译": "You are an Academic Translator", - "请从数据中提取信息": "Please Extract Information from the Data", - "你必须使用Markdown表格": "You Must Use Markdown Tables", - "你必须逐个文献进行处理": "You Must Process Each Document One by One", - "状态": "Status", - "已经全部完成": "All Completed", - "您可以试试让AI写一个Related Works": "You Can Try to Let AI Write a Related Works", - "该函数只有20多行代码": "This Function Has Only 20+ Lines of Code", - "此外我们也提供可同步处理大量文件的多线程Demo供您参考": "In addition, we also provide a multi-threaded demo that can process a large number of files synchronously for your reference", - "历史中哪些事件发生在": "Which Events Happened in History on", - "月": "Month", - "日": "Day", - "列举两条并发送相关图片": "List Two and Send Relevant Pictures", - "发送图片时": "When Sending Pictures", - "请使用Markdown": "Please Use Markdown", - "将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词": "Replace PUT_YOUR_QUERY_HERE in the Unsplash API with the Most Important Word Describing the Event", - "1. 临时解决方案": "1. Temporary Solution", - "直接在输入区键入api_key": "Enter the api_key Directly in the Input Area", - "然后回车提交": "Submit after pressing Enter", - "2. 长效解决方案": "Long-term solution", - "在config.py中配置": "Configure in config.py", - "等待响应": "Waiting for response", - "api-key不满足要求": "API key does not meet requirements", - "远程返回错误": "Remote returns error", - "Json解析不合常规": "Json parsing is not normal", - "Reduce the length. 本次输入过长": "Reduce the length. The input is too long this time", - "或历史数据过长. 历史缓存数据已部分释放": "Or the historical data is too long. Historical cached data has been partially released", - "您可以请再次尝试.": "You can try again.", - "若再次失败则更可能是因为输入过长.": "If it fails again, it is more likely due to input being too long.", - "does not exist. 模型不存在": "Model does not exist", - "或者您没有获得体验资格": "Or you do not have the qualification for experience", - "Incorrect API key. OpenAI以提供了不正确的API_KEY为由": "Incorrect API key. OpenAI claims that an incorrect API_KEY was provided", - "拒绝服务": "Service refused", - "You exceeded your current quota. OpenAI以账户额度不足为由": "You exceeded your current quota. OpenAI claims that the account balance is insufficient", - "Bad forward key. API2D账户额度不足": "Bad forward key. API2D account balance is insufficient", - "Not enough point. API2D账户点数不足": "Not enough point. 
API2D account points are insufficient", - "Json异常": "Json exception", - "整合所有信息": "Integrate all information", - "选择LLM模型": "Select LLM model", - "生成http请求": "Generate http request", - "为发送请求做准备": "Prepare to send request", - "你提供了错误的API_KEY": "You provided an incorrect API_KEY", - "来保留函数的元信息": "Preserve the metadata of the function", - "并定义了一个名为decorated的内部函数": "and define an inner function named decorated", - "内部函数通过使用importlib模块的reload函数和inspect模块的getmodule函数来重新加载并获取函数模块": "The inner function reloads and retrieves the function module by using the reload function of the importlib module and the getmodule function of the inspect module", - "然后通过getattr函数获取函数名": "Then it retrieves the function name using the getattr function", - "并在新模块中重新加载函数": "and reloads the function in the new module", - "最后": "Finally", - "使用yield from语句返回重新加载过的函数": "it returns the reloaded function using the yield from statement", - "并在被装饰的函数上执行": "and executes it on the decorated function", - "最终": "Ultimately", - "装饰器函数返回内部函数": "the decorator function returns the inner function", - "这个内部函数可以将函数的原始定义更新为最新版本": "which can update the original definition of the function to the latest version", - "并执行函数的新版本": "and execute the new version of the function", - "第二部分": "Second part", - "其他小工具": "Other utilities", - "将结果写入markdown文件中": "Write the results to a markdown file", - "将普通文本转换为Markdown格式的文本": "Convert plain text to Markdown formatted text", - "向chatbot中添加简单的意外错误信息": "Add simple unexpected error messages to the chatbot", - "Openai 限制免费用户每分钟20次请求": "Openai limits free users to 20 requests per minute", - "降低请求频率中": "Reduce the request frequency", - "只输出代码": "Output only the code", - "文件名是": "The file name is", - "文件代码是": "The file code is", - "至少一个线程任务Token溢出而失败": "At least one thread task fails due to token overflow", - "至少一个线程任务意外失败": "At least one thread task fails unexpectedly", - "开始了吗": "Has it started?", - "已完成": "Completed", - "的转化": "conversion", - "存入": "saved to", - "生成一份任务执行报告": "Generate a task execution report", - "文件保存到本地": "Save the file locally", - "由于请求gpt需要一段时间": "As requesting GPT takes some time", - "我们先及时地做一次界面更新": "Let's do a UI update in time", - "界面更新": "UI update", - "输入栏用户输入的文本": "Text entered by the user in the input field", - "例如需要翻译的一段话": "For example, a paragraph that needs to be translated", - "再例如一个包含了待处理文件的路径": "For example, a file path that contains files to be processed", - "gpt模型参数": "GPT model parameters", - "如温度和top_p等": "Such as temperature and top_p", - "一般原样传递下去就行": "Generally pass it on as is", - "插件模型的参数": "Plugin model parameters", - "暂时没有用武之地": "No use for the time being", - "找不到任何.tex或pdf文件": "Cannot find any .tex or .pdf files", - "读取PDF文件": "Read PDF file", - "输入中可能存在乱码": "There may be garbled characters in the input", - "是否重置": "Whether to reset", - "jittorllms 没有 sys_prompt 接口": "jittorllms does not have a sys_prompt interface", - "开始接收jittorllms的回复": "Start receiving jittorllms responses", - "jittorllms尚未加载": "jittorllms has not been loaded yet", - "请避免混用多种jittor模型": "Please avoid mixing multiple jittor models", - "否则可能导致显存溢出而造成卡顿": "Otherwise, it may cause a graphics memory overflow and cause stuttering", - "jittorllms消耗大量的内存": "jittorllms consumes a lot of memory", - "缺少jittorllms的依赖": "Missing dependencies for jittorllms", - "如果要使用jittorllms": "If you want to use jittorllms", - "和": "and", - "两个指令来安装jittorllms的依赖": "Two commands to install jittorllms dependencies", - "在项目根目录运行这两个指令": "Run these two commands in the project root directory", - 
"安装jittorllms依赖后将完全破坏现有的pytorch环境": "Installing jittorllms dependencies will completely destroy the existing pytorch environment", - "建议使用docker环境!": "It is recommended to use a docker environment!", - "Call jittorllms fail 不能正常加载jittorllms的参数": "Call jittorllms fail, cannot load jittorllms parameters normally", - "不能正常加载jittorllms的参数!": "Cannot load jittorllms parameters normally!", - "触发重置": "Trigger reset", - "等待jittorllms响应中": "Waiting for jittorllms response", - "jittorllms响应异常": "Jittor LMS Response Exception", - "这段代码来源 https": "This code is from https", - "等待输入": "Waiting for input", - "体验gpt-4可以试试api2d": "You can try API2d to experience GPT-4", - "可选 ↓↓↓": "Optional ↓↓↓", - "本地LLM模型如ChatGLM的执行方式 CPU/GPU": "Execution mode of local LLM models such as ChatGLM CPU/GPU", - "设置gradio的并行线程数": "Set the number of parallel threads for Gradio", - "不需要修改": "No modification is needed", - "加一个live2d装饰": "Add a Live2D decoration", - "HotReload的装饰器函数": "Decorator function of HotReload", - "用于实现Python函数插件的热更新": "Used to implement hot updates of Python function plugins", - "函数热更新是指在不停止程序运行的情况下": "Function hot update refers to updating function code in real-time without stopping program execution", - "更新函数代码": "Update function code", - "从而达到实时更新功能": "To achieve real-time update function", - "在装饰器内部": "Inside the decorator", - "使用wraps": "Use wraps", - "代码高亮": "Code Highlighting", - "网页的端口": "Web Port", - "等待多久判定为超时": "Timeout Threshold", - "-1代表随机端口": "-1 represents random port", - "但大部分场合下并不需要修改": "However, it does not need to be modified in most cases", - "发送请求到OpenAI后": "After sending the request to OpenAI", - "上下布局": "Vertical Layout", - "左右布局": "Horizontal Layout", - "对话窗的高度": "Height of the Conversation Window", - "重试的次数限制": "Retry Limit", - "gpt4现在只对申请成功的人开放": "GPT-4 is now only open to those who have successfully applied", - "提高限制请查询": "Please check for higher limits", - "OpenAI模型选择是": "OpenAI Model Selection is", - "网络卡顿、代理失败、KEY失效": "Network Lag, Proxy Failure, KEY Invalid", - "窗口布局": "Window Layout", - "以下配置可以优化体验": "The following configurations can optimize the experience", - "OpenAI绑了信用卡的用户可以填 16 或者更高": "Users who have bound their credit card to OpenAI can fill in 16 or higher", - "如果OpenAI不响应": "If OpenAI does not respond", - "Latex英文纠错": "LatexEnglishCorrection", - "总结音视频": "SummaryAudioVideo", - "动画生成": "AnimationGeneration", - "数学动画生成manim": "MathematicalAnimationGenerationManim", - "test_数学动画生成manim": "test_MathematicalAnimationGenerationManim", - "这里借用了 https": "Here uses https", - "在相对论中": "In relativity", - "找不到任何音频或视频文件": "Cannot find any audio or video files", - "广义坐标": "Generalized coordinates", - "导入依赖失败": "Failed to import dependencies", - "相对速度": "Relative velocity", - "循环监听已打开频道的消息": "Loop to listen to messages in an open channel", - "秒 s": "Seconds s", - "提取视频中的音频": "Extract audio from video", - "解析为简体中文": "Parse to Simplified Chinese", - "等待Claude响应": "Waiting for Claude's response", - "请继续分析其他源代码": "Please continue to analyze other source code", - "3. 勒让德变换公式": "3. 
Legendre transformation formula", - "需要被切割的音频文件名": "Name of audio file to be cut", - "Claude回复的片段": "Fragment replied by Claude", - "拉格朗日量": "Lagrangian", - "暂时不支持历史消息": "Historical messages are not supported temporarily", - "从而更全面地理解项目的整体功能": "So as to have a more comprehensive understanding of the overall function of the project", - "建议暂时不要使用": "It is recommended not to use it temporarily", - "整理结果为压缩包": "Organize the results into a compressed package", - "焦耳 J": "Joule J", - "其中 $t$ 为时间": "Where $t$ is time", - "将三个方程变形为增广矩阵形式": "Transform three equations into augmented matrix form", - "获取已打开频道的最新消息并返回消息列表": "Get the latest messages from the opened channel and return a list of messages", - "str类型": "str type", - "所有音频都总结完成了吗": "Are all audio summaries completed?", - "SummaryAudioVideo内容": "SummaryAudioVideo content", - "使用教程详情见 request_llms/README.md": "See request_llms/README.md for detailed usage instructions", - "删除中间文件夹": "Delete intermediate folder", - "Claude组件初始化成功": "Claude component initialized successfully", - "$c$ 是光速": "$c$ is the speed of light", - "参考文献转Bib": "Convert reference to Bib", - "发送到openai音频解析终端": "Send to openai audio parsing terminal", - "不能加载Claude组件": "Cannot load Claude component", - "千克 kg": "Kilogram kg", - "切割音频文件": "Cut audio file", - "方法": "Method", - "设置API_KEY": "Set API_KEY", - "然后转移到指定的另一个路径中": "Then move to a specified path", - "正在加载Claude组件": "Loading Claude component", - "极端速度v下的一个相对独立观测者测得的时间": "The time measured by a relatively independent observer at extreme speed v", - "广义速度": "Generalized velocity", - "粒子的固有": "Intrinsic of particle", - "一个包含所有切割音频片段文件路径的列表": "A list containing the file paths of all segmented audio clips", - "计算文件总时长和切割点": "Calculate total duration and cutting points of the file", - "总结音频": "Summarize audio", - "作者": "Author", - "音频内容是": "The content of the audio is", - "\\frac{v^2}{c^2}}}$ 是洛伦兹因子": "$\\frac{v^2}{c^2}}}$ is the Lorentz factor", - "辅助gpt生成代码": "Assist GPT in generating code", - "读取文件内容到内存": "Read file content into memory", - "以秒为单位": "In seconds", - "米每秒 m/s": "Meters per second m/s", - "物体的质量": "Mass of the object", - "请对下面的音频片段做概述": "Please summarize the following audio clip", - "t是原始坐标系下的物理量": "t is a physical quantity in the original coordinate system", - "获取回复": "Get reply", - "正在处理": "Processing", - "将音频解析为简体中文": "Parse audio into Simplified Chinese", - "音频解析结果": "Audio parsing result", - "在这里放一些网上搜集的demo": "Put some demos collected online here", - "”的主要内容": "The main content of ", - "将": "Convert", - "请用一句话概括这些文件的整体功能": "Please summarize the overall function of these files in one sentence", - "P.S. 其他可用的模型还包括": "P.S. Other available models include", - "创建存储切割音频的文件夹": "Create folder to store segmented audio", - "片段": "Segment", - "批量SummaryAudioVideo": "Batch Summary Audio Video", - "单位": "Unit", - "1. 等效质量-能量关系式": "1. 
Mass-energy equivalence formula", - "模型选择是": "Model selection is", - "使用中文总结音频“": "Use Chinese to summarize audio", - "音频文件名": "Audio file name", - "LLM_MODEL是默认选中的模型": "LLM_MODEL is the default selected model", - "异步方法": "Asynchronous method", - "文本碎片重组为完整的tex文件": "Reassemble text fragments into a complete tex file", - "请对这部分内容进行语法矫正": "Please correct the grammar of this part", - "打开你的科学上网软件查看代理的协议": "Open your scientific Internet access software to view the proxy protocol", - "调用openai api 使用whisper-1模型": "Call openai api to use whisper-1 model", - "此处可以输入解析提示": "Parsing tips can be entered here", - "报告如何远程获取": "How to retrieve the report remotely", - "将代码转为动画": "Convert code to animation", - "Claude失败": "Claude failed", - "等待Claude响应中": "Waiting for Claude's response", - "目前不支持历史消息查询": "Historical message queries are currently not supported", - "把某个路径下所有文件压缩": "Compress all files under a certain path", - "论文概况": "Overview of the paper", - "参见https": "See https", - "如果要使用Claude": "If you want to use Claude", - "2. 洛伦兹变换式": "2. Lorentz transformation formula", - "通过调用conversations_open方法打开一个频道": "Open a channel by calling the conversations_open method", - "当前参数": "Current parameters", - "安装Claude的依赖": "Install Claude's dependencies", - "生成的视频文件路径": "Generated video file path", - "注意目前不能多人同时调用Claude接口": "Note that multiple people cannot currently call the Claude interface at the same time", - "获取Slack消息失败": "Failed to get Slack message", - "翻译结果": "Translation result", - "调用Claude时": "When calling Claude", - "已知某些代码的局部作用是": "It is known that the local effect of some code is", - "根据给定的切割时长将音频文件切割成多个片段": "Cut the audio file into multiple segments according to the given cutting duration", - "请稍候": "Please wait", - "向已打开的频道发送一条文本消息": "Send a text message to the opened channel", - "每个切割音频片段的时长": "The duration of each cut audio segment", - "Claude响应缓慢": "Claude responds slowly", - "然后重启程序": "Then restart the program", - "因为在同一个频道里存在多人使用时历史消息渗透问题": "Because there is a problem of historical message penetration when multiple people use it in the same channel", - "其中": "Among them", - "gpt写的": "Written by GPT", - "报告已经添加到右侧“文件上传区”": "The report has been added to the 'File Upload Area' on the right", - "目前支持的格式": "Supported formats at present", - "英文Latex项目全文纠错": "Full-text correction of English Latex projects", - "光速": "Speed of light", - "表示频道ID": "Representing channel ID", - "读取音频文件": "Reading audio files", - "数学AnimationGeneration": "Mathematical Animation Generation", - "开始生成动画": "Start generating animation", - "否则将导致每个人的Claude问询历史互相渗透": "Otherwise, everyone's Claude inquiry history will be mutually infiltrated", - "如果需要使用Slack Claude": "If you need to use Slack Claude", - "防止丢失最后一条消息": "Prevent the last message from being lost", - "开始": "Start", - "Claude响应异常": "Claude responds abnormally", - "并将返回的频道ID保存在属性CHANNEL_ID中": "And save the returned channel ID in the property CHANNEL_ID", - "4. 时间膨胀公式": "4. 
Time dilation formula", - "属性": "Attribute", - "一些常见的公式包括": "Some common formulas include", - "时间": "Time", - "物体的能量": "Energy of an object", - "对整个Latex项目进行纠错": "Correcting the entire Latex project", - "此插件处于开发阶段": "This plugin is in the development stage", - "实现消息发送、接收等功能": "Implement message sending, receiving and other functions", - "生成数学动画": "Generate mathematical animations", - "设置OpenAI密钥和模型": "Set OpenAI key and model", - "默认值为1000": "Default value is 1000", - "调用whisper模型音频转文字": "Call whisper model to convert audio to text", - "否则结束循环": "Otherwise end the loop", - "等待Claude回复的片段": "Wait for the segment replied by Claude", - "这些公式描述了质量-能量转换、相对论引起的空间时变形、描述物理系统的拉格朗日力学、以及时间膨胀等现象": "These formulas describe phenomena such as mass-energy conversion, space-time deformation caused by relativity, Lagrangian mechanics describing physical systems, and time dilation.", - "则无需填写NEWBING_COOKIES": "Then there is no need to fill in NEWBING_COOKIES", - "SlackClient类用于与Slack API进行交互": "The SlackClient class is used to interact with the Slack API", - "同时它必须被包含在AVAIL_LLM_MODELS切换列表中": "At the same time, it must be included in the AVAIL_LLM_MODELS switch list", - "段音频完成了吗": "Is the segment audio completed?", - "提取文件扩展名": "Extract the file extension", - "段音频的第": "The", - "段音频的主要内容": "The main content of the segment audio is", - "z$ 分别是空间直角坐标系中的三个坐标": "z$, respectively, are the three coordinates in the spatial rectangular coordinate system", - "这个是怎么识别的呢我也不清楚": "I'm not sure how this is recognized", - "从现在起": "From now on", - "连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion", - "联网的ChatGPT_bing版": "OnlineChatGPT_BingEdition", - "Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage", - "Langchain知识库": "LangchainKnowledgeBase", - "Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison", - "Latex输出PDF": "OutputPDFFromLatex", - "Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF", - "sprint亮靛": "SprintIndigo", - "寻找Latex主文件": "FindLatexMainFile", - "专业词汇声明": "ProfessionalTerminologyDeclaration", - "Latex精细分解与转化": "DecomposeAndConvertLatex", - "编译Latex": "CompileLatex", - "如果您是论文原作者": "If you are the original author of the paper", - "正在编译对比PDF": "Compiling the comparison PDF", - "将 \\include 命令转换为 \\input 命令": "Converting the \\include command to the \\input command", - "取评分最高者返回": "Returning the highest-rated one", - "不要修改!! 高危设置!通过修改此设置": "Do not modify!! High-risk setting! 
By modifying this setting", - "Tex源文件缺失!": "Tex source file is missing!", - "6.25 加入判定latex模板的代码": "Added code to determine the latex template on June 25", - "正在精细切分latex文件": "Finely splitting the latex file", - "获取response失败": "Failed to get response", - "手动指定语言": "Manually specify the language", - "输入arxivID": "Enter arxivID", - "对输入的word文档进行摘要生成": "Generate a summary of the input word document", - "将指定目录下的PDF文件从英文翻译成中文": "Translate PDF files from English to Chinese in the specified directory", - "如果分析错误": "If the analysis is incorrect", - "尝试第": "Try the", - "用户填3": "User fills in 3", - "请在此处追加更细致的矫错指令": "Please append more detailed correction instructions here", - "为了防止大语言模型的意外谬误产生扩散影响": "To prevent the accidental spread of errors in large language models", - "前面是中文冒号": "The colon before is in Chinese", - "内含已经翻译的Tex文档": "Contains a Tex document that has been translated", - "成功啦": "Success!", - "刷新页面即可以退出UpdateKnowledgeArchive模式": "Refresh the page to exit UpdateKnowledgeArchive mode", - "或者不在环境变量PATH中": "Or not in the environment variable PATH", - "--读取文件": "--Read the file", - "才能继续下面的步骤": "To continue with the next steps", - "代理数据解析失败": "Proxy data parsing failed", - "详见项目主README.md": "See the main README.md of the project for details", - "临时存储用于调试": "Temporarily stored for debugging", - "屏蔽空行和太短的句子": "Filter out empty lines and sentences that are too short", - "gpt 多线程请求": "GPT multi-threaded request", - "编译已经开始": "Compilation has started", - "无法找到一个主Tex文件": "Cannot find a main Tex file", - "修复括号": "Fix parentheses", - "请您不要删除或修改这行警告": "Please do not delete or modify this warning", - "请登录OpenAI查看详情 https": "Please log in to OpenAI to view details at https", - "调用函数": "Call a function", - "请查看终端的输出或耐心等待": "Please check the output in the terminal or wait patiently", - "LatexEnglishCorrection+高亮修正位置": "Latex English correction + highlight correction position", - "行": "line", - "Newbing 请求失败": "Newbing request failed", - "转化PDF编译是否成功": "Whether the converted PDF compilation was successful", - "建议更换代理协议": "Recommend changing the proxy protocol", - "========================================= 插件主程序1 =====================================================": "========================================= Plugin Main Program 1 =====================================================", - "终端": "terminal", - "请先上传文件素材": "Please upload file materials first", - "前面是中文逗号": "There is a Chinese comma in front", - "请尝试把以下指令复制到高级参数区": "Please try copying the following instructions to the advanced parameters section", - "翻译-": "Translation -", - "请耐心等待": "Please be patient", - "将前后断行符脱离": "Remove line breaks before and after", - "json等": "JSON, etc.", - "生成中文PDF": "Generate Chinese PDF", - "用红色标注处保留区": "Use red color to highlight the reserved area", - "对比PDF编译是否成功": "Whether the comparison PDF compilation was successful", - "回答完问题后": "After answering the question", - "其他操作系统表现未知": "Unknown performance on other operating systems", - "-构建知识库": "- Build knowledge base", - "还原原文": "Restore original text", - "或者重启之后再度尝试": "Or try again after restarting", - "免费": "Free", - "仅在Windows系统进行了测试": "Tested only on Windows system", - "欢迎加REAME中的QQ联系开发者": "Feel free to contact the developer via QQ in the README", - "当前知识库内的有效文件": "Valid files in the current knowledge base", - "您可以到Github Issue区": "You can go to the Github Issue area", - "刷新Gradio前端界面": "Refresh the Gradio frontend interface", - "吸收title与作者以上的部分": "Absorb the part above the title and authors", - "给出一些判定模板文档的词作为扣分项": "Provide some words in the template 
document as deduction items", - "--读取参数": "-- Read parameters", - "然后进行问答": "And then perform question-answering", - "根据自然语言执行插件命令": "Execute plugin commands based on natural language", - "*{\\scriptsize\\textbf{警告": "*{\\scriptsize\\textbf{Warning", - "但请查收结果": "But please check the results", - "翻译内容可靠性无保障": "No guarantee of translation accuracy", - "寻找主文件": "Find the main file", - "消耗时间的函数": "Time-consuming function", - "当前语言模型温度设定": "Current language model temperature setting", - "这需要一段时间计算": "This requires some time to calculate", - "为啥chatgpt会把cite里面的逗号换成中文逗号呀": "Why does ChatGPT change commas inside 'cite' to Chinese commas?", - "发现已经存在翻译好的PDF文档": "Found an already translated PDF document", - "待提取的知识库名称id": "Knowledge base name ID to be extracted", - "文本碎片重组为完整的tex片段": "Reassemble text fragments into complete tex fragments", - "注意事项": "Notes", - "参数说明": "Parameter description", - "或代理节点": "Or proxy node", - "构建知识库": "Building knowledge base", - "报错信息如下. 如果是与网络相关的问题": "Error message as follows. If it is related to network issues", - "功能描述": "Function description", - "禁止移除或修改此警告": "Removal or modification of this warning is prohibited", - "Arixv翻译": "Arixv translation", - "读取优先级": "Read priority", - "包含documentclass关键字": "Contains the documentclass keyword", - "根据文本使用GPT模型生成相应的图像": "Generate corresponding images using GPT model based on the text", - "图像生成所用到的提示文本": "Prompt text used for image generation", - "Your account is not active. OpenAI以账户失效为由": "Your account is not active. OpenAI states that it is due to account expiration", - "快捷的调试函数": "Convenient debugging function", - "在多Tex文档中": "In multiple Tex documents", - "因此选择GenerateImage函数": "Therefore, choose the GenerateImage function", - "当前工作路径为": "The current working directory is", - "实际得到格式": "Obtained format in reality", - "这段代码定义了一个名为TempProxy的空上下文管理器": "This code defines an empty context manager named TempProxy", - "吸收其他杂项": "Absorb other miscellaneous items", - "请输入要翻译成哪种语言": "Please enter which language to translate into", - "的单词": "of the word", - "正在尝试自动安装": "Attempting automatic installation", - "如果有必要": "If necessary", - "开始下载": "Start downloading", - "项目Github地址 \\url{https": "Project GitHub address \\url{https", - "将根据报错信息修正tex源文件并重试": "The Tex source file will be corrected and retried based on the error message", - "发送至azure openai api": "Send to Azure OpenAI API", - "吸收匿名公式": "Absorb anonymous formulas", - "用该压缩包+ConversationHistoryArchive进行反馈": "Provide feedback using the compressed package + ConversationHistoryArchive", - "需要特殊依赖": "Requires special dependencies", - "还原部分原文": "Restore part of the original text", - "构建完成": "Build completed", - "解析arxiv网址失败": "Failed to parse arXiv URL", - "输入问题后点击该插件": "Click the plugin after entering the question", - "请求子进程": "Requesting subprocess", - "请务必用 pip install -r requirements.txt 指令安装依赖": "Please make sure to install the dependencies using the 'pip install -r requirements.txt' command", - "如果程序停顿5分钟以上": "If the program pauses for more than 5 minutes", - "转化PDF编译已经成功": "Conversion to PDF compilation was successful", - "虽然PDF生成失败了": "Although PDF generation failed", - "分析上述回答": "Analyze the above answer", - "吸收在42行以内的begin-end组合": "Absorb the begin-end combination within 42 lines", - "推荐http": "Recommend http", - "Latex没有安装": "Latex is not installed", - "用latex编译为PDF对修正处做高亮": "Compile to PDF using LaTeX and highlight the corrections", - "reverse 操作必须放在最后": "'reverse' operation must be placed at the end", - "AZURE OPENAI API拒绝了请求": "AZURE OPENAI API rejected the request", 
- "该项目的Latex主文件是": "The main LaTeX file of this project is", - "You are associated with a deactivated account. OpenAI以账户失效为由": "You are associated with a deactivated account. OpenAI considers it as an account expiration", - "它*必须*被包含在AVAIL_LLM_MODELS列表中": "It *must* be included in the AVAIL_LLM_MODELS list", - "未知指令": "Unknown command", - "尝试执行Latex指令失败": "Failed to execute the LaTeX command", - "摘要生成后的文档路径": "Path of the document after summary generation", - "GPT结果已输出": "GPT result has been outputted", - "使用Newbing": "Using Newbing", - "其他模型转化效果未知": "Unknown conversion effect of other models", - "P.S. 但愿没人把latex模板放在里面传进来": "P.S. Hopefully, no one passes a LaTeX template in it", - "定位主Latex文件": "Locate the main LaTeX file", - "后面是英文冒号": "English colon follows", - "文档越长耗时越长": "The longer the document, the longer it takes.", - "压缩包": "Compressed file", - "但通常不会出现在正文": "But usually does not appear in the body.", - "正在预热文本向量化模组": "Preheating text vectorization module", - "5刀": "5 dollars", - "提问吧! 但注意": "Ask questions! But be careful", - "发送至AZURE OPENAI API": "Send to AZURE OPENAI API", - "请仔细鉴别并以原文为准": "Please carefully verify and refer to the original text", - "如果需要使用AZURE 详情请见额外文档 docs\\use_azure.md": "If you need to use AZURE, please refer to the additional document docs\\use_azure.md for details", - "使用正则表达式查找半行注释": "Use regular expressions to find inline comments", - "只有第二步成功": "Only the second step is successful", - "P.S. 顺便把CTEX塞进去以支持中文": "P.S. By the way, include CTEX to support Chinese", - "安装方法https": "Installation method: https", - "则跳过GPT请求环节": "Then skip the GPT request process", - "请切换至“UpdateKnowledgeArchive”插件进行知识库访问": "Please switch to the 'UpdateKnowledgeArchive' plugin for knowledge base access", - "=================================== 工具函数 ===============================================": "=================================== Utility functions ===============================================", - "填入azure openai api的密钥": "Fill in the Azure OpenAI API key", - "上传Latex压缩包": "Upload LaTeX compressed file", - "远程云服务器部署": "Deploy to remote cloud server", - "用黑色标注转换区": "Use black color to annotate the conversion area", - "音频文件的路径": "Path to the audio file", - "必须包含documentclass": "Must include documentclass", - "再列出用户可能提出的三个问题": "List three more questions that the user might ask", - "根据需要切换prompt": "Switch the prompt as needed", - "将文件复制一份到下载区": "Make a copy of the file in the download area", - "次编译": "Second compilation", - "Latex文件融合完成": "LaTeX file merging completed", - "返回": "Return", - "后面是英文逗号": "Comma after this", - "对不同latex源文件扣分": "Deduct points for different LaTeX source files", - "失败啦": "Failed", - "编译BibTex": "Compile BibTeX", - "Linux下必须使用Docker安装": "Must install using Docker on Linux", - "报错信息": "Error message", - "删除或修改歧义文件": "Delete or modify ambiguous files", - "-预热文本向量化模组": "- Preheating text vectorization module", - "将每次对话记录写入Markdown格式的文件中": "Write each conversation record into a file in Markdown format", - "其他类型文献转化效果未知": "Unknown conversion effect for other types of literature", - "获取线程锁": "Acquire thread lock", - "使用英文": "Use English", - "如果存在调试缓存文件": "If there is a debug cache file", - "您需要首先调用构建知识库": "You need to call the knowledge base building first", - "原始PDF编译是否成功": "Whether the original PDF compilation is successful", - "生成 azure openai api请求": "Generate Azure OpenAI API requests", - "正在编译PDF": "Compiling PDF", - "仅调试": "Debug only", - "========================================= 插件主程序2 =====================================================": 
"========================================= Plugin Main Program 2 =====================================================", - "多线程翻译开始": "Multithreaded translation begins", - "出问题了": "There is a problem", - "版权归原文作者所有": "Copyright belongs to the original author", - "当前大语言模型": "Current large language model", - "目前对机器学习类文献转化效果最好": "Currently, the best conversion effect for machine learning literature", - "这个paper有个input命令文件名大小写错误!": "This paper has an input command with a filename case error!", - "期望格式例如": "Expected format, for example", - "解决部分词汇翻译不准确的问题": "Resolve the issue of inaccurate translation for some terms", - "待注入的知识库名称id": "Name/ID of the knowledge base to be injected", - "精细切分latex文件": "Fine-grained segmentation of LaTeX files", - "永远给定None": "Always given None", - "work_folder = Latex预处理": "work_folder = LaTeX preprocessing", - "请直接去该路径下取回翻译结果": "Please directly go to the path to retrieve the translation results", - "寻找主tex文件": "Finding the main .tex file", - "模型参数": "Model parameters", - "返回找到的第一个": "Return the first one found", - "编译转化后的PDF": "Compile the converted PDF", - "\\SEAFILE_LOCALŅ03047\\我的资料库\\music\\Akie秋绘-未来轮廓.mp3": "\\SEAFILE_LOCALŅ03047\\My Library\\music\\Akie秋绘-未来轮廓.mp3", - "拆分过长的latex片段": "Splitting overly long LaTeX fragments", - "没有找到任何可读取文件": "No readable files found", - "暗色模式 / 亮色模式": "Dark mode / Light mode", - "检测到arxiv文档连接": "Detected arXiv document link", - "此插件Windows支持最佳": "This plugin has best support for Windows", - "from crazy_functions.虚空终端 import 终端": "from crazy_functions.null_terminal import Terminal", - "本地论文翻译": "Local paper translation", - "输出html调试文件": "Output HTML debugging file", - "以下所有配置也都支持利用环境变量覆写": "All the following configurations can also be overridden using environment variables", - "PDF文件所在的路径": "Path of the PDF file", - "也是可读的": "It is also readable", - "将消耗较长时间下载中文向量化模型": "Downloading Chinese vectorization model will take a long time", - "环境变量配置格式见docker-compose.yml": "See docker-compose.yml for the format of environment variable configuration", - "编译文献交叉引用": "Compile bibliographic cross-references", - "默认为default": "Default is 'default'", - "或者使用此插件继续上传更多文件": "Or use this plugin to continue uploading more files", - "该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成": "This PDF is generated by the GPT-Academic open-source project using a large language model + LaTeX translation plugin", - "使用latexdiff生成论文转化前后对比": "Use latexdiff to generate before and after comparison of paper transformation", - "正在编译PDF文档": "Compiling PDF document", - "读取config.py文件中关于AZURE OPENAI API的信息": "Read the information about AZURE OPENAI API from the config.py file", - "配置教程&视频教程": "Configuration tutorial & video tutorial", - "临时地启动代理网络": "Temporarily start proxy network", - "临时地激活代理网络": "Temporarily activate proxy network", - "功能尚不稳定": "Functionality is unstable", - "默认为Chinese": "Default is Chinese", - "请查收结果": "Please check the results", - "将 chatglm 直接对齐到 chatglm2": "Align chatglm directly to chatglm2", - "中读取数据构建知识库": "Build a knowledge base by reading data in", - "用于给一小段代码上代理": "Used to proxy a small piece of code", - "分析结果": "Analysis results", - "依赖不足": "Insufficient dependencies", - "Markdown翻译": "Markdown translation", - "除非您是论文的原作者": "Unless you are the original author of the paper", - "test_LangchainKnowledgeBase读取": "test_LangchainKnowledgeBase read", - "将多文件tex工程融合为一个巨型tex": "Merge multiple tex projects into one giant tex", - "吸收iffalse注释": "Absorb iffalser comments", - "您接下来不能再使用其他插件了": "You can no longer use other plugins next", - "正在构建知识库": 
"Building knowledge base", - "需Latex": "Requires Latex", - "即找不到": "That is not found", - "保证括号正确": "Ensure parentheses are correct", - "= 2 通过一些Latex模板中常见": "= 2 through some common Latex templates", - "请立即终止程序": "Please terminate the program immediately", - "解压失败! 需要安装pip install rarfile来解压rar文件": "Decompression failed! Install 'pip install rarfile' to decompress rar files", - "请在此处给出自定义翻译命令": "Please provide custom translation command here", - "解压失败! 需要安装pip install py7zr来解压7z文件": "Decompression failed! Install 'pip install py7zr' to decompress 7z files", - "执行错误": "Execution error", - "目前仅支持GPT3.5/GPT4": "Currently only supports GPT3.5/GPT4", - "P.S. 顺便把Latex的注释去除": "P.S. Also remove comments from Latex", - "写出文件": "Write out the file", - "当前报错的latex代码处于第": "The current error in the LaTeX code is on line", - "主程序即将开始": "Main program is about to start", - "详情信息见requirements.txt": "See details in requirements.txt", - "释放线程锁": "Release thread lock", - "由于最为关键的转化PDF编译失败": "Due to the critical failure of PDF conversion and compilation", - "即将退出": "Exiting soon", - "尝试下载": "Attempting to download", - "删除整行的空注释": "Remove empty comments from the entire line", - "也找不到": "Not found either", - "从一批文件": "From a batch of files", - "编译结束": "Compilation finished", - "调用缓存": "Calling cache", - "只有GenerateImage和生成图像相关": "Only GenerateImage and image generation related", - "待处理的word文档路径": "Path of the word document to be processed", - "是否在提交时自动清空输入框": "Whether to automatically clear the input box upon submission", - "检查结果": "Check the result", - "生成时间戳": "Generate a timestamp", - "编译原始PDF": "Compile the original PDF", - "填入ENGINE": "Fill in ENGINE", - "填入api版本": "Fill in the API version", - "中文Bing版": "Chinese Bing version", - "当前支持的格式包括": "Currently supported formats include", - "交互功能模板函数": "InteractiveFunctionTemplateFunction", - "交互功能函数模板": "InteractiveFunctionFunctionTemplate", - "语音助手": "VoiceAssistant", - "微调数据集生成": "FineTuneDatasetGeneration", - "chatglm微调工具": "ChatGLMFineTuningTool", - "启动微调": "StartFineTuning", - "请讲话": "Please speak", - "正在听您讲话": "Listening to you", - "对这个人外貌、身处的环境、内心世界、过去经历进行描写": "Describe the appearance, environment, inner world, and past experiences of this person", - "请向下翻": "Please scroll down", - "实时音频采集": "Real-time audio collection", - "找不到": "Not found", - "在一个异步线程中采集音频": "Collect audio in an asynchronous thread", - "azure和api2d请求源": "Azure and API2D request source", - "等待ChatGLMFT响应中": "Waiting for ChatGLMFT response", - "如果使用ChatGLM2微调模型": "If using ChatGLM2 fine-tuning model", - "把文件复制过去": "Copy the file over", - "可选": "Optional", - "ChatGLMFT响应异常": "ChatGLMFT response exception", - "上传本地文件/压缩包供函数插件调用": "Upload local files/compressed packages for function plugin calls", - "例如 f37f30e0f9934c34a992f6f64f7eba4f": "For example, f37f30e0f9934c34a992f6f64f7eba4f", - "正在等您说完问题": "Waiting for you to finish the question", - "解除插件状态": "Release plugin status", - "详情见https": "See details at https", - "避免线程阻塞": "Avoid thread blocking", - "先上传数据集": "Upload dataset first", - "请直接提交即可": "Submit directly", - "Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数": "Call ChatGLMFT fail, cannot load ChatGLMFT parameters", - "插件可读取“输入区”文本/路径作为参数": "The plugin can read text/path in the input area as parameters", - "给出指令": "Give instructions", - "暂不提交": "Do not submit for now", - "如 绿帽子*深蓝色衬衫*黑色运动裤": "E.g. 
green hat * dark blue shirt * black sports pants", - "阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https": "Aliyun real-time speech recognition has high configuration difficulty and is only recommended for advanced users. Refer to https", - "ChatGLMFT尚未加载": "ChatGLMFT has not been loaded yet", - "输入 clear 以清空对话历史": "Enter 'clear' to clear the conversation history", - "可以将自身的状态存储到cookie中": "You can store your own status in cookies", - "填入你亲手写的部署名": "Fill in the deployment name you wrote by yourself", - "该选项即将被弃用": "This option will be deprecated soon", - "代理网络配置": "Proxy network configuration", - "每秒采样数量": "Number of samples per second", - "使用时": "When using", - "想象一个穿着者": "Imagine a wearer", - "如果已经存在": "If it already exists", - "例如您可以将以下命令复制到下方": "For example, you can copy the following command below", - "正在锁定插件": "Locking plugin", - "使用": "Use", - "读 docs\\use_azure.md": "Read docs\\use_azure.md", - "开始最终总结": "Start final summary", - "openai的官方KEY需要伴随组织编码": "Openai's official KEY needs to be accompanied by organizational code", - "将子线程的gpt结果写入chatbot": "Write the GPT result of the sub-thread into the chatbot", - "Arixv论文精细翻译": "Fine translation of Arixv paper", - "开始接收chatglmft的回复": "Start receiving replies from chatglmft", - "请先将.doc文档转换为.docx文档": "Please convert .doc documents to .docx documents first", - "避免多用户干扰": "Avoid multiple user interference", - "清空label": "Clear label", - "解除插件锁定": "Unlock plugin", - "请以以下方式load模型!!!": "Please load the model in the following way!!!", - "没给定指令": "No instruction given", - "100字以内": "Within 100 words", - "获取关键词": "Get keywords", - "欢迎使用 MOSS 人工智能助手!": "Welcome to use MOSS AI assistant!", - "音频助手": "Audio assistant", - "上传Latex项目": "Upload Latex project", - "对话助手函数插件": "Chat assistant function plugin", - "如果一句话小于7个字": "If a sentence is less than 7 words", - "640个字节为一组": "640 bytes per group", - "右下角更换模型菜单中可切换openai": "OpenAI can be switched in the model menu in the lower right corner", - "双手离开鼠标键盘吧": "Take your hands off the mouse and keyboard", - "先删除": "Delete first", - "如果要使用ChatGLMFT": "If you want to use ChatGLMFT", - "例如 RoPlZrM88DnAFkZK": "For example, RoPlZrM88DnAFkZK", - "提取总结": "Extract summary", - "ChatGLMFT消耗大量的内存": "ChatGLMFT consumes a lot of memory", - "格式如org-123456789abcdefghijklmno的": "In the format of org-123456789abcdefghijklmno", - "在执行完成之后": "After execution is complete", - "此处填API密钥": "Fill in the API key here", - "chatglmft 没有 sys_prompt 接口": "ChatGLMFT does not have a sys_prompt interface", - "用第二人称": "Use the second person", - "Chuanhu-Small-and-Beautiful主题": "Chuanhu-Small-and-Beautiful theme", - "请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期": "Please check if ALIYUN_TOKEN and ALIYUN_APPKEY have expired", - "还需要填写组织": "You also need to fill in the organization", - "会直接转到该函数": "Will directly jump to the function", - "初始化插件状态": "Initializing plugin status", - "插件锁定中": "Plugin is locked", - "如果这里报错": "If there is an error here", - "本地Latex论文精细翻译": "Local Latex paper fine translation", - "极少数情况下": "In very few cases", - "首先你在中文语境下通读整篇论文": "First, read the entire paper in a Chinese context", - "点击“停止”键可终止程序": "Click the 'Stop' button to terminate the program", - "建议排查": "Suggested troubleshooting", - "没有阿里云语音识别APPKEY和TOKEN": "No Aliyun voice recognition APPKEY and TOKEN", - "避免遗忘导致死锁": "Avoid forgetting to cause deadlock", - "第一次调用": "First call", - "解决插件锁定时的界面显示问题": "Solve the interface display problem when the plugin is locked", - "初始化音频采集线程": "Initialize audio capture thread", - "找不到微调模型检查点": "Cannot find fine-tuning model checkpoint", - "色彩主体": 
"Color theme", - "上传文件自动修正路径": "Automatically correct the path when uploading files", - "将文件添加到chatbot cookie中": "Add files to chatbot cookie", - "正常状态": "Normal state", - "建议使用英文单词": "Suggest using English words", - "Aliyun音频服务异常": "Aliyun audio service exception", - "格式如org-xxxxxxxxxxxxxxxxxxxxxxxx": "Format like org-xxxxxxxxxxxxxxxxxxxxxxxx", - "GPT 学术优化": "GPT academic optimization", - "要求": "Requirement", - "赋予插件状态": "Assign plugin status", - "等待GPT响应": "Waiting for GPT response", - "MOSS can understand and communicate fluently in the language chosen by the user such as English and 中文. MOSS can perform any language-based tasks.": "MOSS can understand and communicate fluently in the language chosen by the user such as English and Chinese. MOSS can perform any language-based tasks.", - "我将为您查找相关壁纸": "I will search for related wallpapers for you", - "当下一次用户提交时": "When the next user submits", - "赋予插件锁定 锁定插件回调路径": "Assign plugin lock, lock plugin callback path", - "处理个别特殊插件的锁定状态": "Handle the lock status of individual special plugins", - "add gpt task 创建子线程请求gpt": "Add GPT task, create sub-thread to request GPT", - "等待用户的再次调用": "Waiting for the user to call again", - "只读": "Read-only", - "用于灵活调整复杂功能的各种参数": "Various parameters used to flexibly adjust complex functions", - "输入 stop 以终止对话": "Enter stop to terminate the conversation", - "缺少ChatGLMFT的依赖": "Missing dependency of ChatGLMFT", - "找 API_ORG 设置项": "Find API_ORG setting item", - "检查config中的AVAIL_LLM_MODELS选项": "Check the AVAIL_LLM_MODELS option in config", - "对这个人外貌、身处的环境、内心世界、人设进行描写": "Describe the appearance, environment, inner world, and character of this person.", - "请输入关键词": "Please enter a keyword.", - "!!!如果需要运行量化版本": "!!! If you need to run the quantitative version.", - "为每一位访问的用户赋予一个独一无二的uuid编码": "Assign a unique uuid code to each visiting user.", - "由于提问含不合规内容被Azure过滤": "Due to Azure filtering out questions containing non-compliant content.", - "欢迎使用 MOSS 人工智能助手!输入内容即可进行对话": "Welcome to use MOSS AI assistant! Enter the content to start the conversation.", - "记住当前的label": "Remember the current label.", - "不能正常加载ChatGLMFT的参数!": "Cannot load ChatGLMFT parameters normally!", - "建议直接在API_KEY处填写": "It is recommended to fill in directly at API_KEY.", - "创建request": "Create request", - "默认 secondary": "Default secondary", - "会被加在你的输入之前": "Will be added before your input", - "缺少": "Missing", - "前者是API2D的结束条件": "The former is the termination condition of API2D", - "无需填写": "No need to fill in", - "后缀": "Suffix", - "扭转的范围": "Range of twisting", - "是否在触发时清除历史": "Whether to clear history when triggered", - "⭐多线程方法": "⭐Multi-threaded method", - "消耗大量的内存": "Consumes a large amount of memory", - "重组": "Reorganize", - "高危设置! 常规情况下不要修改! 通过修改此设置": "High-risk setting! Do not modify under normal circumstances! 
Modify this setting", - "检查USE_PROXY": "Check USE_PROXY", - "标注节点的行数范围": "Range of line numbers for annotated nodes", - "即不处理之前的对话历史": "That is, do not process previous conversation history", - "即将编译PDF": "Compiling PDF", - "没有设置ANTHROPIC_API_KEY选项": "ANTHROPIC_API_KEY option is not set", - "非Openai官方接口返回了错误": "Non-Openai official interface returned an error", - "您的 API_KEY 不满足任何一种已知的密钥格式": "Your API_KEY does not meet any known key format", - "格式": "Format", - "不能正常加载": "Cannot load properly", - "🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行": "🏃‍♂️🏃‍♂️🏃‍♂️ Subprocess execution", - "前缀": "Prefix", - "创建AcsClient实例": "Create AcsClient instance", - "⭐主进程执行": "⭐Main process execution", - "增强稳健性": "Enhance robustness", - "用来描述你的要求": "Used to describe your requirements", - "举例": "For example", - "⭐单线程方法": "⭐Single-threaded method", - "后者是OPENAI的结束条件": "The latter is the termination condition of OPENAI", - "防止proxies单独起作用": "Prevent proxies from working alone", - "将两个PDF拼接": "Concatenate two PDFs", - "最后一步处理": "The last step processing", - "正在从github下载资源": "Downloading resources from github", - "失败时": "When failed", - "尚未加载": "Not loaded yet", - "配合前缀可以把你的输入内容用引号圈起来": "With the prefix, you can enclose your input content in quotation marks", - "我好!": "I'm good!", - "默认 False": "Default False", - "的依赖": "Dependencies of", - "并设置参数": "and set parameters", - "会被加在你的输入之后": "Will be added after your input", - "安装": "Installation", - "一个单实例装饰器": "Single instance decorator", - "自定义API KEY格式": "Customize API KEY format", - "的参数": "Parameters of", - "api2d等请求源": "api2d and other request sources", - "逆转出错的段落": "Reverse the wrong paragraph", - "没有设置ANTHROPIC_API_KEY": "ANTHROPIC_API_KEY is not set", - "默认 True": "Default True", - "本项目现已支持OpenAI和Azure的api-key": "This project now supports OpenAI and Azure's api-key", - "即可见": "Visible immediately", - "请问什么是质子": "What is a proton?", - "按钮是否可见": "Is the button visible?", - "调用": "Call", - "如果要使用": "If you want to use", - "的参数!": "parameters!", - "例如翻译、解释代码、润色等等": "such as translation, code interpretation, polishing, etc.", - "响应异常": "Response exception", - "响应中": "Responding", - "请尝试英文Prompt": "Try English Prompt", - "在运行过程中动态地修改多个配置": "Dynamically modify multiple configurations during runtime", - "无法调用相关功能": "Unable to invoke related functions", - "接驳虚空终端": "Connect to Void Terminal", - "虚空终端插件的功能": "Functionality of Void Terminal plugin", - "执行任意插件的命令": "Execute commands of any plugin", - "修改调用函数": "Modify calling function", - "获取简单聊天的默认参数": "Get default parameters for simple chat", - "根据自然语言的描述": "Based on natural language description", - "获取插件的句柄": "Get handle of plugin", - "第四部分": "Part Four", - "在运行过程中动态地修改配置": "Dynamically modify configurations during runtime", - "请先把模型切换至gpt-*或者api2d-*": "Please switch the model to gpt-* or api2d-* first", - "获取简单聊天的句柄": "Get handle of simple chat", - "获取插件的默认参数": "Get default parameters of plugin", - "GROBID服务不可用": "GROBID service is unavailable", - "请问": "May I ask", - "如果等待时间过长": "If the waiting time is too long", - "编程": "programming", - "5. 现在": "5. 
Now", - "您不必读这个else分支": "You don't have to read this else branch", - "用插件实现": "Implement with plugins", - "插件分类默认选项": "Default options for plugin classification", - "填写多个可以均衡负载": "Filling in multiple can balance the load", - "色彩主题": "Color theme", - "可能附带额外依赖 -=-=-=-=-=-=-": "May come with additional dependencies -=-=-=-=-=-=-", - "讯飞星火认知大模型": "Xunfei Xinghuo cognitive model", - "ParsingLuaProject的所有源文件 | 输入参数为路径": "All source files of ParsingLuaProject | Input parameter is path", - "复制以下空间https": "Copy the following space https", - "如果意图明确": "If the intention is clear", - "如系统是Linux": "If the system is Linux", - "├── 语音功能": "├── Voice function", - "见Github wiki": "See Github wiki", - "⭐ ⭐ ⭐ 立即应用配置": "⭐ ⭐ ⭐ Apply configuration immediately", - "现在您只需要再次重复一次您的指令即可": "Now you just need to repeat your command again", - "没辙了": "No way", - "解析Jupyter Notebook文件 | 输入参数为路径": "Parse Jupyter Notebook file | Input parameter is path", - "⭐ ⭐ ⭐ 确认插件参数": "⭐ ⭐ ⭐ Confirm plugin parameters", - "找不到合适插件执行该任务": "Cannot find a suitable plugin to perform this task", - "接驳VoidTerminal": "Connect to VoidTerminal", - "**很好": "**Very good", - "对话|编程": "Conversation&ImageGenerating|Programming", - "对话|编程|学术": "Conversation&ImageGenerating|Programming|Academic", "4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model", - "「请调用插件翻译PDF论文": "Please call the plugin to translate the PDF paper", - "3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词": "3. If you use keywords such as 'call plugin xxx', 'modify configuration xxx', 'please', etc.", - "以下是一篇学术论文的基本信息": "The following is the basic information of an academic paper", - "GROBID服务器地址": "GROBID server address", - "修改配置": "Modify configuration", - "理解PDF文档的内容并进行回答 | 输入参数为路径": "Understand the content of the PDF document and answer | Input parameter is path", - "对于需要高级参数的插件": "For plugins that require advanced parameters", - "🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行": "Main process execution 🏃‍♂️🏃‍♂️🏃‍♂️", - "没有填写 HUGGINGFACE_ACCESS_TOKEN": "HUGGINGFACE_ACCESS_TOKEN not filled in", - "调度插件": "Scheduling plugin", - "语言模型": "Language model", - "├── ADD_WAIFU 加一个live2d装饰": "├── ADD_WAIFU Add a live2d decoration", - "初始化": "Initialization", - "选择了不存在的插件": "Selected a non-existent plugin", - "修改本项目的配置": "Modify the configuration of this project", - "如果输入的文件路径是正确的": "If the input file path is correct", - "2. 您可以打开插件下拉菜单以了解本项目的各种能力": "2. 
You can open the plugin dropdown menu to learn about various capabilities of this project", - "VoidTerminal插件说明": "VoidTerminal plugin description", - "无法理解您的需求": "Unable to understand your requirements", - "默认 AdvancedArgs = False": "Default AdvancedArgs = False", - "「请问Transformer网络的结构是怎样的": "What is the structure of the Transformer network?", - "比如1812.10695": "For example, 1812.10695", - "翻译README或MD": "Translate README or MD", - "读取新配置中": "Reading new configuration", - "假如偏离了您的要求": "If it deviates from your requirements", - "├── THEME 色彩主题": "├── THEME color theme", - "如果还找不到": "If still not found", - "问": "Ask", - "请检查系统字体": "Please check system fonts", - "如果错误": "If there is an error", - "作为替代": "As an alternative", - "ParseJavaProject的所有源文件 | 输入参数为路径": "All source files of ParseJavaProject | Input parameter is path", - "比对相同参数时生成的url与自己代码生成的url是否一致": "Check if the generated URL matches the one generated by your code when comparing the same parameters", - "清除本地缓存数据": "Clear local cache data", - "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL": "Use Google Scholar search assistant to search for results of a specific URL | Input parameter is the URL of Google Scholar search page", - "运行方法": "Running method", - "您已经上传了文件**": "You have uploaded the file **", - "「给爷翻译Arxiv论文": "Translate Arxiv papers for me", - "请修改config中的GROBID_URL": "Please modify GROBID_URL in the config", - "处理特殊情况": "Handling special cases", - "不要自己瞎搞!」": "Don't mess around by yourself!", - "LoadConversationHistoryArchive | 输入参数为路径": "LoadConversationHistoryArchive | Input parameter is a path", - "| 输入参数是一个问题": "| Input parameter is a question", - "├── CHATBOT_HEIGHT 对话窗的高度": "├── CHATBOT_HEIGHT Height of the chat window", - "对C": "To C", - "默认关闭": "Default closed", - "当前进度": "Current progress", - "HUGGINGFACE的TOKEN": "HUGGINGFACE's TOKEN", - "查找可用插件中": "Searching for available plugins", - "下载LLAMA时起作用 https": "Works when downloading LLAMA https", - "使用 AK": "Using AK", - "正在执行任务": "Executing task", - "保存当前的对话 | 不需要输入参数": "Save current conversation | No input parameters required", - "对话": "Conversation", - "图中鲜花怒放": "Flowers blooming in the picture", - "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包": "Batch translate Chinese to English in Markdown files | Input parameter is a path or upload a compressed package", - "ParsingCSharpProject的所有源文件 | 输入参数为路径": "ParsingCSharpProject's all source files | Input parameter is a path", - "为我翻译PDF论文": "Translate PDF papers for me", - "聊天对话": "Chat conversation", - "拼接鉴权参数": "Concatenate authentication parameters", - "请检查config中的GROBID_URL": "Please check the GROBID_URL in the config", - "拼接字符串": "Concatenate strings", - "您的意图可以被识别的更准确": "Your intent can be recognized more accurately", - "该模型有七个 bin 文件": "The model has seven bin files", - "但思路相同": "But the idea is the same", - "你需要翻译": "You need to translate", - "或者描述文件所在的路径": "Or the path of the description file", - "请您上传文件": "Please upload the file", - "不常用": "Not commonly used", - "尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-": "Experimental plugins that have not been fully tested & plugins that require additional dependencies -=--=-", - "⭐ ⭐ ⭐ 选择插件": "⭐ ⭐ ⭐ Select plugin", - "当前配置不允许被修改!如需激活本功能": "The current configuration does not allow modification! 
To activate this feature", - "正在连接GROBID服务": "Connecting to GROBID service", - "用户图形界面布局依赖关系示意图": "Diagram of user interface layout dependencies", - "是否允许通过自然语言描述修改本页的配置": "Allow modifying the configuration of this page through natural language description", - "self.chatbot被序列化": "self.chatbot is serialized", - "本地Latex论文精细翻译 | 输入参数是路径": "Locally translate Latex papers with fine-grained translation | Input parameter is the path", - "抱歉": "Sorry", - "以下这部分是最早加入的最稳定的模型 -=-=-=-=-=-=-": "The following section is the earliest and most stable model added", - "「用插件翻译README": "Translate README with plugins", - "如果不正确": "If incorrect", - "⭐ ⭐ ⭐ 读取可配置项目条目": "⭐ ⭐ ⭐ Read configurable project entries", - "开始语言对话 | 没有输入参数": "Start language conversation | No input parameters", - "谨慎操作 | 不需要输入参数": "Handle with caution | No input parameters required", - "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包": "Correct the entire English Latex project | Input parameter is the path or upload compressed package", - "如果需要处理文件": "If file processing is required", - "提供图像的内容": "Provide the content of the image", - "查看历史上的今天事件 | 不需要输入参数": "View historical events of today | No input parameters required", - "这个稍微啰嗦一点": "This is a bit verbose", - "多线程解析并翻译此项目的源码 | 不需要输入参数": "Parse and translate the source code of this project in multi-threading | No input parameters required", - "此处打印出建立连接时候的url": "Print the URL when establishing the connection here", - "精准翻译PDF论文为中文 | 输入参数为路径": "Translate PDF papers accurately into Chinese | Input parameter is the path", - "检测到操作错误!当您上传文档之后": "Operation error detected! After you upload the document", - "在线大模型配置关联关系示意图": "Online large model configuration relationship diagram", - "你的填写的空间名如grobid": "Your filled space name such as grobid", - "获取方法": "Get method", - "| 输入参数为路径": "| Input parameter is the path", - "⭐ ⭐ ⭐ 执行插件": "⭐ ⭐ ⭐ Execute plugin", - "├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置": "├── ALLOW_RESET_CONFIG Whether to allow modifying the configuration of this page through natural language description", - "重新页面即可生效": "Refresh the page to take effect", - "设为public": "Set as public", - "并在此处指定模型路径": "And specify the model path here", - "分析用户意图中": "Analyzing user intent", - "刷新下拉列表": "Refresh the drop-down list", - "失败 当前语言模型": "Failed current language model", - "1. 请用**自然语言**描述您需要做什么": "1. 
Please describe what you need to do in **natural language**", - "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包": "Translate the full text of Latex projects from Chinese to English | Input parameter is the path or upload a compressed package", - "没有配置BAIDU_CLOUD_API_KEY": "No configuration for BAIDU_CLOUD_API_KEY", - "设置默认值": "Set default value", - "如果太多了会导致gpt无法理解": "If there are too many, it will cause GPT to be unable to understand", - "绿草如茵": "Green grass", - "├── LAYOUT 窗口布局": "├── LAYOUT window layout", - "用户意图理解": "User intent understanding", - "生成RFC1123格式的时间戳": "Generate RFC1123 formatted timestamp", - "欢迎您前往Github反馈问题": "Welcome to go to Github to provide feedback", - "排除已经是按钮的插件": "Exclude plugins that are already buttons", - "亦在下拉菜单中显示": "Also displayed in the dropdown menu", - "导致无法反序列化": "Causing deserialization failure", - "意图=": "Intent =", - "章节": "Chapter", - "调用插件": "Invoke plugin", - "ParseRustProject的所有源文件 | 输入参数为路径": "All source files of ParseRustProject | Input parameter is path", - "需要点击“函数插件区”按钮进行处理": "Need to click the 'Function Plugin Area' button for processing", - "默认 AsButton = True": "Default AsButton = True", - "收到websocket错误的处理": "Handling websocket errors", - "用插件": "Use Plugin", - "没有选择任何插件组": "No plugin group selected", - "答": "Answer", - "可修改成本地GROBID服务": "Can modify to local GROBID service", - "用户意图": "User intent", - "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包": "Polish the full text of English Latex projects | Input parameters are paths or uploaded compressed packages", - "「我不喜欢当前的界面颜色": "I don't like the current interface color", - "「请调用插件": "Please call the plugin", - "VoidTerminal状态": "VoidTerminal status", - "新配置": "New configuration", - "支持Github链接": "Support Github links", - "没有配置BAIDU_CLOUD_SECRET_KEY": "No BAIDU_CLOUD_SECRET_KEY configured", - "获取当前VoidTerminal状态": "Get the current VoidTerminal status", - "刷新按钮": "Refresh button", - "为了防止pickle.dumps": "To prevent pickle.dumps", - "放弃治疗": "Give up treatment", - "可指定不同的生成长度、top_p等相关超参": "Can specify different generation lengths, top_p and other related hyperparameters", - "请将题目和摘要翻译为": "Translate the title and abstract into", - "通过appid和用户的提问来生成请参数": "Generate request parameters through appid and user's question", - "ImageGeneration | 输入参数字符串": "ImageGeneration | Input parameter string", - "将文件拖动到文件上传区": "Drag and drop the file to the file upload area", - "如果意图模糊": "If the intent is ambiguous", - "星火认知大模型": "Spark Cognitive Big Model", - "默认 Color = secondary": "Default Color = secondary", - "此处也不需要修改": "No modification is needed here", - "⭐ ⭐ ⭐ 分析用户意图": "⭐ ⭐ ⭐ Analyze user intent", - "再试一次": "Try again", - "请写bash命令实现以下功能": "Please write a bash command to implement the following function", - "批量SummarizingWordDocuments | 输入参数为路径": "Batch SummarizingWordDocuments | Input parameter is the path", - "/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析": "Parse the python file in /Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns", - "当我要求你写bash命令时": "When I ask you to write a bash command", - "├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框": "├── AUTO_CLEAR_TXT Whether to automatically clear the input box when submitting", - "按停止键终止": "Press the stop key to terminate", - "文心一言": "Wenxin Yiyan (ERNIE Bot)", - "不能理解您的意图": "Cannot understand your intention", - "用简单的关键词检测用户意图": "Detect user intention with simple keywords", - "中文": "Chinese", - "解析一个C++项目的所有源文件": "Parse all source files of a C++ project", - "请求的Prompt为": "Requested prompt is", - "参考本demo的时候可取消上方打印的注释": "You can remove the
comments above when referring to this demo", - "开始接收回复": "Start receiving replies", - "接入讯飞星火大模型 https": "Access to Xunfei Xinghuo large model https", - "用该压缩包进行反馈": "Use this compressed package for feedback", - "翻译Markdown或README": "Translate Markdown or README", - "SK 生成鉴权签名": "SK generates authentication signature", - "插件参数": "Plugin parameters", - "需要访问中文Bing": "Need to access Chinese Bing", - "ParseFrontendProject的所有源文件": "Parse all source files of ParseFrontendProject", - "现在将执行效果稍差的旧版代码": "Now execute the older version code with slightly worse performance", - "您需要明确说明并在指令中提到它": "You need to specify and mention it in the command", - "请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件": "Please set ALLOW_RESET_CONFIG=True in config.py and restart the software", - "按照自然语言描述生成一个动画 | 输入参数是一段话": "Generate an animation based on natural language description | Input parameter is a sentence", - "你的hf用户名如qingxu98": "Your hf username is qingxu98", - "Arixv论文精细翻译 | 输入参数arxiv论文的ID": "Fine translation of Arixv paper | Input parameter is the ID of arxiv paper", - "无法获取 abstract": "Unable to retrieve abstract", - "尽可能地仅用一行命令解决我的要求": "Try to solve my request using only one command", - "提取插件参数": "Extract plugin parameters", - "配置修改完成": "Configuration modification completed", - "正在修改配置中": "Modifying configuration", - "ParsePythonProject的所有源文件": "All source files of ParsePythonProject", - "请求错误": "Request error", - "精准翻译PDF论文": "Accurate translation of PDF paper", - "无法获取 authors": "Unable to retrieve authors", - "该插件诞生时间不长": "This plugin has not been around for long", - "返回项目根路径": "Return project root path", - "BatchSummarizePDFDocuments的内容 | 输入参数为路径": "Content of BatchSummarizePDFDocuments | Input parameter is a path", - "百度千帆": "Baidu Qianfan", - "解析一个C++项目的所有头文件": "Parse all header files of a C++ project", - "现在请您描述您的需求": "Now please describe your requirements", - "该功能具有一定的危险性": "This feature has a certain level of danger", - "收到websocket关闭的处理": "Processing when receiving websocket closure", - "读取Tex论文并写摘要 | 输入参数为路径": "Read Tex paper and write abstract | Input parameter is the path", - "地址为https": "The address is https", - "限制最多前10个配置项": "Limit up to 10 configuration items", - "6. 如果不需要上传文件": "6. If file upload is not needed", - "默认 Group = 对话": "Default Group = Conversation", - "五秒后即将重启!若出现报错请无视即可": "Restarting in five seconds! 
Please ignore if there is an error", - "收到websocket连接建立的处理": "Processing when receiving websocket connection establishment", - "批量生成函数的注释 | 输入参数为路径": "Batch generate function comments | Input parameter is the path", - "聊天": "Chat", - "但您可以尝试再试一次": "But you can try again", - "千帆大模型平台": "Qianfan Big Model Platform", - "直接运行 python tests/test_plugins.py": "Run python tests/test_plugins.py directly", - "或是None": "Or None", - "进行hmac-sha256进行加密": "Perform encryption using hmac-sha256", - "批量总结音频或视频 | 输入参数为路径": "Batch summarize audio or video | Input parameter is path", - "插件在线服务配置依赖关系示意图": "Plugin online service configuration dependency diagram", - "开始初始化模型": "Start initializing model", - "弱模型可能无法理解您的想法": "Weak model may not understand your ideas", - "解除大小写限制": "Remove case sensitivity restriction", - "跳过提示环节": "Skip prompt section", - "接入一些逆向工程https": "Access some reverse engineering https", - "执行完成": "Execution completed", - "如果需要配置": "If configuration is needed", - "此处不修改;如果使用本地或无地域限制的大模型时": "Do not modify here; if using local or region-unrestricted large models", - "你是一个Linux大师级用户": "You are a Linux master-level user", - "arxiv论文的ID是1812.10695": "The ID of the arxiv paper is 1812.10695", - "而不是点击“提交”按钮": "Instead of clicking the 'Submit' button", - "解析一个Go项目的所有源文件 | 输入参数为路径": "Parse all source files of a Go project | Input parameter is path", - "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包": "Polish the entire text of a Chinese Latex project | Input parameter is path or upload compressed package", - "「生成一张图片": "Generate an image", - "将Markdown或README翻译为中文 | 输入参数为路径或URL": "Translate Markdown or README to Chinese | Input parameters are path or URL", - "训练时间": "Training time", - "将请求的鉴权参数组合为字典": "Combine the requested authentication parameters into a dictionary", - "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包": "Translate the entire text of Latex project from English to Chinese | Input parameters are path or uploaded compressed package", - "内容如下": "The content is as follows", - "用于高质量地读取PDF文档": "Used for high-quality reading of PDF documents", - "上下文太长导致 token 溢出": "The context is too long, causing token overflow", - "├── DARK_MODE 暗色模式 / 亮色模式": "├── DARK_MODE Dark mode / Light mode", - "语言模型回复为": "The language model replies as", - "from crazy_functions.chatglm微调工具 import 微调数据集生成": "from crazy_functions.chatglm fine-tuning tool import fine-tuning dataset generation", - "为您选择了插件": "Selected plugin for you", - "无法获取 title": "Unable to get title", - "收到websocket消息的处理": "Processing of received websocket messages", - "2023年": "2023", - "清除所有缓存文件": "Clear all cache files", - "├── PDF文档精准解析": "├── Accurate parsing of PDF documents", - "论文我刚刚放到上传区了": "I just put the paper in the upload area", - "生成url": "Generate URL", - "以下部分是新加入的模型": "The following section is the newly added model", - "学术": "Academic", - "├── DEFAULT_FN_GROUPS 插件分类默认选项": "├── DEFAULT_FN_GROUPS Plugin classification default options", - "不推荐使用": "Not recommended for use", - "正在同时咨询": "Consulting simultaneously", - "将Markdown翻译为中文 | 输入参数为路径或URL": "Translate Markdown to Chinese | Input parameters are path or URL", - "Github网址是https": "The Github URL is https", - "试着加上.tex后缀试试": "Try adding the .tex suffix", - "对项目中的各个插件进行测试": "Test each plugin in the project", - "插件说明": "Plugin description", - "├── CODE_HIGHLIGHT 代码高亮": "├── CODE_HIGHLIGHT Code highlighting", - "记得用插件": "Remember to use the plugin", - "谨慎操作": "Handle with caution", - "private_upload里面的文件名在解压zip后容易出现乱码": "The file name inside private_upload is prone to garbled characters after unzipping", - 
"直接返回报错": "Direct return error", - "临时的上传文件夹位置": "Temporary upload folder location", - "使用latex格式 测试3 写出麦克斯韦方程组": "Write Maxwell's equations using latex format for test 3", - "这是一张图片": "This is an image", - "没有发现任何近期上传的文件": "No recent uploaded files found", - "如url未成功匹配返回None": "Return None if the URL does not match successfully", - "如果有Latex环境": "If there is a Latex environment", - "第一次运行时": "When running for the first time", - "创建工作路径": "Create a working directory", - "向": "To", - "执行中. 删除数据": "Executing. Deleting data", - "CodeInterpreter开源版": "CodeInterpreter open source version", - "建议选择更稳定的接口": "It is recommended to choose a more stable interface", - "现在您点击任意函数插件时": "Now when you click on any function plugin", - "请使用“LatexEnglishCorrection+高亮”插件": "Please use the 'LatexEnglishCorrection+Highlight' plugin", - "安装完成": "Installation completed", - "记得用插件!」": "Remember to use the plugin!", - "结论": "Conclusion", - "无法下载资源": "Unable to download resources", - "首先排除一个one-api没有done数据包的第三方Bug情形": "First exclude a third-party bug where one-api does not have a done data package", - "知识库中添加文件": "Add files to the knowledge base", - "处理重名的章节": "Handling duplicate chapter names", - "先上传文件素材": "Upload file materials first", - "无法从google获取信息!": "Unable to retrieve information from Google!", - "展示如下": "Display as follows", - "「把Arxiv论文翻译成中文PDF": "Translate Arxiv papers into Chinese PDF", - "论文我刚刚放到上传区了」": "I just put the paper in the upload area", - "正在下载Gradio主题": "Downloading Gradio themes", - "再运行此插件": "Run this plugin again", - "记录近期文件": "Record recent files", - "粗心检查": "Careful check", - "更多主题": "More themes", - "//huggingface.co/spaces/gradio/theme-gallery 可选": "//huggingface.co/spaces/gradio/theme-gallery optional", - "由 test_on_result_chg": "By test_on_result_chg", - "所有问询记录将自动保存在本地目录./": "All inquiry records will be automatically saved in the local directory ./", - "正在解析论文": "Analyzing the paper", - "逐个文件转移到目标路径": "Move each file to the target path", - "最多重试5次": "Retry up to 5 times", - "日志文件夹的位置": "Location of the log folder", - "我们暂时无法解析此PDF文档": "We are temporarily unable to parse this PDF document", - "文件检索": "File retrieval", - "/**/chatGPT对话历史*.html": "/**/chatGPT conversation history*.html", - "非OpenAI官方接口返回了错误": "Non-OpenAI official interface returned an error", - "如果在Arxiv上匹配失败": "If the match fails on Arxiv", - "文件进入知识库后可长期保存": "Files can be saved for a long time after entering the knowledge base", - "您可以再次重试": "You can try again", - "整理文件集合": "Organize file collection", - "检测到有缺陷的非OpenAI官方接口": "Detected defective non-OpenAI official interface", - "此插件不调用Latex": "This plugin does not call Latex", - "移除过时的旧文件从而节省空间&保护隐私": "Remove outdated old files to save space & protect privacy", - "代码我刚刚打包拖到上传区了」": "I just packed the code and dragged it to the upload area", - "将图像转为灰度图像": "Convert the image to grayscale", - "待排除": "To be excluded", - "请勿修改": "Please do not modify", - "crazy_functions/代码重写为全英文_多线程.py": "crazy_functions/code rewritten to all English_multi-threading.py", - "开发中": "Under development", - "请查阅Gradio主题商店": "Please refer to the Gradio theme store", - "输出消息": "Output message", - "其他情况": "Other situations", - "获取文献失败": "Failed to retrieve literature", - "可以通过再次调用本插件的方式": "You can use this plugin again by calling it", - "保留下半部分": "Keep the lower half", - "排除问题": "Exclude the problem", - "知识库": "Knowledge base", - "ParsePDF失败": "ParsePDF failed", - "向知识库追加更多文档": "Append more documents to the knowledge base", - "此处待注入的知识库名称id": "The knowledge base name ID to be injected here", - 
"您需要构建知识库后再运行此插件": "You need to build the knowledge base before running this plugin", - "判定是否为公式 | 测试1 写出洛伦兹定律": "Determine whether it is a formula | Test 1 write out the Lorentz law", - "构建知识库后": "After building the knowledge base", - "找不到本地项目或无法处理": "Unable to find local project or unable to process", - "再做一个小修改": "Make another small modification", - "解析整个Matlab项目": "Parse the entire Matlab project", - "需要用GPT提取参数": "Need to extract parameters using GPT", - "文件路径": "File path", - "正在排队": "In queue", - "-=-=-=-=-=-=-=-= 写出第1个文件": "-=-=-=-=-=-=-=-= Write the first file", - "仅翻译后的文本 -=-=-=-=-=-=-=-=": "Translated text only -=-=-=-=-=-=-=-=", - "对话通道": "Conversation channel", - "找不到任何": "Unable to find any", - "正在启动": "Starting", - "开始创建新进程并执行代码! 时间限制": "Start creating a new process and executing the code! Time limit", - "解析Matlab项目": "Parse Matlab project", - "更换UI主题": "Change UI theme", - "⭐ 开始啦 !": "⭐ Let's start!", - "先提取当前英文标题": "First extract the current English title", - "睡一会防止触发google反爬虫": "Sleep for a while to prevent triggering Google anti-crawler", - "测试": "Test", - "-=-=-=-=-=-=-=-= 写出Markdown文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out Markdown file", - "如果index是1的话": "If the index is 1", - "VoidTerminal已经实现了类似的代码": "VoidTerminal has already implemented similar code", - "等待线程锁": "Waiting for thread lock", - "那么我们默认代理生效": "Then we default to proxy", - "结果是一个有效文件": "The result is a valid file", - "⭐ 检查模块": "⭐ Check module", - "备份一份History作为记录": "Backup a copy of History as a record", - "作者Binary-Husky": "Author Binary-Husky", - "将csv文件转excel表格": "Convert CSV file to Excel table", - "获取文章摘要": "Get article summary", - "次代码生成尝试": "Attempt to generate code", - "如果参数是空的": "If the parameter is empty", - "请配置讯飞星火大模型的XFYUN_APPID": "Please configure XFYUN_APPID for the Xunfei Starfire model", - "-=-=-=-=-=-=-=-= 写出第2个文件": "Write the second file", - "代码生成阶段结束": "Code generation phase completed", - "则进行提醒": "Then remind", - "处理异常": "Handle exception", - "可能触发了google反爬虫机制": "May have triggered Google anti-crawler mechanism", - "AnalyzeAMatlabProject的所有源文件": "All source files of AnalyzeAMatlabProject", - "写入": "Write", - "我们5秒后再试一次...": "Let's try again in 5 seconds...", - "判断一下用户是否错误地通过对话通道进入": "Check if the user entered through the dialogue channel by mistake", - "结果": "Result", - "2. 如果没有文件": "2. If there is no file", - "由 test_on_sentence_end": "By test_on_sentence_end", - "则直接使用first section name": "Then directly use the first section name", - "太懒了": "Too lazy", - "记录当前的大章节标题": "Record the current chapter title", - "然后再次点击该插件! 至于您的文件": "Then click the plugin again! As for your file", - "此次我们的错误追踪是": "This time our error tracking is", - "首先在arxiv上搜索": "First search on arxiv", - "被新插件取代": "Replaced by a new plugin", - "正在处理文件": "Processing file", - "除了连接OpenAI之外": "In addition to connecting OpenAI", - "我们检查一下": "Let's check", - "进度": "Progress", - "处理少数情况下的特殊插件的锁定状态": "Handle the locked state of special plugins in a few cases", - "⭐ 开始执行": "⭐ Start execution", - "正常情况": "Normal situation", - "下个句子中已经说完的部分": "The part that has already been said in the next sentence", - "首次运行需要花费较长时间下载NOUGAT参数": "The first run takes a long time to download NOUGAT parameters", - "使用tex格式公式 测试2 给出柯西不等式": "Use the tex format formula to test 2 and give the Cauchy inequality", - "无法从bing获取信息!": "Unable to retrieve information from Bing!", - "秒. 
请等待任务完成": "Wait for the task to complete", - "开始干正事": "Start doing real work", - "需要花费较长时间下载NOUGAT参数": "It takes a long time to download NOUGAT parameters", - "然后再次点击该插件": "Then click the plugin again", - "受到bing限制": "Restricted by Bing", - "检索文章的历史版本的题目": "Retrieve the titles of historical versions of the article", - "收尾": "Wrap up", - "给定了task": "Given a task", - "某段话的整个句子": "The whole sentence of a paragraph", - "-=-=-=-=-=-=-=-= 写出HTML文件 -=-=-=-=-=-=-=-=": "-=-=-=-=-=-=-=-= Write out HTML file -=-=-=-=-=-=-=-=", - "当前文件": "Current file", - "请在输入框内填写需求": "Please fill in the requirements in the input box", - "结果是一个字符串": "The result is a string", - "用插件实现」": "Implemented with a plugin", - "⭐ 到最后一步了": "⭐ Reached the final step", - "重新修改当前part的标题": "Modify the title of the current part again", - "请勿点击“提交”按钮或者“基础功能区”按钮": "Do not click the 'Submit' button or the 'Basic Function Area' button", - "正在执行命令": "Executing command", - "检测到**滞留的缓存文档**": "Detected **stuck cache document**", - "第三步": "Step three", - "失败了~ 别担心": "Failed~ Don't worry", - "动态代码解释器": "Dynamic code interpreter", - "开始执行": "Start executing", - "不给定task": "No task given", - "正在加载NOUGAT...": "Loading NOUGAT...", - "精准翻译PDF文档": "Accurate translation of PDF documents", - "时间限制TIME_LIMIT": "Time limit TIME_LIMIT", - "翻译前后混合 -=-=-=-=-=-=-=-=": "Mixed translation before and after -=-=-=-=-=-=-=-=", - "搞定代码生成": "Code generation is done", - "插件通道": "Plugin channel", - "智能体": "Intelligent agent", - "切换界面明暗 ☀": "Switch interface brightness ☀", - "交换图像的蓝色通道和红色通道": "Swap blue channel and red channel of the image", - "作为函数参数": "As a function parameter", - "先挑选偶数序列号": "First select even serial numbers", - "仅供测试": "For testing only", - "执行成功了": "Execution succeeded", - "开始逐个文件进行处理": "Start processing files one by one", - "当前文件处理列表": "Current file processing list", - "执行失败了": "Execution failed", - "请及时处理": "Please handle it in time", - "源文件": "Source file", - "裁剪图像": "Crop image", - "插件动态生成插件": "Dynamic generation of plugins", - "正在验证上述代码的有效性": "Validating the above code", - "⭐ = 关键步骤": "⭐ = Key step", - "!= 0 代表“提交”键对话通道": "!= 0 represents the 'Submit' key dialogue channel", - "解析python源代码项目": "Parsing Python source code project", - "请检查PDF是否损坏": "Please check if the PDF is damaged", - "插件动态生成": "Dynamic generation of plugins", - "⭐ 分离代码块": "⭐ Separating code blocks", - "已经被记忆": "Already memorized", - "默认用英文的": "Default to English", - "错误追踪": "Error tracking", - "对话&编程|编程|学术|智能体": "Conversation&ImageGenerating|Programming|Academic|Intelligent agent", - "请检查": "Please check", - "检测到被滞留的缓存文档": "Detected cached documents being left behind", - "还有哪些场合允许使用代理": "What other occasions allow the use of proxies", - "1. 如果有文件": "1. If there is a file", - "执行开始": "Execution starts", - "代码生成结束": "Code generation ends", - "请及时点击“**保存当前对话**”获取所有滞留文档": "Please click '**Save Current Dialogue**' in time to obtain all cached documents", - "需点击“**函数插件区**”按钮进行处理": "Click the '**Function Plugin Area**' button for processing", - "此函数已经弃用": "This function has been deprecated", - "以后再写": "Write it later", - "返回给定的url解析出的arxiv_id": "Return the arxiv_id parsed from the given URL", - "⭐ 文件上传区是否有东西": "⭐ Is there anything in the file upload area", - "Nougat解析论文失败": "Nougat failed to parse the paper", - "本源代码中": "In this source code", - "或者基础功能通道": "Or the basic function channel", - "使用zip压缩格式": "Using zip compression format", - "受到google限制": "Restricted by Google", - "如果是": "If it is", - "不用担心": "don't worry", - "显示/隐藏自定义菜单": "Show/Hide Custom Menu", - "1. 输入文本": "1. 
Enter Text", - "微软AutoGen": "Microsoft AutoGen", - "在没有声音之后": "After No Sound", - "⭐ 主进程 Docker 外挂文件夹监控": "⭐ Main Process Docker External Folder Monitoring", - "请求任务": "Request Task", - "推荐上传压缩文件": "Recommend Uploading Compressed File", - "我准备好处理下一个问题了": "I'm ready to handle the next question", - "输入要反馈的内容": "Enter the content to be feedbacked", - "当已经存在一个正在运行的MultiAgentTerminal时": "When there is already a running MultiAgentTerminal", - "也根据时间间隔": "Also according to the time interval", - "自定义功能": "Custom Function", - "上传文件后会自动把输入区修改为相应路径": "After uploading the file, the input area will be automatically modified to the corresponding path", - "缺少docker运行环境!": "Missing docker runtime environment!", - "暂不支持中转": "Transit is not supported temporarily", - "一些第三方接口的出现这样的错误": "Some third-party interfaces encounter such errors", - "项目Wiki": "Project Wiki", - "但是我们把上一帧同样加上": "But we also add the previous frame", - "AutoGen 执行失败": "AutoGen execution failed", - "程序抵达用户反馈节点": "The program reaches the user feedback node", - "预制功能": "Prefabricated Function", - "输入新按钮名称": "Enter the new button name", - "| 不需要输入参数": "| No input parameters required", - "如果有新文件出现": "If there is a new file", - "Bug反馈": "Bug Feedback", - "指定翻译成何种语言": "Specify the language to translate into", - "点击保存当前的对话按钮": "Click the save current conversation button", - "如果您需要补充些什么": "If you need to add something", - "HTTPS 秘钥和证书": "HTTPS Key and Certificate", - "输入exit": "Enter exit", - "输入新提示后缀": "Enter a new prompt suffix", - "如果是文本文件": "If it is a text file", - "支持动态切换主题": "Support dynamic theme switching", - "并与self.previous_work_dir_files中所记录的文件进行对比": "And compare with the files recorded in self.previous_work_dir_files", - "作者 Microsoft & Binary-Husky": "Author Microsoft & Binary-Husky", - "请在自定义菜单中定义提示词前缀": "Please define the prefix of the prompt word in the custom menu", - "一般情况下您不需要说什么": "In general, you don't need to say anything", - "「暗色主题已启用": "Dark theme enabled", - "继续向服务器发送n次音频数据": "Continue to send audio data to the server n times", - "获取fp的拓展名": "Get the extension name of fp", - "指令安装内置Gradio及其他依赖": "Command to install built-in Gradio and other dependencies", - "查看自动更新": "Check for automatic updates", - "则更新self.previous_work_dir_files中": "Then update in self.previous_work_dir_files", - "看门狗耐心": "Watchdog patience", - "检测到新生图像": "Detected new image", - "等待AutoGen执行结果": "Waiting for AutoGen execution result", - "自定义菜单": "Custom menu", - "保持链接激活": "Keep the link active", - "已经被新插件取代": "Has been replaced by a new plugin", - "检查当前的模型是否符合要求": "Check if the current model meets the requirements", - "交互功能模板Demo函数": "Interactive function template Demo function", - "上一帧没有人声": "No human voice in the previous frame", - "用于判断异常": "Used to judge exceptions", - "请阅读Wiki": "Please read the Wiki", - "查找wallhaven.cc的壁纸": "Search for wallpapers on wallhaven.cc", - "2. 点击任意基础功能区按钮": "2. Click any button in the basic function area", - "一些垃圾第三方接口的出现这样的错误": "Some errors caused by garbage third-party interfaces", - "再次点击VoidTerminal": "Click VoidTerminal again", - "结束信号已明确": "The end signal is clear", - "获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议": "Failed to get proxy. It is very likely that you will not be able to access OpenAI family models and Google Scholar without a proxy. It is recommended", - "界面外观": "Interface appearance", - "如果您想终止程序": "If you want to terminate the program", - "2. 
点击任意函数插件区按钮": "Click any function plugin area button", - "绕过openai访问频率限制": "Bypass openai access frequency limit", - "配置暗色主题或亮色主题": "Configure dark theme or light theme", - "自定义按钮的最大数量限制": "Maximum number limit for custom buttons", - "函数插件区使用说明": "Instructions for function plugin area", - "如何语音对话": "How to have a voice conversation", - "清空输入区": "Clear input area", - "文档清单如下": "The document list is as follows", - "由 audio_convertion_thread": "By audio_convertion_thread", - "音频的可视化表现": "Visual representation of audio", - "然后直接点击“提交”以继续": "Then click 'Submit' to continue", - "运行MultiAgentTerminal": "Run MultiAgentTerminal", - "自定义按钮1": "Custom button 1", - "查看历史上的今天事件": "View events from history", - "如遇到Bug请前往": "If you encounter a bug, please go to", - "当前插件只支持": "The current plugin only supports", - "而不是再次启动一个新的MultiAgentTerminal": "Instead of starting a new MultiAgentTerminal again", - "用户代理或助理代理未定义": "User agent or assistant agent is not defined", - "运行阶段-": "Running phase-", - "随机选择": "Random selection", - "直接点击“提交”以继续": "Click 'Submit' to continue", - "使用项目内置Gradio获取最优体验! 请运行": "Use the built-in Gradio for the best experience! Please run", - "直接点击“提交”以终止AutoGen并解锁": "Click 'Submit' to terminate AutoGen and unlock", - "Github源代码开源和更新": "Github source code is open source and updated", - "直接将用户输入传递给它": "Pass user input directly to it", - "这是一个面向开发者的插件Demo": "This is a plugin demo for developers", - "帮助": "Help", - "普通对话使用说明": "Instructions for normal conversation", - "自定义按钮": "Custom button", - "即使没有声音": "Even without sound", - "⭐ 主进程": "⭐ Main process", - "基础功能区使用说明": "Basic Function Area Usage Instructions", - "提前读取一些信息": "Read some information in advance", - "当用户点击了“等待反馈”按钮时": "When the user clicks the 'Wait for Feedback' button", - "选择一个需要自定义基础功能区按钮": "Select a button in the Basic Function Area that needs to be customized", - "VoidTerminal使用说明": "VoidTerminal Usage Instructions", - "兼容一下吧": "Let's make it compatible", - "⭐⭐ 子进程执行": "⭐⭐ Subprocess execution", - "首次": "For the first time", - "则直接显示文本内容": "Then display the text content directly", - "更新状态": "Update status", - "2. 点击提交": "2. Click Submit", - "⭐⭐ 子进程": "⭐⭐ Subprocess", - "输入新提示前缀": "Enter a new prompt prefix", - "等待用户输入超时": "Wait for user input timeout", - "把新文件和发生变化的文件的路径记录到 change_list 中": "Record the paths of new files and files that have changed in change_list", - "或者上传文件": "Or upload a file", - "或者文件的修改时间发生变化": "Or the modification time of the file has changed", - "1. 输入路径/问题": "1. Enter path/question", - "尝试直接连接": "Try to connect directly", - "未来将删除": "Will be deleted in the future", - "请在自定义菜单中定义提示词后缀": "Please define the suffix of the prompt word in the custom menu", - "将executor存储到cookie中": "Store the executor in the cookie", - "1. 输入问题": "1. 
Enter question", - "发送一些音频片段给服务器": "Send some audio clips to the server", - "点击VoidTerminal": "Click VoidTerminal", - "扫描路径下的所有文件": "Scan all files under the path", - "检测到新生文档": "Detect new documents", - "预热tiktoken模块": "Preheat the tiktoken module", - "等待您的进一步指令": "Waiting for your further instructions", - "实时语音对话": "Real-time voice conversation", - "确认并保存": "Confirm and save", - "「亮色主题已启用": "Light theme enabled", - "终止AutoGen程序": "Terminate AutoGen program", - "然后根据提示输入指令": "Then enter the command as prompted", - "请上传本地文件/压缩包供“函数插件区”功能调用": "Please upload local files/zip packages for 'Function Plugin Area' function call", - "上传文件": "Upload file", - "上一帧是否有人说话": "Was there anyone speaking in the previous frame", - "这是一个时刻聆听着的语音对话助手 | 没有输入参数": "This is a voice conversation assistant that is always listening | No input parameters", - "常见问题请查阅": "Please refer to the FAQ for common questions", - "更换模型 & Prompt": "Change model & Prompt", - "如何保存对话": "How to save the conversation", - "处理任务": "Process task", - "加载已保存": "Load saved", - "打开浏览器页面": "Open browser page", - "解锁插件": "Unlock plugin", - "如果话筒激活 / 如果处于回声收尾阶段": "If the microphone is active / If it is in the echo tail stage", - "分辨率": "Resolution", - "分析行业动态": "Analyze industry trends", - "在项目实施过程中提供支持": "Provide support during project implementation", - "azure 对齐支持 -=-=-=-=-=-=-": "Azure alignment support -=-=-=-=-=-=-", - "默认的系统提示词": "Default system prompts", - "为您解释复杂的技术概念": "Explain complex technical concepts to you", - "提供项目管理和协作建议": "Provide project management and collaboration advice", - "请从AVAIL_LLM_MODELS中选择": "Please select from AVAIL_LLM_MODELS", - "提高编程能力": "Improve programming skills", - "请注意Newbing组件已不再维护": "Please note that the Newbing component is no longer maintained", - "用于定义和切换多个azure模型 --": "Used to define and switch between multiple Azure models --", - "支持 256x256": "Supports 256x256", - "定义界面上“询问多个GPT模型”插件应该使用哪些模型": "Define which models the 'Ask multiple GPT models' plugin should use on the interface", - "必须是.png格式": "Must be in .png format", - "tokenizer只用于粗估token数量": "The tokenizer is only used to estimate the number of tokens", - "协助您进行文案策划和内容创作": "Assist you in copywriting and content creation", - "帮助您巩固编程基础": "Help you consolidate your programming foundation", - "修改需求": "Modify requirements", - "确保项目顺利进行": "Ensure the smooth progress of the project", - "帮助您了解市场发展和竞争态势": "Help you understand market development and competitive situation", - "不需要动态切换": "No need for dynamic switching", - "解答您在学习过程中遇到的问题": "Answer the questions you encounter during the learning process", - "Endpoint不正确": "Endpoint is incorrect", - "提供编程思路和建议": "Provide programming ideas and suggestions", - "先上传图片": "Upload the image first", - "提供计算机科学、数据科学、人工智能等相关领域的学习资源和建议": "Provide learning resources and advice in computer science, data science, artificial intelligence, and other related fields", - "提供写作建议和技巧": "Provide writing advice and tips", - "间隔": "Interval", - "此后不需要在此处添加api2d的接口了": "No need to add the api2d interface here anymore", - "4. 学习辅导": "4. Learning guidance", - "智谱AI大模型": "Zhipu AI large model", - "3. 项目支持": "3. 
Project support", - "但这是意料之中的": "But this is expected", - "检查endpoint是否可用": "Check if the endpoint is available", - "接入智谱大模型": "Access the intelligent spectrum model", - "如果您有任何问题或需要解答的议题": "If you have any questions or topics that need answers", - "api2d 对齐支持 -=-=-=-=-=-=-": "api2d alignment support -=-=-=-=-=-=-", - "支持多线程": "Support multi-threading", - "再输入修改需求": "Enter modification requirements again", - "Endpoint不满足要求": "Endpoint does not meet the requirements", - "检查endpoint是否合法": "Check if the endpoint is valid", - "为您制定技术战略提供参考和建议": "Provide reference and advice for developing your technical strategy", - "支持 1024x1024": "Support 1024x1024", - "因为下面的代码会自动添加": "Because the following code will be automatically added", - "尝试加载模型": "Try to load the model", - "使用DALLE3生成图片 | 输入参数字符串": "Use DALLE3 to generate images | Input parameter string", - "当前论文无需解析": "The current paper does not need to be parsed", - "单个azure模型部署": "Deploy a single Azure model", - "512x512 或 1024x1024": "512x512 or 1024x1024", - "至少是8k上下文的模型": "A model with at least 8k context", - "自动忽略重复的输入": "Automatically ignore duplicate inputs", - "让您更好地掌握知识": "Help you better grasp knowledge", - "文件列表": "File list", - "并在不同模型之间用": "And use it between different models", - "插件调用出错": "Plugin call error", - "帮助您撰写文章、报告、散文、故事等": "Help you write articles, reports, essays, stories, etc.", - "*实验性功能*": "*Experimental feature*", - "2. 编程": "2. Programming", - "让您更容易理解": "Make it easier for you to understand", - "的最大上下文长度太短": "The maximum context length is too short", - "方法二": "Method 2", - "多个azure模型部署+动态切换": "Deploy multiple Azure models + dynamic switching", - "详情请见额外文档 docs\\use_azure.md": "For details, please refer to the additional document docs\\use_azure.md", - "包括但不限于 Python、Java、C++ 等": "Including but not limited to Python, Java, C++, etc.", - "为您提供业界最新的新闻和技术趋势": "Providing you with the latest industry news and technology trends", - "自动检测并屏蔽失效的KEY": "Automatically detect and block invalid keys", - "请勿使用": "Please do not use", - "最后输入分辨率": "Enter the resolution at last", - "图片": "Image", - "请检查AZURE_ENDPOINT的配置! 当前的Endpoint为": "Please check the configuration of AZURE_ENDPOINT! The current Endpoint is", - "图片修改": "Image modification", - "已经收集到所有信息": "All information has been collected", - "加载API_KEY": "Loading API_KEY", - "协助您编写代码": "Assist you in writing code", - "我可以为您提供以下服务": "I can provide you with the following services", - "排队中请稍候 ...": "Please wait in line ...", - "建议您使用英文提示词": "It is recommended to use English prompts", - "不能支撑AutoGen运行": "Cannot support AutoGen operation", - "帮助您解决编程问题": "Help you solve programming problems", - "上次用户反馈输入为": "Last user feedback input is", - "请随时告诉我您的需求": "Please feel free to tell me your needs", - "有 sys_prompt 接口": "There is a sys_prompt interface", - "可能会覆盖之前的配置": "May overwrite previous configuration", - "5. 行业动态和趋势分析": "5. 
Industry dynamics and trend analysis", - "正在等待线程锁": "Waiting for thread lock", - "请输入分辨率": "Please enter the resolution", - "接驳void-terminal": "Connecting to void-terminal", - "启动DALLE2图像修改向导程序": "Launching DALLE2 image modification wizard program", - "加载模型失败": "Failed to load the model", - "是否使用Docker容器运行代码": "Whether to run the code using Docker container", - "请输入修改需求": "Please enter modification requirements", - "作为您的写作和编程助手": "As your writing and programming assistant", - "然后再次点击本插件": "Then click this plugin again", - "需要动态切换": "Dynamic switching is required", - "文心大模型4.0": "Wenxin Large Model 4.0", - "找不到任何.pdf拓展名的文件": "Cannot find any file with .pdf extension", - "在使用AutoGen插件时": "When using the AutoGen plugin", - "协助您规划项目进度和任务分配": "Assist you in planning project schedules and task assignments", - "1. 写作": "1. Writing", - "你亲手写的api名称": "The API name you wrote yourself", - "使用DALLE2生成图片 | 输入参数字符串": "Generate images using DALLE2 | Input parameter string", - "方法一": "Method 1", - "我会尽力提供帮助": "I will do my best to provide assistance", - "多个azure模型": "Multiple Azure models", - "准备就绪": "Ready", - "请随时提问": "Please feel free to ask", - "如果需要使用AZURE": "If you need to use AZURE", - "如果不是本地模型": "If it is not a local model", - "AZURE_CFG_ARRAY中配置的模型必须以azure开头": "The models configured in AZURE_CFG_ARRAY must start with 'azure'", - "API key has been deactivated. OpenAI以账户失效为由": "API key has been deactivated. OpenAI considers it as an account failure", - "请先上传图像": "Please upload the image first", - "高优先级": "High priority", - "请配置ZHIPUAI_API_KEY": "Please configure ZHIPUAI_API_KEY", - "单个azure模型": "Single Azure model", - "预留参数 context 未实现": "Reserved parameter 'context' not implemented", - "在输入区输入临时API_KEY后提交": "Submit after entering temporary API_KEY in the input area", - "鸟": "Bird", - "图片中需要修改的位置用橡皮擦擦除为纯白色": "Erase the areas in the image that need to be modified with an eraser to pure white", - "└── PDF文档精准解析": "└── Accurate parsing of PDF documents", - "└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置": "└── ALLOW_RESET_CONFIG Whether to allow modifying the configuration of this page through natural language description", - "等待指令": "Waiting for instructions", - "不存在": "Does not exist", - "选择游戏": "Select game", - "本地大模型示意图": "Local large model diagram", - "无视此消息即可": "You can ignore this message", - "即RGB=255": "That is, RGB=255", - "如需追问": "If you have further questions", - "也可以是具体的模型路径": "It can also be a specific model path", - "才会起作用": "Will take effect", - "下载失败": "Download failed", - "网页刷新后失效": "Invalid after webpage refresh", - "crazy_functions.互动小游戏-": "crazy_functions.Interactive mini game-", - "右对齐": "Right alignment", - "您可以调用下拉菜单中的“LoadConversationHistoryArchive”还原当下的对话": "You can use the 'LoadConversationHistoryArchive' in the drop-down menu to restore the current conversation", - "左对齐": "Left alignment", - "使用默认的 FP16": "Use default FP16", - "一小时": "One hour", - "从而方便内存的释放": "Thus facilitating memory release", - "如何临时更换API_KEY": "How to temporarily change API_KEY", - "请输入 1024x1024-HD": "Please enter 1024x1024-HD", - "使用 INT8 量化": "Use INT8 quantization", - "3. 输入修改需求": "3. Enter modification requirements", - "刷新界面 由于请求gpt需要一段时间": "Refreshing the interface takes some time due to the request for gpt", - "随机小游戏": "Random mini game", - "那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型": "So please specify the specific model in QWEN_MODEL_SELECTION below", - "表值": "Table value", - "我画你猜": "I draw, you guess", - "狗": "Dog", - "2. 输入分辨率": "2. 
Enter resolution", - "鱼": "Fish", - "尚未完成": "Not yet completed", - "表头": "Table header", - "填localhost或者127.0.0.1": "Fill in localhost or 127.0.0.1", - "请上传jpg格式的图片": "Please upload images in jpg format", - "API_URL_REDIRECT填写格式是错误的": "The format of API_URL_REDIRECT is incorrect", - "├── RWKV的支持见Wiki": "Support for RWKV is available in the Wiki", - "如果中文Prompt效果不理想": "If the Chinese prompt is not effective", - "/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix": "/SEAFILE_LOCAL/50503047/My Library/Degree/paperlatex/aaai/Fu_8368_with_appendix", - "只有当AVAIL_LLM_MODELS包含了对应本地模型时": "Only when AVAIL_LLM_MODELS contains the corresponding local model", - "选择本地模型变体": "Choose the local model variant", - "如果您确信自己没填错": "If you are sure you haven't made a mistake", - "PyPDF2这个库有严重的内存泄露问题": "PyPDF2 library has serious memory leak issues", - "整理文件集合 输出消息": "Organize file collection and output message", - "没有检测到任何近期上传的图像文件": "No recently uploaded image files detected", - "游戏结束": "Game over", - "调用结束": "Call ended", - "猫": "Cat", - "请及时切换模型": "Please switch models in time", - "次中": "In the meantime", - "如需生成高清图像": "If you need to generate high-definition images", - "CPU 模式": "CPU mode", - "项目目录": "Project directory", - "动物": "Animal", - "居中对齐": "Center alignment", - "请注意拓展名需要小写": "Please note that the extension name needs to be lowercase", - "重试第": "Retry", - "实验性功能": "Experimental feature", - "猜错了": "Wrong guess", - "打开你的代理软件查看代理协议": "Open your proxy software to view the proxy agreement", - "您不需要再重复强调该文件的路径了": "You don't need to emphasize the file path again", - "请阅读": "Please read", - "请直接输入您的问题": "Please enter your question directly", - "API_URL_REDIRECT填错了": "API_URL_REDIRECT is filled incorrectly", - "谜底是": "The answer is", - "第一个模型": "The first model", - "你猜对了!": "You guessed it right!", - "已经接收到您上传的文件": "The file you uploaded has been received", - "您正在调用“图像生成”插件": "You are calling the 'Image Generation' plugin", - "刷新界面 界面更新": "Refresh the interface, interface update", - "如果之前已经初始化了游戏实例": "If the game instance has been initialized before", - "文件": "File", - "老鼠": "Mouse", - "列2": "Column 2", - "等待图片": "Waiting for image", - "使用 INT4 量化": "Use INT4 quantization", - "from crazy_functions.互动小游戏 import 随机小游戏": "TranslatedText", - "游戏主体": "TranslatedText", - "该模型不具备上下文对话能力": "TranslatedText", - "列3": "TranslatedText", - "清理": "TranslatedText", - "检查量化配置": "TranslatedText", - "如果游戏结束": "TranslatedText", - "蛇": "TranslatedText", - "则继续该实例;否则重新初始化": "TranslatedText", - "e.g. cat and 猫 are the same thing": "TranslatedText", - "第三个模型": "TranslatedText", - "如果你选择Qwen系列的模型": "TranslatedText", - "列4": "TranslatedText", - "输入“exit”获取答案": "TranslatedText", - "把它放到子进程中运行": "TranslatedText", - "列1": "TranslatedText", - "使用该模型需要额外依赖": "TranslatedText", - "再试试": "TranslatedText", - "1. 
上传图片": "TranslatedText", - "保存状态": "TranslatedText", - "GPT-Academic对话存档": "TranslatedText", - "Arxiv论文精细翻译": "TranslatedText", - "from crazy_functions.AdvancedFunctionTemplate import 测试图表渲染": "from crazy_functions.AdvancedFunctionTemplate import test_chart_rendering", - "测试图表渲染": "test_chart_rendering" -} diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json deleted file mode 100644 index a70f5df17b850760dee76f6433e6c028c4aed092..0000000000000000000000000000000000000000 --- a/docs/translate_japanese.json +++ /dev/null @@ -1,2109 +0,0 @@ -{ - "print亮黄": "PrintBrightYellow", - "print亮绿": "PrintBrightGreen", - "print亮红": "PrintBrightRed", - "print红": "PrintRed", - "print绿": "PrintGreen", - "print黄": "PrintYellow", - "print蓝": "PrintBlue", - "print紫": "PrintPurple", - "print靛": "PrintIndigo", - "print亮蓝": "PrintBrightBlue", - "print亮紫": "PrintBrightPurple", - "print亮靛": "PrintBrightIndigo", - "读文章写摘要": "ReadArticleWriteSummary", - "批量生成函数注释": "BatchGenerateFunctionComments", - "生成函数注释": "GenerateFunctionComments", - "解析项目本身": "ParseProjectItself", - "解析项目源代码": "ParseProjectSourceCode", - "解析一个Python项目": "ParsePythonProject", - "解析一个C项目的头文件": "ParseCProjectHeaderFile", - "解析一个C项目": "ParseACProject", - "解析一个Golang项目": "ParseAGolangProject", - "解析一个Rust项目": "ParseARustProject", - "解析一个Java项目": "ParseAJavaProject", - "解析一个前端项目": "ParseAFrontendProject", - "高阶功能模板函数": "AdvancedFeatureTemplateFunction", - "高级功能函数模板": "AdvancedFunctionTemplate", - "全项目切换英文": "SwitchProjectToEnglish", - "代码重写为全英文_多线程": "RewriteCodeToEnglish_Multithreading", - "Latex英文润色": "LatexEnglishProofreading", - "Latex全文润色": "LatexFullTextProofreading", - "同时问询": "SimultaneousInquiry", - "询问多个大语言模型": "InquireMultipleLargeLanguageModels", - "解析一个Lua项目": "ParseALuaProject", - "解析一个CSharp项目": "ParseACSharpProject", - "总结word文档": "SummarizeWordDocument", - "解析ipynb文件": "ParseIpynbFile", - "解析JupyterNotebook": "ParseJupyterNotebook", - "对话历史存档": "ConversationHistoryArchive", - "载入对话历史存档": "LoadConversationHistoryArchive", - "删除所有本地对话历史记录": "DeleteAllLocalChatHistory", - "Markdown英译中": "MarkdownTranslateFromEngToChi", - "批量Markdown翻译": "BatchTranslateMarkdown", - "批量总结PDF文档": "BatchSummarizePDFDocuments", - "批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsUsingPDFMiner", - "批量翻译PDF文档": "BatchTranslatePDFDocuments", - "批量翻译PDF文档_多线程": "BatchTranslatePDFDocumentsUsingMultiThreading", - "谷歌检索小助手": "GoogleSearchAssistant", - "理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPDFDocumentContent", - "理解PDF文档内容": "UnderstandingPDFDocumentContent", - "Latex中文润色": "ChineseProofreadingInLatex", - "Latex中译英": "ChineseToEnglishTranslationInLatex", - "Latex全文翻译": "FullTextTranslationInLatex", - "Latex英译中": "EnglishToChineseTranslationInLatex", - "Markdown中译英": "TranslateFromChiToEngInMarkdown", - "下载arxiv论文并翻译摘要": "DownloadArxivPapersAndTranslateAbstract", - "下载arxiv论文翻译摘要": "DownloadArxivPapersAndTranslateAbstract", - "连接网络回答问题": "ConnectToInternetAndAnswerQuestions", - "联网的ChatGPT": "ChatGPTConnectedToInternet", - "解析任意code项目": "ParseAnyCodeProject", - "同时问询_指定模型": "InquireSpecifiedModelAtTheSameTime", - "图片生成": "GenerateImage", - "test_解析ipynb文件": "test_ParseIpynbFile", - "把字符太少的块清除为回车": "RemoveBlocksWithTooFewCharactersToNewline", - "清理多余的空行": "CleanUpExtraBlankLines", - "合并小写开头的段落块": "MergeParagraphBlocksStartingWithLowerCase", - "多文件润色": "PolishMultipleFiles", - "多文件翻译": "TranslateMultipleFiles", - "解析docx": "ParseDocx", - "解析PDF": "ParsePDF", - "解析Paper": "ParsePaper", - "ipynb解释": "InterpretIpynb", - "解析源代码新": 
"ParseSourceCodeNew", - "填写格式是": "入力フォーマットは", - "并在新模块中重新加载函数": "新しいモジュールで関数を再読み込みする", - "如果要使用MOSS": "MOSSを使用する場合", - "翻译成地道的中文": "自然な中国語に翻訳する", - "请对下面的程序文件做一个概述": "以下のプログラムファイルについて概要を説明してください", - "用tex格式": "TeX形式で", - "浮点数": "浮動小数点数", - "第三部分": "第3部分", - "这个函数运行在子进程": "この関数はサブプロセスで実行されます", - "自动解压": "自動解凍", - "按Enter提交": "Enterを押して提出する", - "如果超过期限没有喂狗": "期限を過ぎてもフィードしない場合", - "正在开始汇总": "集計を開始しています", - "安装jittorllms依赖后将完全破坏现有的pytorch环境": "jittorllmsの依存関係をインストールすると、既存のpytorch環境が完全に破壊されます", - "尝试加载": "読み込みを試みる", - "* 此函数未来将被弃用": "* この関数は将来的に廃止されます", - "newbing回复的片段": "newbingの返信フラグメント", - "新版本可用": "新しいバージョンが利用可能です", - "函数插件区": "関数プラグインエリア", - "jittorllms消耗大量的内存": "jittorllmsは大量のメモリを消費します", - "替换跨行的连词": "複数行の接続詞を置換する", - "Markdown/Readme英译中": "Markdown/Readmeの英訳中", - "如果需要使用newbing": "newbingを使用する必要がある場合", - "对整个Markdown项目进行翻译": "Markdownプロジェクト全体を翻訳する", - "比正文字体小": "本文より小さいフォントサイズ", - "请对下面的文章片段做概述": "以下の記事の断片について概要を説明してください", - "正在获取文献名!": "文献名を取得しています!", - "展现在报告中的输入": "レポートに表示される入力", - "则删除报错信息": "エラーメッセージを削除する", - "第3步": "ステップ3", - "尚未充分测试的函数插件": "十分にテストされていない関数プラグイン", - "You exceeded your current quota. OpenAI以账户额度不足为由": "現在のクォータを超過しました。OpenAIはアカウントのクォータ不足を理由にしています", - "下载完成": "ダウンロードが完了しました", - "正常结束": "正常に終了しました", - "第1步": "ステップ1", - "必要时": "必要に応じて", - "留空即可": "空白のままにしておくことができます", - "文件名是": "ファイル名は", - "双层列表": "二重リスト", - "上下文管理器是一种Python对象": "コンテキストマネージャはPythonオブジェクトの一種です", - "**输出参数说明**": "**出力パラメータの説明**", - "history至少释放二分之一": "historyは少なくとも半分解放する必要があります", - "拒绝服务": "サービスを拒否する", - "默认按钮颜色是 secondary": "デフォルトのボタンの色はsecondaryです", - "加了^代表不匹配": "^を追加すると、一致しないことを意味します", - "读取时首先看是否存在私密的config_private配置文件": "読み取り時に、まずconfig_private構成ファイルが存在するかどうかを確認します", - "如果这里抛出异常": "ここで例外が発生した場合", - "缺少api_key": "api_keyが不足しています", - "而cl**h 的默认本地协议是http": "cl ** hのデフォルトのローカルプロトコルはhttpです", - "尝试计算比例": "比率を計算しようとする", - "你是一个程序架构分析师": "あなたはプログラムアーキテクチャアナリストです", - "jittorllms响应异常": "jittorllms応答異常", - "开始问问题": "質問を始める", - "的模板": "のテンプレート", - "加一个live2d装饰": "live2dの装飾を追加する", - "经过充分测试": "十分にテストされた後", - "gradio版本较旧": "Gradioのバージョンが古いです", - "配置信息如下": "以下は構成情報です", - "刷新用户界面": "ユーザーインターフェースを更新する", - "翻译": "翻訳", - "读取配置": "構成を読み込む", - "第二种情况": "2番目の場合", - "接下来": "次に", - "合并小写字母开头的段落块并替换为空格": "小文字で始まる段落ブロックを結合して空白に置き換える", - "质能方程是描述质量与能量之间的当量关系的方程": "質量とエネルギーの間の等価関係を記述する質量エネルギー方程式", - "匹配^数字^": "^数字^に一致する", - "提高语法、清晰度和整体可读性": "文法、明確さ、全体的な読みやすさを向上させる", - "对最相关的两个搜索结果进行总结": "最も関連性の高い2つの検索結果をまとめる", - "另外您可以随时在history子文件夹下找回旧版的程序": "また、いつでもhistoryサブフォルダーで古いバージョンのプログラムを取得できます", - "将每个换行符替换为两个换行符": "各改行文字を2つの改行文字に置き換える", - "调用NewBing时": "NewBingを呼び出すとき", - "接下来请你逐文件分析下面的工程": "次に、以下のプロジェクトをファイルごとに分析してください", - "不可高于3": "3を超えることはできません", - "本项目现已支持OpenAI和API2D的api-key": "このプロジェクトは現在、OpenAIおよびAPI2DのAPIキーをサポートしています", - "llm_kwargs参数": "llm_kwargsパラメータ", - "切割PDF": "PDFを切り分ける", - "随便切一下敷衍吧": "適当に切ってください", - "按照章节切割PDF": "章ごとにPDFを切り分ける", - "聊天显示框的句柄": "チャット表示ボックスのハンドル", - "已删除": "削除されました", - "如果没有指定文件名": "ファイル名が指定されていない場合", - "Tiktoken未知错误": "Tiktokenの未知のエラー", - "你的回答必须简单明了": "回答は簡潔で明確でなければなりません", - "\\n 翻译": "\\n翻訳", - "2. 
长效解决方案": "長期的な解決策", - "上下文": "文脈", - "图像中转网址": "画像の中継ウェブサイト", - "感叹号": "感嘆符", - "第 4 步": "4番目のステップ", - "为了安全而隐藏绝对地址": "安全のために絶対アドレスを隠す", - "获取成功": "取得成功", - "综合": "総合", - "在执行过程中遭遇问题": "実行中に問題が発生しました", - "输入参数 Args": "入力パラメータArgs", - "在项目根目录运行这两个指令": "プロジェクトのルートディレクトリでこれら2つのコマンドを実行する", - "文件内容是": "ファイルの内容は", - "css等": "CSSなど", - "发送请求到OpenAI后": "OpenAIにリクエストを送信した後", - "来保留函数的元信息": "関数のメタ情報を保持するために", - "第3次尝试": "3回目の試み", - "我们": "私たちは", - "注意无论是inputs还是history": "inputsまたはhistoryである場合でも注意してください", - "本地路径": "ローカルパス", - "1. 对原始文本进行归一化处理": "1.元のテキストを正規化する", - "这个文件用于函数插件的单元测试": "このファイルは関数プラグインのユニットテストに使用されます", - "用于基础的对话功能": "基本的な対話機能に使用されます", - "代理设置": "プロキシ設定", - "在此处替换您要搜索的关键词": "ここで検索するキーワードを置き換えてください", - "请求GPT模型同时维持用户界面活跃": "GPTモデルにリクエストを送信しながら、ユーザーインターフェイスを活性化します", - "3. 根据 heuristic 规则判断换行符是否是段落分隔": "3.ヒューリスティックルールに従って、改行が段落の区切りかどうかを判断する", - "temperature是LLM的内部调优参数": "temperatureはLLMの内部調整パラメータです", - "发送到chatgpt进行分析": "chatgptに送信して分析する", - "在config.py中配置": "config.pyに設定する", - "第 1 步": "ステップ1", - "定义注释的正则表达式": "コメントの正規表現を定義する", - "OpenAI绑了信用卡的用户可以填 16 或者更高": "OpenAIにクレジットカードをバインドしているユーザーは、16以上を入力できます", - "模仿ChatPDF": "ChatPDFを模倣する", - "以_array结尾的输入变量都是列表": "_arrayで終わる入力変数はすべてリストです", - "终止按钮的回调函数注册": "停止ボタンのコールバック関数の登録", - "意外Json结构": "予期しないJson構造", - "需要安装pip install py7zr来解压7z文件": "7zファイルを解凍するには、pip install py7zrをインストールする必要があります", - "将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词": "Unsplash APIのPUT_YOUR_QUERY_HEREを、そのイベントを最もよく表す単語に置き換えます", - "预处理": "前処理", - "状态": "ステータス", - "知乎": "知乎", - "聊天历史": "チャット履歴", - "请从给定的若干条搜索结果中抽取信息": "指定された複数の検索結果から情報を抽出してください", - "通过裁剪来缩短历史记录的长度": "履歴の長さを短くするためにトリミングを使用する", - "函数插件作者": "関数プラグインの作者", - "这个中文的句号是故意的": "この中国語の句点は意図的です", - "双换行": "二重改行", - "用了很多trick": "多くのトリックを使用しました", - "如.md": ".mdのように", - "屏蔽掉 chatglm的多线程": "chatglmのマルチスレッドをブロックする", - "但显示Token不足": "ただし、トークンが不足していると表示されます", - "对文本进行归一化处理": "テキストを正規化する", - "把结果写入文件": "結果をファイルに書き込む", - "如果没找到任何文件": "ファイルが見つからなかった場合", - "请确认是否满足您的需要": "必要条件を満たしているかどうかを確認してください", - "您提供的api-key不满足要求": "提供されたAPIキーが要件を満たしていません", - "MOSS消耗大量的内存": "MOSSは大量のメモリを消費します", - "文本过长将进行截断": "テキストが長すぎる場合は切り捨てられます", - "橙色": "オレンジ色", - "失败时的重试次数": "失敗時の再試行回数", - "+ 已经汇总的文件组": "すでにまとめられたファイルグループ", - "相关功能不稳定": "関連機能は不安定です", - "将要匹配的模式": "マッチングするパターン", - "第4步": "ステップ4", - "调用时": "呼び出し時", - "问询记录": "問い合わせ記録", - "不能正常加载MOSS的参数!": "MOSSのパラメータを正常にロードできません!", - "接管gradio默认的markdown处理方式": "gradioのデフォルトのmarkdown処理方法を接管する", - "加载tokenizer完毕": "tokenizerの読み込みが完了しました", - "请用markdown格式输出": "markdown形式で出力してください", - "PDF文件也已经下载": "PDFファイルもダウンロードされました", - "读取Latex文件": "Latexファイルを読み込む", - "找不到任何.tex或.pdf文件": ".texまたは.pdfファイルが見つかりません", - "端口": "ポート", - "此外": "さらに", - "使用yield from语句返回重新加载过的函数": "yield fromステートメントを使用して再読み込みされた関数を返す", - "函数插件贡献者": "関数プラグインの貢献者", - "绿色": "緑色", - "酸橙色": "ライムグリーン", - "找不到本地项目或无权访问": "ローカルプロジェクトが見つからないか、アクセス権がありません", - "此函数逐渐地搜索最长的条目进行剪辑": "この関数は徐々に最長のエントリを検索して編集します", - "注意这里的历史记录被替代了": "ここでの履歴は置き換えられました", - "但大部分场合下并不需要修改": "ただし、ほとんどの場合、変更は必要ありません", - "这个内部函数可以将函数的原始定义更新为最新版本": "この内部関数は、関数の元の定義を最新バージョンに更新できます", - "输出了前面的": "前のものを出力し、1つの文字列に結合します", - "并合并为一个字符串": "前のものを出力し、1つの文字列に結合します", - "出现的所有文章": "表示されるすべての記事", - "pip包依赖安装出现问题": "pipパッケージの依存関係のインストールに問題が発生しました", - "用于重组输入参数": "入力パラメーターを再構成するために使用されます", - "格式须是": "フォーマットは次のようにする必要があります", - "请注意proxies选项的格式": "proxiesオプションの形式に注意してください", - "api_key已导入": "api_keyがインポートされました", - "新版配置": "新しいバージョンの設定", - "暂时没有用武之地": "現時点では使用されていません", - "返回文本内容": "テキストコンテンツを返します", - "从而避免解析压缩文件": "圧縮ファイルの解析を回避するため", - "环境变量可以是": "環境変数は次のようにすることができます", - "接下来两句话只显示在界面上": 
"次の2つの文は、画面にのみ表示されます", - "解析的结果如下": "解析結果は以下のとおりです", - "若上传压缩文件": "圧縮ファイルをアップロードする場合", - "找不到任何html文件": "htmlファイルが見つかりません", - "环境变量": "環境変数", - "备选输入区": "代替入力エリア", - "如果文章被切分了": "記事が分割された場合", - "异常原因": "異常の原因", - "生成带有段落标签的HTML代码": "段落タグを持つHTMLコードを生成する", - "按钮颜色": "ボタンの色", - "请只提供文本的更正版本": "テキストの修正バージョンのみを提供してください", - "输入": "入力", - "插件参数区": "プラグインパラメータエリア", - "玫瑰色": "ローズ色", - "根据以上分析": "上記の分析に基づいて", - "解析整个Go项目": "Goプロジェクト全体を解析する", - "解析整个Rust项目": "Rustプロジェクト全体を解析する", - "新功能": "新機能", - "避免代理网络产生意外污染": "プロキシネットワークによる予期しない汚染を回避する", - "检测到": "検出された", - "借助此参数": "このパラメータを利用する", - "重置": "リセット", - "优先级2. 获取config_private中的配置": "優先度2. config_privateから設定を取得する", - "具备以下功能": "以下の機能を備えています", - "的耐心": "の忍耐力", - "将输出代码片段的“后面的": "コードスニペットの後ろに出力する", - "等待重试": "再試行を待つ", - "覆盖和重启": "上書きして再起動する", - "ChatGPT 学术优化": "ChatGPT学術最適化", - "后面两句是": "後の2文は", - "检查代理服务器是否可用": "プロキシサーバーが利用可能かどうかを確認する", - "存在一行极长的文本!": "1行の非常に長いテキストが存在します!", - "减少重复": "重複を減らす", - "暗色主题": "ダークテーマ", - "提取出以下内容": "以下の内容を抽出する", - "先在input输入编号": "まずinputに番号を入力してください", - "当输入部分的token占比小于限制的3/4时": "入力部分のトークンの割合が制限の3/4未満の場合", - "检测输入参数": "入力パラメータを検出する", - "api-key不满足要求": "api-keyが要件を満たしていない", - "刷新界面": "画面を更新する", - "重试的次数限制": "再試行回数の制限", - "输入路径或上传压缩包": "パスを入力するか、圧縮ファイルをアップロードする", - "如果某个子任务出错": "サブタスクのいずれかがエラーになった場合", - "已经全部完成": "すべて完了しました", - "并对文件中的所有函数生成注释": "すべての関数にコメントを生成する", - "如果选择自动处理": "自動処理を選択した場合", - "缺少的依赖": "不足している依存関係", - "紫色": "紫色", - "唤起高级参数输入区": "高度なパラメータ入力エリアを呼び出す", - "则换行符更有可能表示段落分隔": "したがって、改行記号は段落の区切りを表す可能性がより高いです", - ";4、引用数量": ";4、引用数量", - "中转网址预览": "中継ウェブサイトのプレビュー", - "批量总结Word文档": "Word文書を一括で要約する", - "建议低于1": "1未満をお勧めします", - "并且将结合上下文内容": "そして文脈内容を結合します", - "整合所有信息": "すべての情報を統合する", - "解析整个Lua项目": "Luaプロジェクト全体を解析する", - "它的作用是……额……就是不起作用": "その役割は……ああ……機能しないことです", - "列表长度为子任务的数量": "リストの長さはサブタスクの数です", - "为实现更多强大的功能做基础": "より強力な機能を実現するための基盤となる", - "请从数据中提取信息": "データから情報を抽出してください", - "至少一个线程任务Token溢出而失败": "少なくとも1つのスレッドタスクトークンがオーバーフローして失敗します", - "是否自动处理token溢出的情况": "トークンのオーバーフローを自動的に処理するかどうか", - "本地LLM模型如ChatGLM的执行方式 CPU/GPU": "ローカルLLMモデルの実行方法、例えばChatGLM CPU/GPU", - "等待中": "待機中", - "任务函数": "タスク関数", - "等文本特殊符号转换为其基本形式来对文本进行归一化处理": "テキストの特殊記号を基本形式に変換してテキストを正規化する", - "集合文件": "集合ファイル", - "替换其他特殊字符": "他の特殊文字を置換する", - "选择LLM模型": "LLMモデルを選択する", - "超过512个": "512を超える", - "装载请求内容": "リクエストコンテンツをロードする", - "根据前后相邻字符的特点": "前後の文字の特徴に基づく", - "GPT模型返回的回复字符串": "GPTモデルからの返信文字列", - "将对话记录history以Markdown格式写入文件中": "対話履歴をMarkdown形式でファイルに書き込む", - "无法连接到该网页": "このウェブページに接続できません", - "**输入参数说明**": "**入力パラメータの説明**", - "设置用户名和密码": "ユーザー名とパスワードを設定する", - "GPT参数": "GPTパラメータ", - "请用代码块输出代码": "コードブロックでコードを出力してください", - "保存当前的对话": "現在の対話を保存する", - "在这里输入分辨率": "解像度をここに入力してください", - "不能正常加载jittorllms的参数!": "jittorllmsのパラメータを正常にロードできません!", - "如果包含数学公式": "数式が含まれている場合", - "子线程任务": "サブスレッドタスク", - ";5、中文摘要翻译": ";5、中国語要約翻訳", - "截断时的颗粒度": "切り捨て時の粒度", - "作为一名中文学术论文写作改进助理": "中国語学術論文の執筆改善アシスタントとして", - "解析网页内容": "ウェブページの内容を解析する", - "作为切分点": "分割点として", - "将长文本分离开来": "長いテキストを分離する", - "总结文章": "記事をまとめる", - "左右布局": "左右レイアウト", - "用户取消了程序": "ユーザーがプログラムをキャンセルしました", - "多线程函数插件中": "マルチスレッド関数プラグインで", - "不能识别的URL!": "認識できないURL!", - "逐个文件分析已完成": "1つずつファイルを分析しました", - "感谢热情的": "熱心な感謝", - "是本次输出": "今回の出力です", - "协议": "プロトコル", - "例如需要翻译的一段话": "翻訳が必要な例文", - "本地文件地址": "ローカルファイルアドレス", - "更好的UI视觉效果": "より良いUI視覚効果", - "窗口布局": "ウィンドウレイアウト", - "测试功能": "テスト機能", - "前者API2D的": "前者API2Dの", - "请缩减输入文件的数量": "入力ファイルの数を減らしてください", - "随便显示点什么防止卡顿的感觉": "何か表示してカクつきを防止する", - "删除所有历史对话文件": "すべての履歴対話ファイルを削除する", - "是否在输入过长时": "入力が長すぎる場合は", - "只保留文件名节省token": "ファイル名のみを保持してトークンを節約する", - "插件模型的参数": "プラグインモデルのパラメータ", - 
"若再次失败则更可能是因为输入过长.": "再度失敗した場合、入力が長すぎる可能性が高いです。", - "或历史数据过长. 历史缓存数据已部分释放": "または履歴データが長すぎます。履歴キャッシュデータは一部解放されました", - "虽然不同的代理软件界面不一样": "異なるプロキシソフトウェアのインターフェースは異なりますが", - "英译中": "英語から中国語への翻訳", - "第4次尝试": "4回目の試み", - "批": "バッチ", - "方便调试和定位问题": "デバッグと問題の特定を容易にする", - "IP查询频率受限": "IPクエリ頻度が制限されています", - "则不解析notebook中的Markdown块": "したがって、ノートブックのMarkdownブロックを解析しない", - "英语关键词": "英語のキーワード", - "热更新prompt": "プロンプトのホット更新", - "保存当前对话": "現在の対話を保存する", - "我们用最暴力的方法切割": "最も暴力的な方法で切り分けます", - "Index 0 文本": "インデックス0テキスト", - "最大线程数": "最大スレッド数", - "然后用for+append循环重新赋值": "for+appendループを使用して値を再割り当てする", - "获取文章meta信息": "記事のメタ情報を取得する", - "Pay-as-you-go users的限制是每分钟3500次": "Pay-as-you-goユーザーの制限は1分間に3500回です", - "请注意": "注意してください", - "的转化": "の変換", - "解析Jupyter Notebook文件": "Jupyter Notebookファイルの解析", - "等待多久判定为超时": "タイムアウトとして判定するまでの待機時間", - "自动缩减文本": "テキストを自動的に縮小する", - "返回当前系统中可用的未使用端口": "現在のシステムで使用可能な未使用のポートを返す", - "历史对话输入": "過去の対話入力", - "其他错误": "その他のエラー", - "将错误显示出来": "エラーを表示する", - "请分析此页面中出现的所有文章": "このページに表示されるすべての記事を分析してください", - "将Markdown格式的文本转换为HTML格式": "Markdown形式のテキストをHTML形式に変換する", - "没有 sys_prompt 接口": "sys_promptインターフェースがありません", - "您可以将任意一个文件路径粘贴到输入区": "任意のファイルパスを入力エリアに貼り付けることができます", - "全部文件解析完成": "すべてのファイルの解析が完了しました", - "将匹配到的数字作为替换值": "一致した数字を置換値として使用する", - "单行 + 字体大": "1行+フォント大", - "备份和下载": "バックアップとダウンロード", - "用一张Markdown表格简要描述以下文件的功能": "以下のファイルの機能を簡単にMarkdownテーブルで説明してください", - "问题": "問題", - "请将此部分润色以满足学术标准": "この部分を学術基準に合わせて磨き上げてください", - "你是一位专业的中文学术论文作家": "あなたは専門の中国語学術論文作家です", - "对话历史文件损坏!": "対話履歴ファイルが破損しています!", - "重新URL重新定向": "URLを再度リダイレクトする", - "输入清除键": "入力クリアキー", - "因此把prompt加入 history": "したがって、履歴にpromptを追加します", - "以上文件将被作为输入参数": "上記のファイルは入力パラメータとして使用されます", - "的长度必须小于 2500 个 Token": "長さは2500トークン以下でなければなりません", - "现在": "今", - "不需要再次转化": "再変換する必要はありません", - "注意文章中的每一句话都要翻译": "記事の各文は翻訳する必要があります", - "整理报告的格式": "レポートのフォーマットを整理する", - "请先从插件列表中选择": "まず、プラグインリストから選択してください", - "带token约简功能": "トークン約束機能を備えた", - "请在config文件中修改API密钥之后再运行": "APIキーを変更した後にconfigファイルで実行してください", - "下载编号": "ダウンロード番号", - "是否丢弃掉 不是正文的内容": "本文でない内容を破棄するかどうか", - "以确保一些资源在代码块执行期间得到正确的初始化和清理": "いくつかのリソースがコードブロックの実行中に正しく初期化およびクリーンアップされるようにするため", - "第一步": "ステップ1", - "并将输出部分的Markdown和数学公式转换为HTML格式": "出力部分のMarkdownと数式をHTML形式に変換する", - "当代码输出半截的时候": "コードが半分出力されたとき", - "该文件中主要包含2个函数": "このファイルには主に2つの関数が含まれています", - "提取所有块元的文本信息": "すべてのブロック要素のテキスト情報を抽出する", - "成功读取环境变量": "環境変数の読み取りに成功しました", - "更新完成": "更新が完了しました", - "第 2 步": "ステップ2", - "是否重置": "リセットしますか", - "判定为数据流的结束": "データフローの終了と判断されます", - "和 __exit__": "と __exit__", - "将英文句号": "英文句点を", - "开始接收jittorllms的回复": "jittorllmsの返信を受け取り始める", - "放到每个子线程中分别执行": "それぞれのサブスレッドに配置して実行する", - "作为一个标识而存在": "識別子として存在する", - "你提供了错误的API_KEY": "APIキーが間違っています", - "选择放弃": "キャンセルする", - "请稍等": "お待ちください", - "实时在UI上反馈远程数据流": "リアルタイムでUIにリモートデータストリームをフィードバックする", - "用于负责跨越线程传递已经输出的部分": "スレッドを越えて出力された部分を転送する責任がある", - "例如\\section": "\\セクションのように", - "打印traceback": "トレースバックを印刷する", - "可能需要分组处理": "グループ化処理が必要な場合があります", - "应急食品是“原神”游戏中的角色派蒙的外号": "緊急食品は、「原神」ゲームのキャラクターパイモンのニックネームです", - "表示函数是否成功执行": "関数が正常に実行されたかどうかを示す", - "一般原样传递下去就行": "通常はそのまま渡すだけでよい", - "琥珀色": "琥珀色", - "jittorllms 没有 sys_prompt 接口": "jittorllmsにはsys_promptインターフェースがありません", - "清除": "クリア", - "小于正文的": "本文より小さい", - "不懂就填localhost或者127.0.0.1肯定错不了": "わからない場合は、localhostまたは127.0.0.1を入力してください。間違いなく失敗します", - "用于与with语句一起使用": "with文と一緒に使用する", - "方便实现复杂的功能逻辑": "複雑な機能ロジックを実現するのに便利", - "必要时再进行切割": "必要に応じて再分割する", - "已失败": "失敗しました", - "不具备多线程能力的函数": "マルチスレッド機能を持たない関数", - "找不到任何java文件": "Javaファイルが見つかりません", - "在代理软件的设置里找": "プロキシソフトウェアの設定で検索する", - "装饰器函数": "デコレータ関数", - "不要用代码块": 
"コードブロックを使用しないでください", - "输入时用逗号隔开": "入力時にカンマで区切ってください", - "时": "時", - "找图片": "画像を検索する", - "把本项目源代码切换成全英文": "このプロジェクトのソースコードをすべて英語に切り替える", - "Github更新地址": "Githubの更新アドレス", - "警告!API_URL配置选项将被弃用": "警告!API_URL構成オプションは廃止されます", - "一、论文概况": "1.論文概要", - "使用线程池": "スレッドプールを使用する", - "然后请使用Markdown格式封装": "次に、Markdown形式でパッケージ化してください", - "当 输入部分的token占比 小于 全文的一半时": "入力部分のトークンの割合が全体の半分以下の場合", - "更新函数代码": "関数コードを更新する", - "也许会导致低配计算机卡死 ……": "低スペックのコンピューターがクラッシュする可能性があります......", - "sk-此处填API密钥": "sk-ここにAPIキーを入力してください", - "用于实现Python函数插件的热更新": "Python関数プラグインのホット更新を実現するために使用されます", - "缺一不可": "欠かせない", - "回滚代码到原始的浏览器打开函数": "コードを元のブラウザ開く関数にロールバックする", - "先切换模型到openai或api2d": "まず、モデルをopenaiまたはapi2dに切り替えます", - "翻译为中文": "日本語に翻訳する", - "收到": "受信", - "需要配合修改main.py才能生效!": "有効にするには、main.pyを変更する必要があります!", - "但本地存储了以下历史文件": "ただし、次の履歴ファイルがローカルに保存されています", - "一些普通功能模块": "いくつかの一般的な機能モジュール", - "把gradio的运行地址更改到指定的二次路径上": "Gradioの実行アドレスを指定された2次パスに変更する", - "第三组插件": "第3グループのプラグイン", - "避免不小心传github被别人看到": "誤ってGithubにアップロードして他の人に見られるのを避ける", - "这里其实不需要join了": "ここではjoinする必要はありません", - "改为True应用代理": "Trueに変更してプロキシを適用する", - "粉红色": "ピンク色", - "进行学术解答": "学術的な回答を行う", - "用英文逗号分割": "英語のコンマで区切る", - "文件保存到本地": "ローカルにファイルを保存する", - "将markdown转化为好看的html": "Markdownを美しいHTMLに変換する", - "灵活而简洁": "柔軟で簡潔", - "当前软件运行的端口号": "現在のソフトウェアの実行ポート番号", - "其他的排队等待": "その他の待ち行列", - "更新失败": "更新に失敗しました", - "优先级1. 获取环境变量作为配置": "優先度1. 環境変数を設定として取得する", - "Y+回车=确认": "Y+Enter=確認", - "石板色": "スレート色", - "文件读取完成": "ファイルの読み込みが完了しました", - "加载失败!": "読み込みに失敗しました!", - "已经被转化过": "すでに変換されています", - "提取文本块主字体": "テキストブロックの主フォントを抽出する", - "多线程": "マルチスレッド", - "读取pdf文件并清理其中的文本内容": "PDFファイルを読み取り、テキスト内容をクリーンアップする", - "修正值": "修正値", - "抽取可用的api-key": "利用可能なAPIキーを抽出する", - "替换操作": "置換操作", - "尚未完成全部响应": "すべての応答が完了していません", - "不受git管控": "Gitの管理外", - "10个文件为一组": "10ファイルを1グループとする", - "生成图像": "画像を生成する", - "html格式": "HTML形式", - "该文件中主要包含三个函数": "このファイルには主に3つの関数が含まれています", - "质能方程式": "質量エネルギー方程式", - "高级函数插件": "高度な関数プラグイン", - "随变按钮的回调函数注册": "可変ボタンのコールバック関数の登録", - "份搜索结果": "検索結果", - "如果浏览器没有自动打开": "ブラウザが自動的に開かない場合", - "仅支持Win平台": "Winプラットフォームのみサポート", - "模块预热": "モジュールのプレヒート", - "请解释以下代码": "以下のコードを説明してください", - "具备完备的交互功能": "完全なインタラクティブ機能を備えています", - "则给出安装建议": "インストールの提案を行います", - "既可以写": "書くことができます", - "已成功": "成功しました", - "需要用此选项防止高频地请求openai导致错误": "このオプションを使用して、openaiへの高頻度のリクエストを防止し、エラーを引き起こす必要があります", - "则终止": "停止する", - "Call MOSS fail 不能正常加载MOSS的参数": "MOSSのパラメータを正常にロードできないため、Call MOSS fail", - "依次访问网页": "ウェブページに順次アクセスする", - "暂时先这样顶一下": "一時的にこれで対処する", - "将文本按照段落分隔符分割开": "テキストを段落区切り文字で分割する", - "输入中可能存在乱码": "入力には文字化けが含まれる可能性があります", - "重置文件的创建时间": "ファイルの作成時間をリセットする", - "使每个段落之间有两个换行符分隔": "各段落の間に2つの改行を挿入する", - "读取PDF文件": "PDFファイルを読み込む", - "紫罗兰色": "バイオレット", - "如果有": "ある場合", - "使用markdown表格输出结果": "markdownテーブルを使用して結果を出力する", - "不要修改!!": "修正しないでください!!", - "的方式启动": "の方法で起動する", - "循环轮询各个线程是否执行完毕": "各スレッドが完了したかどうかを繰り返しポーリングする", - "大部分时候仅仅为了fancy的视觉效果": "ほとんどの場合、見栄えの良い視覚効果のためだけです", - "结尾除去一次": "最後に1回除去する", - "天蓝色": "スカイブルー", - "原文": "原文", - "远程返回错误": "リモートエラーが返されました", - "功能区显示开关与功能区的互动": "機能エリアの表示スイッチと機能エリアの相互作用", - "生成一个请求线程": "リクエストスレッドを生成する", - "放弃": "放棄する", - "config_private.py放自己的秘密如API和代理网址": "config_private.pyに自分のAPIやプロキシアドレスなどの秘密を入力する", - "完成全部响应": "すべての応答を完了する", - "将双空行": "2つの空行を挿入する", - "第二层列表是对话历史": "2番目のリストは会話履歴です", - "例如 v2**y 和 ss* 的默认本地协议是socks5h": "たとえば、v2 ** yとss *のデフォルトのローカルプロトコルはsocks5hです", - "此版本使用pdfminer插件": "このバージョンではpdfminerプラグインが使用されています", - "下载中": "ダウンロード中", - "多线程润色开始": "マルチスレッドの改善が開始されました", - "这个函数是用来获取指定目录下所有指定类型": "この関数は、指定されたディレクトリ内のすべての指定されたタイプを取得するために使用されます", - "如果要使用jittorllms": 
"jittorllmsを使用する場合", - "可以多线程并行": "マルチスレッド並列処理が可能です", - "HotReload 的意思是热更新": "HotReloadの意味はホット更新です", - "失败": "失敗しました", - "proxies格式错误": "プロキシの形式が正しくありません", - "您可能选择了错误的模型或请求源": "間違ったモデルまたはリクエストソースを選択した可能性があります", - "内容太长了都会触发token数量溢出的错误": "コンテンツが長すぎると、トークン数がオーバーフローするエラーが発生する可能性があります", - "建议": "提案する", - "可能需要一点时间下载参数": "パラメータのダウンロードに少し時間がかかる場合があります", - "这里是特殊函数插件的高级参数输入区": "ここは特殊関数プラグインの高度なパラメータ入力エリアです", - "ChatGPT综合": "ChatGPT総合", - "等待多线程操作": "マルチスレッド操作を待機しています", - "按Shift+Enter换行": "Shift + Enterで改行", - "inputs 是本次问询的输入": "inputsは今回の問い合わせの入力です", - "单$包裹begin命令时多余": "beginコマンドを単一の$で囲むと余分になります", - "NEWBING_COOKIES未填写或有格式错误": "NEWBING_COOKIESが入力されていないか、形式が正しくありません", - "直接取出来": "直接取り出す", - "懂的都懂": "理解できる人は理解する", - "常规情况下": "通常の場合", - "给出输出文件清单": "出力ファイルリストを提供する", - "如果OpenAI不响应": "OpenAIが応答しない場合", - "尽可能多地保留文本": "テキストをできるだけ多く保持する", - "对话历史列表": "会話履歴リスト", - "不可多线程": "マルチスレッドはできません", - "解析整个CSharp项目": "CSharpプロジェクト全体を解析する", - "此线程失败前收到的回答": "このスレッドが失敗する前に受け取った回答", - "等待MOSS响应中": "MOSSの応答を待っています", - "对每一个源代码文件": "各ソースコードファイルに対して", - "爬取搜索引擎的结果": "検索エンジンの結果をクロールする", - "找不到任何.tex或pdf文件": ".texまたはpdfファイルが見つかりません", - "AutoGPT是什么": "AutoGPTとは何ですか", - "空空如也的输入栏": "空の入力欄", - "除了基础的pip依赖以外": "基本的なpip依存関係以外", - "你必须使用Markdown表格": "Markdownテーブルを使用する必要があります", - "该函数面向希望实现更多有趣功能的开发者": "この関数は、より多くの面白い機能を実装したい開発者を対象としています", - "需要访问谷歌": "Googleにアクセスする必要があります", - "5s之后重启": "5秒後に再起動します", - "删除其中的所有注释": "すべてのコメントを削除する", - "、地址": "、アドレス", - "请使用Markdown": "Markdownを使用してください", - "文件代码是": "ファイルのコードは", - "洋红色": "マゼンタ", - "已配置": "設定済み", - "分析用户提供的谷歌学术": "ユーザーが提供したGoogle Scholarの分析", - "句子结束标志": "文の終わりのマーク", - "尝试导入依赖": "依存関係のインポートを試みる", - "authors获取失败": "著者の取得に失敗しました", - "发送至chatGPT": "chatGPTに送信", - "添加一个萌萌的看板娘": "かわいい看板娘を追加する", - "记录删除注释后的文本": "コメントを削除したテキストを記録する", - "在读取API_KEY时": "API_KEYの読み取り時", - "每一块": "各ブロック", - "避免解析压缩文件": "圧縮ファイルの解析を避ける", - "接下来请你逐文件分析下面的论文文件": "次に、論文ファイルを1つずつ分析してください", - "Endpoint 重定向": "エンドポイントのリダイレクト", - "截断重试": "切り捨て再試行", - "限制的3/4时": "制限の3/4時", - "Windows上还需要安装winrar软件": "Windowsにはwinrarソフトウェアのインストールが必要です", - "插件": "プラグイン", - "输入过长已放弃": "入力が長すぎるため、放棄しました", - "界面更新": "インターフェースの更新", - "每个子任务的输出汇总": "各サブタスクの出力の集計", - "翻译摘要等": "要約などを翻訳する", - "网络卡顿、代理失败、KEY失效": "ネットワークの遅延、プロキシの失敗、KEYの無効化", - "前情提要": "前提の要約", - "additional_fn代表点击的哪个按钮": "additional_fnは、クリックされたボタンを表します", - "再点击按钮": "ボタンを再度クリック", - "等待回复": "返信を待つ", - "$c$是光速": "$c$は光速です", - "触发重置": "リセットをトリガーする", - "借鉴了 https": "httpsを参考にしました", - "追加历史": "履歴を追加する", - "就是临时文件夹的路径": "一時フォルダのパスです", - "开始正式执行任务": "タスクを正式に実行する", - "第一种情况": "1つ目の場合", - "对从 PDF 提取出的原始文本进行清洗和格式化处理": "PDFから抽出された元のテキストをクリーニングおよびフォーマット処理する", - "请结合互联网信息回答以下问题": "以下の問題にインターネット情報を組み合わせて回答してください", - "请你阅读以下学术论文相关的材料": "以下の学術論文に関連する資料を読んでください", - "注意": "注意", - "由于请求gpt需要一段时间": "GPTのリクエストには時間がかかるため", - "可以直接修改对话界面内容": "対話インターフェースの内容を直接変更できます", - "系统输入": "システム入力", - "包括": "含む", - "效果奇好": "効果が非常に良い", - "配置其Path环境变量": "そのPath環境変数を設定する", - "如温度和top_p等": "温度やtop_pなど", - "可选 ↓↓↓": "選択可能 ↓↓↓", - "代理可能无效": "プロキシは無効かもしれません", - "例如": "例えば", - "青色": "青色", - "一言以蔽之": "一言で言えば", - "直接给定文件": "ファイルを直接指定する", - "分组+迭代处理": "グループ化+反復処理", - "文件上传区": "ファイルアップロードエリア", - "3. 如果余量太小了": "3. 
もし余剰が少なすぎる場合", - "执行时": "実行時", - "localhost意思是代理软件安装在本机上": "localhostは、プロキシソフトウェアがローカルマシンにインストールされていることを意味します", - "下面是对每个参数和返回值的说明": "以下は各パラメーターおよび戻り値の説明です", - "存档文件详情": "アーカイブファイルの詳細", - "找不到任何.ipynb文件": "IPython Notebookファイルが見つかりません", - "里面包含以指定类型为后缀名的所有文件的绝对路径": "指定されたタイプの拡張子を持つすべてのファイルの絶対パスを含む", - "个片段": "フラグメント", - "Index 2 框框": "インデックス2フレーム", - "更换LLM模型/请求源": "LLMモデル/リクエストソースの変更", - "安装Newbing的依赖": "Newbingの依存関係のインストール", - "不会实时显示在界面上": "リアルタイムで画面に表示されない", - "第2步": "ステップ2", - "有$标识的公式符号": "$記号を持つ数式記号", - "读Tex论文写摘要": "Tex論文を読んで要約を書く", - "不详": "詳細不明", - "也可以直接是": "直接であることもできます", - "找不到任何CSharp文件": "CSharpファイルが見つかりません", - "输入其他/无输入+回车=不更新": "他の入力/入力なし+ Enter = 更新しない", - "然后再写一段英文摘要": "そして、もう一つの英文要約を書く", - "捕捉函数f中的异常并封装到一个生成器中返回": "関数fで例外をキャッチして、ジェネレータにエンコードして返す", - "重试几次": "数回リトライする", - "线程": "スレッド", - "程序终止": "プログラムの終了", - "用户提示": "ユーザーヒント", - "条": "条項", - "刷新界面用 yield from update_ui": "UIを更新するには、yield from update_uiを使用します", - "如何理解传奇?": "伝説を理解するには?", - "请避免混用多种jittor模型": "複数のjittorモデルを混在させないでください", - "说": "言う", - "您可以请再次尝试.": "もう一度お試しください。", - "尝试识别section": "セクションを識別しようとしています", - "警告!被保存的对话历史可以被使用该系统的任何人查阅": "警告!保存された対話履歴は、このシステムを使用する誰でも閲覧できます", - "Index 1 字体": "フォント1のインデックス", - "分解代码文件": "コードファイルの分解", - "越新越好": "新しいほど良い", - "当历史上下文过长时": "履歴のコンテキストが長すぎる場合", - "这是第": "これは第", - "网络代理状态": "ネットワークプロキシの状態", - "用于数据流可视化": "データフローの可視化に使用される", - "整理history": "履歴の整理", - "一-鿿": "一-鿿", - "所有文件都总结完成了吗": "すべてのファイルが要約されていますか?", - "默认False": "デフォルトはFalse", - "这是必应": "これはBingです", - "子进程Worker": "サブプロセスWorker", - "重试中": "再試行中", - "正常对话时使用": "通常の会話時に使用する", - "直接清除历史": "履歴を直接クリアする", - "处理数据流的主体": "データフローの本体を処理する", - "试着补上后个": "後のものを試してみてください", - "功能、贡献者": "機能、貢献者", - "请先转化为.docx格式": "まず.docx形式に変換してください", - "可用clear将其清空": "clearを使用してクリアできます", - "需要预先pip install rarfile": "rarfileを事前にpip installする必要があります", - "输入已识别为openai的api_key": "openaiのapi_keyとして認識された入力", - "先上传存档或输入路径": "アーカイブをアップロードするか、パスを入力してください", - "则先将公式转换为HTML格式": "公式をHTML形式に変換してください", - "需要读取和清理文本的pdf文件路径": "テキストを読み取り、クリーンアップする必要があるpdfファイルのパス", - "自动定位": "自動位置決め", - "api2d 正常完成": "api2dが正常に完了しました", - "获取页面上的文本信息": "ページからテキスト情報を取得する", - "日": "日", - "已经对该文章的所有片段总结完毕": "記事のすべてのセグメントを要約しました", - "搜集初始信息": "初期情報を収集する", - "本组文件为": "このグループのファイルは", - "正常": "正常", - "比如introduction": "例えば、導入", - "并在被装饰的函数上执行": "デコレートされた関数で実行する", - "文件路径列表": "ファイルパスリスト", - "由于输入长度限制": "入力長の制限のため", - "祖母绿": "エメラルドグリーン", - "并替换为空字符串": "空の文字列に置き換える", - "存入": "保存する", - "OpenAI绑定信用卡可解除频率限制": "OpenAIはクレジットカードをバインドして頻度制限を解除できます", - "获取预处理函数": "前処理関数を取得する", - "Bad forward key. API2D账户额度不足": "不正なフォワードキー。API2Dアカウントの残高が不足しています", - "源文件太多": "ソースファイルが多すぎます", - "谷歌学术检索助手": "Google学術検索アシスタント", - "方法则会被调用": "メソッドが呼び出されます", - "默认是.md": "デフォルトは.mdです", - "请开始多线程操作": "マルチスレッド操作を開始してください", - "蓝色": "青色", - "如果是网络上的文件": "ネットワーク上のファイルの場合", - "开始下一个循环": "次のループを開始する", - "更换模型 & SysPrompt & 交互界面布局": "モデルの変更&SysPrompt&インタラクティブインターフェイスレイアウト", - "二、论文翻译": "2.論文翻訳", - "再失败就没办法了": "もう失敗したらどうしようもない", - "解析整个Java项目": "Javaプロジェクト全体を解析する", - "只裁剪历史": "履歴のトリミングのみ", - "基础功能区": "基本機能エリア", - "gradio可用颜色列表": "利用可能なGradioの色のリスト", - "的高级参数说明": "高度なパラメータの説明", - "是否在arxiv中": "arxivにあるかどうか", - "提交": "提出", - "回车退出": "Enterで終了", - "详情见get_full_error的输出": "get_full_errorの出力を参照してください", - "您可以随时在history子文件夹下找回旧版的程序": "いつでもhistoryサブフォルダーで以前のバージョンのプログラムを取得できます", - "手动指定和筛选源代码文件类型": "ソースコードファイルタイプを手動で指定およびフィルタリングする", - "更多函数插件": "その他の関数プラグイン", - "看门狗的耐心": "監視犬の忍耐力", - "然后yeild出去": "そして出力する", - "拆分过长的IPynb文件": "長すぎるIPynbファイルを分割する", - "1. 把input的余量留出来": "1. 
入力の余裕を残す", - "请求超时": "リクエストがタイムアウトしました", - "是之前的对话列表": "以前の会話リストです", - "有些文章的正文部分字体大小不是100%统一的": "一部の記事の本文のフォントサイズが100%統一されていない場合があります", - "加载参数": "パラメータをロードする", - "在汇总报告中隐藏啰嗦的真实输入": "冗長な実際の入力をサマリーレポートで非表示にする", - "获取完整的从Openai返回的报错": "Openaiから返された完全なエラーを取得する", - "灰色": "グレー", - "表示要搜索的文件类型": "検索するファイルタイプを示します", - "亲人两行泪": "家族の2行の涙", - "等待NewBing响应中": "NewBingの応答を待っています", - "请复制并转到以下URL": "以下のURLをコピーして移動してください", - "开始接收chatglm的回复": "chatglmの返信を受け取り始めます", - "第6步": "ステップ6", - "可调节线程池的大小避免openai的流量限制错误": "OpenAIのトラフィック制限エラーを回避するためにスレッドプールのサイズを調整できます", - "等待响应": "レスポンスを待っています", - "月": "月", - "裁剪时": "トリミング中", - "异步任务结束": "非同期タスクが終了しました", - "正在处理中": "処理中", - "润色": "校正中", - "提取精炼信息": "情報の抽出と精製", - "您可以试试让AI写一个Related Works": "AIにRelated Worksを書かせてみることができます", - "主进程统一调用函数接口": "メインプロセスが関数インターフェースを統一的に呼び出します", - "再例如一个包含了待处理文件的路径": "処理待ちのファイルを含むパスの例", - "负责把学术论文准确翻译成中文": "学術論文を正確に中国語に翻訳する責任があります", - "函数的说明请见 request_llms/bridge_all.py": "関数の説明については、request_llms/bridge_all.pyを参照してください", - "然后回车提交": "そしてEnterを押して提出してください", - "防止爆token": "トークンの爆発を防止する", - "Latex项目全文中译英": "LaTeXプロジェクト全文の中国語から英語への翻訳", - "递归地切割PDF文件": "PDFファイルを再帰的に分割する", - "使用该模块需要额外依赖": "このモジュールを使用するには、追加の依存関係が必要です", - "放到history中": "履歴に保存する", - "汇总报告如何远程获取": "サマリーレポートをリモートで取得する方法", - "清空历史": "履歴をクリアする", - "代理所在地查询超时": "プロキシの場所のクエリがタイムアウトしました", - "列表": "リスト", - "检测到程序终止": "プログラムの終了が検出されました", - "重命名文件": "ファイル名を変更する", - "用&符号分隔": "&記号で分割する", - "LLM的内部调优参数": "LLMの内部チューニングパラメータ", - "建议您复制一个config_private.py放自己的秘密": "config_private.pyをコピーして、自分の秘密を入れてください", - "$m$是质量": "質量を表します", - "具备多线程调用能力的函数": "マルチスレッド呼び出し機能を備えた関数", - "将普通文本转换为Markdown格式的文本": "通常のテキストをMarkdown形式のテキストに変換する", - "rar和7z格式正常": "rarおよび7z形式が正常である", - "使用wraps": "wrapsを使用する", - "带超时倒计时": "タイムアウトカウントダウン付き", - "准备对工程源代码进行汇总分析": "プロジェクトソースコードの集計分析を準備する", - "未知": "不明", - "第n组插件": "n番目のプラグイン", - "ChatGLM响应异常": "ChatGLMの応答が異常です", - "使用Unsplash API": "Unsplash APIを使用する", - "读取默认值作为数据类型转换的参考": "デフォルト値を読み取り、データ型変換の参考にする", - "请更换为API_URL_REDIRECT配置": "API_URL_REDIRECT構成に変更してください", - "青蓝色": "青色と青緑色", - "如果中文效果不理想": "中国語の効果が理想的でない場合", - "Json异常": "Json例外", - "chatglm 没有 sys_prompt 接口": "chatglmにはsys_promptインターフェースがありません", - "停止": "停止", - "的文件": "のファイル", - "可能处于折叠状态": "折りたたみ状態になっている可能性があります", - "但还没输出完后面的": "しかし、まだ後ろの出力が完了していません", - "单线程方法": "シングルスレッドメソッド", - "不支持通过环境变量设置!": "環境変数を介して設定することはできません!", - "“喂狗”": "「犬に餌をやる」", - "获取设置": "設定を取得する", - "Json解析不合常规": "Json解析が通常と異なる", - "请对下面的程序文件做一个概述文件名是": "以下のプログラムファイルについて概要を説明してください。ファイル名は", - "输出": "出力", - "这个函数用stream的方式解决这个问题": "この関数はストリームを使用してこの問題を解決します", - "根据 heuristic 规则": "ヒューリスティックルールに従って", - "假如重启失败": "再起動に失敗した場合", - "然后在用常规的": "その後、通常の方法を使用する", - "加入下拉菜单中": "ドロップダウンメニューに追加する", - "正在分析一个项目的源代码": "プロジェクトのソースコードを分析しています", - "从以上搜索结果中抽取信息": "上記の検索結果から情報を抽出する", - "安全第一条": "安全が最優先です", - "并相应地进行替换": "適切に置換する", - "第5次尝试": "5回目の試み", - "例如在windows cmd中": "例えば、Windowsのcmdで", - "打开你的*学*网软件查看代理的协议": "あなたの*学*ウェブソフトウェアを開いて、プロキシプロトコルを確認する", - "用多种方式组合": "複数の方法を組み合わせる", - "找不到任何.h头文件": ".hヘッダーファイルが見つかりません", - "是本次问询的输入": "この問い合わせの入力です", - "并替换为回车符": "改行文字に置換する", - "不能自定义字体和颜色": "フォントと色をカスタマイズできません", - "点击展开“文件上传区”": "「ファイルアップロードエリア」をクリックして展開する", - "高危设置!通过修改此设置": "高危険設定!この設定を変更することで", - "开始重试": "再試行を開始する", - "你是一个学术翻译": "あなたは学術翻訳者です", - "表示要搜索的文件或者文件夹路径或网络上的文件": "検索するファイルまたはフォルダのパスまたはネットワーク上のファイルを示す", - "没办法了": "どうしようもない", - "优先级3. 获取config中的配置": "優先度3. 
configから設定を取得する", - "读取配置文件": "設定ファイルを読み込む", - "查询版本和用户意见": "バージョンとユーザーの意見を検索する", - "提取摘要": "要約を抽出する", - "在gpt输出代码的中途": "GPTがコードを出力する途中で", - "如1024x1024": "1024x1024のように", - "概括其内容": "内容を要約する", - "剩下的情况都开头除去": "残りの場合はすべて先頭を除去する", - "至少一个线程任务意外失败": "少なくとも1つのスレッドタスクが予期しない失敗をした", - "完成情况": "完了状況", - "输入栏用户输入的文本": "入力欄にユーザーが入力したテキスト", - "插件调度异常": "プラグインスケジューリングの例外", - "插件demo": "プラグインデモ", - "chatGPT分析报告": "chatGPT分析レポート", - "以下配置可以优化体验": "以下の設定で体験を最適化できます", - "是否一键更新代码": "コードをワンクリックで更新するかどうか", - "pip install pywin32 用于doc格式": "doc形式に使用するためのpip install pywin32", - "如果同时InquireMultipleLargeLanguageModels": "同時にInquireMultipleLargeLanguageModelsを使用する場合", - "整理反复出现的控件句柄组合": "繰り返し出現するコントロールハンドルの組み合わせを整理する", - "可能会导致严重卡顿": "重度のカクつきを引き起こす可能性がある", - "程序完成": "プログラム完了", - "在装饰器内部": "デコレーターの内部で", - "函数插件功能": "関数プラグイン機能", - "把完整输入-输出结果显示在聊天框": "完全な入力-出力結果をチャットボックスに表示する", - "对全文进行概括": "全文を要約する", - "HotReload的装饰器函数": "HotReloadのデコレーター関数", - "获取tokenizer": "tokenizerを取得する", - "则随机选取WEB端口": "WEBポートをランダムに選択する", - "解析项目": "プロジェクトを解析する", - "并且不要有反斜线": "そしてバックスラッシュを含めないでください", - "汇总报告已经添加到右侧“文件上传区”": "サマリーレポートはすでに右側の「ファイルアップロードエリア」に追加されています", - "装饰器函数返回内部函数": "デコレーター関数は内部関数を返します", - "根据以上你自己的分析": "上記の分析に基づいて自分自身を分析する", - "只输出代码": "コードのみを出力する", - "并执行函数的新版本": "関数の新バージョンを実行する", - "请不吝PR!": "PRを遠慮なく提出してください!", - "你好": "こんにちは", - "或者您没有获得体验资格": "またはあなたは体験資格を持っていない", - "temperature是chatGPT的内部调优参数": "temperatureはchatGPTの内部調整パラメータです", - "结果写入文件": "結果をファイルに書き込む", - "输入区": "入力エリア", - "这段代码定义了一个名为DummyWith的空上下文管理器": "このコードは、DummyWithという名前の空のコンテキストマネージャを定義しています", - "加载需要一段时间": "読み込みには時間がかかります", - "和端口": "およびポート", - "当你想发送一张照片时": "写真を送信したい場合", - "为了更好的效果": "より良い効果を得るために", - "逻辑较乱": "ロジックがやや乱雑です", - "调用路径参数已自动修正到": "呼び出しパスのパラメータが自動的に修正されました", - "地址🚀": "アドレス🚀", - "也可以获取它": "それを取得することもできます", - "pip install python-docx 用于docx格式": "pip install python-docxはdocx形式に使用されます", - "该模板可以实现ChatGPT联网信息综合": "このテンプレートは、ChatGPTネットワーク情報の総合を実現できます", - "的标识": "のマーク", - "取决于": "に依存する", - "ChatGLM尚未加载": "ChatGLMはまだロードされていません", - "处理多模型并行等细节": "複数のモデルの並列処理などの詳細を処理する", - "代理与自动更新": "プロキシと自動更新", - "摘要在 .gs_rs 中的文本": ".gs_rs中の要約テキスト", - "补上后面的": "後ろに補完する", - "输入了已经经过转化的字符串": "変換済みの文字列が入力されました", - "对整个Latex项目进行润色": "全体のLatexプロジェクトを磨き上げる", - "即将更新pip包依赖……": "pipパッケージ依存関係を更新する予定...", - "ダウンロードしたpdfファイルが失敗しました": "PDFファイルのダウンロードに失敗しました", - "何もありません": "何もありません", - "次の文字が大文字である場合": "次の文字が大文字である場合", - "yield一次以刷新前端页面": "フロントエンドページを更新するためにyieldを1回実行します", - "入力部分が自由すぎる": "入力部分が自由すぎる", - "中文Latex项目全文润色": "中国語のLatexプロジェクトの全文を校正する", - "ファイルを読み込む": "ファイルを読み込む", - "プライバシー保護に注意してください!": "プライバシー保護に注意してください!", - "ただし、途中でネットワークケーブルが切断されることを避けるために内部でストリームを使用する": "ただし、途中でネットワークケーブルが切断されることを避けるために内部でストリームを使用する", - "上下レイアウト": "上下レイアウト", - "historyは以前の会話リストです": "historyは以前の会話リストです", - "pdfファイルを読み込む": "pdfファイルを読み込む", - "同時に長い文を分解する": "同時に長い文を分解する", - "Unsplash APIを使用する": "Unsplash APIを使用する", - "各llmモデルに単体テストを実行する": "各llmモデルに単体テストを実行する", - "ローカルで使用する場合はお勧めしません": "ローカルで使用する場合はお勧めしません", - "亜鉛色": "亜鉛色", - "論文": "論文", - "1つの大規模言語モデルのみに問い合わせる場合": "1つの大規模言語モデルのみに問い合わせる場合", - "会話履歴": "会話履歴", - "入力をトリミングする": "入力をトリミングする", - "第2部分": "第2部分", - "gpt4は現在、申請が承認された人のみに公開されています": "gpt4は現在、申請が承認された人のみに公開されています", - "以下は学術論文の基本情報です": "以下は学術論文の基本情報です", - "出力が不完全になる原因となる": "出力が不完全になる原因となる", - "ハイフンを使って": "ハイフンを使って", - "请先把模型切换至gpt-xxxx或者api2d-xxxx": "Please switch the model to gpt-xxxx or api2d-xxxx first.", - "路径或网址": "Path or URL", - "*代表通配符": "* represents a wildcard", - "块元提取": "Block element extraction", - "使用正则表达式查找注释": "Use regular expressions to find comments", - "但推荐上传压缩文件": "But it is 
recommended to upload compressed files", - "实现更换API_URL的作用": "Implement the function of changing API_URL", - "从摘要中提取高价值信息": "Extract high-value information from the summary", - "警告": "Warning", - "ChatGLM消耗大量的内存": "ChatGLM consumes a lot of memory", - "历史中哪些事件发生在": "Which events happened in history", - "多线": "Multi-threaded", - "石头色": "Stone color", - "NewBing响应缓慢": "NewBing responds slowly", - "生成一份任务执行报告": "Generate a task execution report", - "用空格或段落分隔符替换原换行符": "Replace the original line break with a space or paragraph separator", - "其他小工具": "Other small tools", - "当前问答": "Current Q&A", - "支持任意数量的llm接口": "Support any number of llm interfaces", - "在传递chatbot的过程中不要将其丢弃": "Do not discard it in the process of passing chatbot", - "2. 把输出用的余量留出来": "2. Leave room for the output", - "稍后可能需要再试一次": "May need to try again later", - "显示/隐藏功能区": "Show/hide the function area", - "拆分过长的latex文件": "Split overly long latex files", - "子进程执行": "Subprocess execution", - "排除了以上两个情况": "Excludes the above two cases", - "您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!": "You will completely expose your API-KEY and conversation privacy to the intermediary you set!", - "表示文件所在的文件夹路径": "Indicates the folder path where the file is located", - "获取正文主字体": "本文フォントを取得する", - "中文学术润色": "中国語の学術的な磨きをかける", - "i_say_show_user=给用户看的提问": "ユーザーに表示される質問", - "需要清除首尾空格": "先頭と末尾の空白を削除する必要があります", - "请你作为一个学术翻译": "学術翻訳者としてお願いします", - "中译英": "中国語から英語への翻訳", - "chatGPT的内部调优参数": "chatGPTの内部調整パラメータ", - "test_解析一个Cpp项目": "Cppプロジェクトの解析をテストする", - "默认开启": "デフォルトで有効になっています", - "第三方库": "サードパーティのライブラリ", - "如果需要在二级路径下运行": "2次パスで実行する必要がある場合", - "chatGPT 分析报告": "chatGPT分析レポート", - "不能正常加载ChatGLM的参数!": "ChatGLMのパラメータを正常にロードできません!", - "并定义了一个名为decorated的内部函数": "内部関数decoratedを定義しました", - "所有线程同时开始执行任务函数": "すべてのスレッドが同時にタスク関数を開始します", - "Call jittorllms fail 不能正常加载jittorllms的参数": "jittorllmsのパラメータを正常にロードできません", - "任何文件": "任意のファイル", - "分解连字": "リガチャの分解", - "如果子任务非常多": "サブタスクが非常に多い場合", - "如果要使用ChatGLM": "ChatGLMを使用する場合", - "**函数功能**": "**関数の機能**", - "等待jittorllms响应中": "jittorllmsの応答を待っています", - "查找语法错误": "構文エラーを検索する", - "尝试识别段落": "段落を認識しようとする", - "下载PDF文档": "PDF文書をダウンロードする", - "搜索页面中": "ページ内を検索する", - "然后回车键提交后即可生效": "Enterキーを押して送信すると有効になります", - "请求处理结束": "リクエスト処理が終了しました", - "按钮见functional.py": "functional.pyにあるボタン", - "提交按钮、重置按钮": "送信ボタン、リセットボタン", - "网络错误": "ネットワークエラー", - "第10步": "10番目のステップ", - "问号": "質問符", - "两个指令来安装jittorllms的依赖": "jittorllmsの依存関係をインストールするための2つの命令", - "询问多个GPT模型": "複数のGPTモデルについて問い合わせる", - "增强报告的可读性": "レポートの可読性を向上させる", - "如果缺少依赖": "依存関係が不足している場合", - "比如你是翻译官怎样怎样": "例えば、あなたが翻訳者である場合の方法", - "MOSS尚未加载": "MOSSがまだロードされていません", - "第一部分": "第1部分", - "的分析如下": "の分析は以下の通りです", - "解决一个mdx_math的bug": "mdx_mathのバグを解決する", - "函数插件输入输出接驳区": "関数プラグインの入出力接続エリア", - "打开浏览器": "ブラウザを開く", - "免费用户填3": "無料ユーザーは3を入力してください", - "版": "版", - "不需要重启程序": "プログラムを再起動する必要はありません", - "正在查找对话历史文件": "会話履歴ファイルを検索しています", - "内部函数通过使用importlib模块的reload函数和inspect模块的getmodule函数来重新加载并获取函数模块": "内部関数は、importlibモジュールのreload関数とinspectモジュールのgetmodule関数を使用して、関数モジュールを再ロードおよび取得します", - "解析整个C++项目": "C++プロジェクト全体を解析する", - "函数热更新是指在不停止程序运行的情况下": "関数のホットアップデートとは、プログラムの実行を停止せずに行うことを指します", - "代码高亮": "コードのハイライト", - "否则在回复时会因余量太少出问题": "そうしないと、返信時に余裕が少なすぎて問題が発生する可能性があります", - "该函数详细注释已添加": "この関数には詳細な注釈が追加されました", - "默认允许多少路线程同时访问OpenAI": "デフォルトでOpenAIに同時にアクセスできるスレッド数はいくつですか", - "网络的远程文件": "リモートファイルのネットワーク", - "搜索需要处理的文件清单": "処理する必要のあるファイルリストを検索する", - "提交任务": "タスクを提出する", - "根据以上的对话": "上記の対話に基づいて", - "提示": "ヒント", - "然后重试": "その後、再試行してください", - "只输出转化后的英文代码": "変換後の英語コードのみを出力する", - "GPT返回的结果": "GPTが返す結果", - "您的 API_KEY 是": 
"あなたのAPI_KEYは", - "给gpt的静默提醒": "GPTに対するサイレントリマインダー", - "先寻找到解压的文件夹路径": "解凍されたフォルダのパスを最初に検索する", - "”补上": "補う", - "清除重复的换行": "重複する改行をクリアする", - "递归": "再帰", - "把已经获取的数据显示出去": "取得したデータを表示する", - "参数": "パラメータ", - "已完成": "完了しました", - "方法会在代码块被执行前被调用": "メソッドはコードブロックが実行される前に呼び出されます", - "第一次运行": "最初の実行", - "does not exist. 模型不存在": "存在しません。モデルが存在しません", - "每个子任务展现在报告中的输入": "レポートに表示される各サブタスクの入力", - "response中会携帯traceback报错信息": "responseにはtracebackエラー情報が含まれます", - "在实验过程中发现调用predict_no_ui处理长文档时": "実験中に、predict_no_uiを呼び出して長いドキュメントを処理することがわかりました", - "发送图片时": "画像を送信するとき", - "如果换行符前为句子结束标志": "改行記号の前に文の終わりの記号がある場合", - "获取图片URL": "画像のURLを取得する", - "提取字体大小是否近似相等": "フォントサイズを抽出して近似しているかどうかを確認する", - "填写之前不要忘记把USE_PROXY改成True": "記入する前に、USE_PROXYをTrueに変更することを忘れないでください", - "列举两条并发送相关图片": "List two and send related pictures", - "第一层列表是子任务分解": "The first level list is subtask decomposition", - "把newbing的长长的cookie放到这里": "Put Newbing's long cookie here", - "不输入即全部匹配": "No input means all matches", - "不输入代表全部匹配": "No input means all matches", - "请对下面的文章片段用中文做一个概述": "Please summarize the following article fragment in Chinese", - "迭代之前的分析": "Analysis before iteration", - "返回一个新的字符串": "Return a new string", - "可同时填写多个API-KEY": "Multiple API-KEYs can be filled in at the same time", - "乱七八糟的后处理": "Messy post-processing", - "然后回答问题": "Then answer the question", - "是否唤起高级插件参数区": "Whether to call the advanced plugin parameter area", - "判定为不是正文": "Determined as not the main text", - "输入区2": "Input area 2", - "来自EdgeGPT.py": "From EdgeGPT.py", - "解释代码": "Explain the code", - "直接在输入区键入api_key": "Enter the api_key directly in the input area", - "文章内容是": "The content of the article is", - "也可以在问题输入区输入临时的api-key": "You can also enter a temporary api-key in the question input area", - "不需要高级参数": "No advanced parameters required", - "下面是一些学术文献的数据": "Below are some data on academic literature", - "整理结果": "Organized results", - "不能加载Newbing组件": "Cannot load Newbing component", - "仅仅服务于视觉效果": "Only serves visual effects", - "主进程执行": "Main process execution", - "请耐心完成后再提交新问题": "Please submit a new question after completing it patiently", - "找不到任何.docx或doc文件": "Cannot find any .docx or .doc files", - "修改函数插件代码后": "After modifying the function plugin code", - "TGUI不支持函数插件的实现": "TGUIは関数プラグインの実装をサポートしていません", - "不要修改任何LaTeX命令": "LaTeXコマンドを変更しないでください", - "安装方法": "インストール方法", - "退出": "終了", - "由于您没有设置config_private.py私密配置": "config_private.pyのプライベート設定が設定されていないため", - "查询代理的地理位置": "プロキシの地理的位置を検索する", - "Token限制下的截断与处理": "トークン制限下の切り捨てと処理", - "python 版本建议3.9+": "Pythonバージョン3.9+を推奨します", - "如果是.doc文件": ".docファイルの場合", - "跨平台": "クロスプラットフォーム", - "输入谷歌学术搜索页url": "Google Scholar検索ページのURLを入力してください", - "高级参数输入区的显示提示": "高度なパラメータ入力エリアの表示ヒント", - "找不到任何.md文件": ".mdファイルが見つかりません", - "请对下面的文章片段用中文做概述": "以下の記事の断片について、中国語で概要を説明してください", - "用户界面对话窗口句柄": "ユーザーインターフェースの対話ウィンドウハンドル", - "chatGPT对话历史": "chatGPTの対話履歴", - "基础功能区的回调函数注册": "基本機能エリアのコールバック関数の登録", - "根据给定的匹配结果来判断换行符是否表示段落分隔": "与えられた一致結果に基づいて、改行記号が段落の区切りを表すかどうかを判断する", - "第2次尝试": "2回目の試み", - "布尔值": "ブール値", - "您既可以在config.py中修改api-key": "config.pyでapi-keyを変更することができます", - "清理后的文本内容字符串": "クリーンアップされたテキストコンテンツ文字列", - "去除短块": "短いブロックを削除する", - "利用以上信息": "上記情報を利用する", - "从而达到实时更新功能": "これにより、リアルタイム更新機能が実現されます", - "第5步": "5番目のステップ", - "载入对话历史文件": "対話履歴ファイルを読み込む", - "修改它": "それを変更する", - "正在执行一些模块的预热": "モジュールのプレウォームを実行しています", - "避免包括解释": "解釈を含めないようにする", - "使用 lru缓存 加快转换速度": "変換速度を高速化するためにlruキャッシュを使用する", - "与gradio版本和网络都相关": "gradioバージョンとネットワークに関連しています", - "以及代理设置的格式是否正确": "およびプロキシ設定の形式が正しいかどうか", - "OpenAI所允许的最大并行过载": 
"OpenAIが許可する最大並列過負荷", - "代码开源和更新": "コードのオープンソース化と更新", - "网络等出问题时": "ネットワークなどに問題が発生した場合", - "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开": "1.英語のタイトル;2.中国語のタイトルの翻訳;3.著者;4.arxiv公開", - "发送 GET 请求": "GETリクエストを送信する", - "向chatbot中添加简单的意外错误信息": "チャットボットに簡単な予期しないエラーメッセージを追加する", - "代理配置": "プロキシの設定", - "这个函数运行在主进程": "この関数はメインプロセスで実行されます", - "找不到任何lua文件": "luaファイルが見つかりません", - "降低请求频率中": "リクエスト頻度を低下させる", - "迭代地历遍整个文章": "記事全体を反復処理する", - "否则将导致每个人的NewBing问询历史互相渗透": "さもないと、各人のNewBingクエリ履歴が相互に浸透する可能性があります", - "并修改代码拆分file_manifest列表": "コードを変更して、file_manifestリストを分割する", - "第 0 步": "ステップ0", - "提高限制请查询": "制限を引き上げるには、クエリを確認してください", - "放在这里": "ここに置いてください", - "红色": "赤色", - "上传本地文件可供红色函数插件调用": "ローカルファイルをアップロードして、赤い関数プラグインを呼び出すことができます", - "正在加载tokenizer": "トークナイザーをロードしています", - "非OpenAI官方接口的出现这样的报错": "OpenAI公式インターフェース以外でこのようなエラーが発生する", - "跨线程传递": "スレッド間での伝達", - "代码直接生效": "コードが直接有効になる", - "基本信息": "基本情報", - "默认": "#", - "首先你在英文语境下通读整篇论文": "最初に、論文全体を英語で読みます", - "的第": "の", - "第9步": "9番目のステップ", - "gpt模型参数": "GPTモデルのパラメータ", - "等待": "待つ", - "一次性完成": "一度に完了する", - "收到以下文件": "以下のファイルを受け取りました", - "生成正则表达式": "正規表現を生成する", - "参数简单": "パラメータは簡単です", - "设置一个token上限": "トークンの上限を設定する", - "i_say=真正给chatgpt的提问": "i_say=ChatGPTに本当の質問をする", - "请刷新界面重试": "ページを更新して再試行してください", - "对程序的整体功能和构架重新做出概括": "プログラムの全体的な機能と構造を再概要化する", - "以下是一篇学术论文中的一段内容": "以下は学術論文の一部です", - "您可以调用“LoadConversationHistoryArchive”还原当下的对话": "「LoadConversationHistoryArchive」を呼び出して、現在の会話を復元できます", - "读取Markdown文件": "Markdownファイルを読み込む", - "最终": "最終的に", - "或显存": "またはグラフィックスメモリ", - "如果最后成功了": "最後に成功した場合", - "例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "例えば、chatglm&gpt-3.5-turbo&api2d-gpt-4", - "使用中文回答我的问题": "中国語で私の質問に答えてください", - "我需要你找一张网络图片": "インターネット上の画像を探してください", - "我上传了文件": "ファイルをアップロードしました", - "从而实现分批次处理": "バッチ処理を実現するため", - "我们先及时地做一次界面更新": "まず、タイムリーに画面を更新します", - "您还需要运行": "実行する必要があります", - "该函数只有20多行代码": "その関数には20行以上のコードしかありません", - "但端口号都应该在最显眼的位置上": "しかし、ポート番号は常に目立つ場所にある必要があります", - "Token溢出数": "Tokenオーバーフロー数", - "private_upload里面的文件名在解压zip后容易出现乱码": "private_upload内のファイル名は、zipを解凍すると文字化けしやすいです", - "以下“红颜色”标识的函数插件需从输入区读取路径作为参数": "以下の「赤色」で表示された関数プラグインは、パスを入力エリアから引数として読み取る必要があります", - "如果WEB_PORT是-1": "WEB_PORTが-1の場合", - "防止回答时Token溢出": "回答時のTokenオーバーフローを防止する", - "第三种情况": "第3の場合", - "前言": "序文", - "打开文件": "ファイルを開く", - "用于输入给GPT的前提提示": "GPTに入力するための前提条件のヒント", - "返回值": "戻り値", - "请查收": "受信箱を確認してください", - "看门狗": "ウォッチドッグ", - "返回重试": "戻って再試行する", - "裁剪input": "inputをトリミングする", - "字符串": "文字列", - "以下是信息源": "以下は情報源です", - "你是一名专业的学术教授": "あなたは専門の学術教授です", - "处理中途中止的情况": "途中で処理を中止する場合", - "清除历史": "履歴をクリアする", - "完成了吗": "完了しましたか", - "接收文件后与chatbot的互动": "ファイルを受信した後、chatbotとのインタラクション", - "插件初始化中": "プラグインの初期化中", - "系统静默prompt": "システム静黙プロンプト", - "上下文管理器必须实现两个方法": "コンテキストマネージャは2つのメソッドを実装する必要があります", - "你需要翻译以下内容": "以下の内容を翻訳する必要があります", - "的api-key": "のAPIキー", - "收到消息": "メッセージを受信しました", - "将插件中出的所有问题显示在界面上": "すべての問題をインターフェースに表示する", - "正在提取摘要并下载PDF文档……": "要約を抽出し、PDFドキュメントをダウンロードしています...", - "不能达到预期效果": "期待される効果が得られない", - "清除当前溢出的输入": "現在のオーバーフロー入力をクリアする", - "当文件被上传时的回调函数": "ファイルがアップロードされたときのコールバック関数", - "已重置": "リセットされました", - "无": "なし", - "总结输出": "出力をまとめる", - "第 3 步": "ステップ3", - "否则可能导致显存溢出而造成卡顿": "それ以外の場合、グラフィックスメモリのオーバーフローが発生し、フリーズが発生する可能性があります", - "gradio的inbrowser触发不太稳定": "Gradioのinbrowserトリガーはあまり安定していません", - "发送至LLM": "LLMに送信", - "异步任务开始": "非同期タスクが開始されました", - "和openai的连接容易断掉": "OpenAIとの接続が簡単に切断される", - "用一句话概括程序的整体功能": "プログラムの全体的な機能を一言で表す", - "等待NewBing响应": "NewBingの応答を待っています", - "会自动使用已配置的代理": "事前に設定されたプロキシを自動的に使用します", - "带Cookies的Chatbot类": "Cookieを持つChatbotクラス", - "安装MOSS的依赖": "MOSSの依存関係をインストールする", - "或者": "または", - 
"函数插件-下拉菜单与随变按钮的互动": "関数プラグイン-ドロップダウンメニューと可変ボタンの相互作用", - "完成": "完了", - "这段代码来源 https": "このコードの出典:https", - "年份获取失败": "年を取得できませんでした", - "你必须逐个文献进行处理": "文献を1つずつ処理する必要があります", - "文章极长": "記事が非常に長い", - "选择处理": "処理を選択する", - "进入任务等待状态": "タスク待機状態に入る", - "它可以作为创建新功能函数的模板": "It can serve as a template for creating new feature functions", - "当前模型": "Current model", - "中间过程不予显示": "Intermediate process is not displayed", - "OpenAI模型选择是": "OpenAI model selection is", - "故可以只分析文章内容": "So only the content of the article can be analyzed", - "英语学术润色": "English academic polishing", - "此key无效": "This key is invalid", - "您可能需要手动安装新增的依赖库": "You may need to manually install the new dependency library", - "会把traceback和已经接收的数据转入输出": "Will transfer traceback and received data to output", - "后语": "Postscript", - "最后用中文翻译摘要部分": "Finally, translate the abstract section into Chinese", - "如果直接在海外服务器部署": "If deployed directly on overseas servers", - "找不到任何前端相关文件": "No frontend-related files can be found", - "Not enough point. API2D账户点数不足": "Not enough points. API2D account points are insufficient", - "当前版本": "Current version", - "1. 临时解决方案": "1. Temporary solution", - "第8步": "Step 8", - "历史": "History", - "是否在结束时": "Whether to write conversation history at the end", - "对话历史写入": "Write conversation history", - "观测窗": "Observation window", - "刷新时间间隔频率": "Refresh time interval frequency", - "当输入部分的token占比": "When the token proportion of the input part is", - "这是什么": "What is this", - "现将您的现有配置移动至config_private.py以防止配置丢失": "Now move your existing configuration to config_private.py to prevent configuration loss", - "尝试": "Try", - "您也可以选择删除此行警告": "You can also choose to delete this warning line", - "调用主体": "Call subject", - "当前代理可用性": "Current proxy availability", - "将单空行": "Single blank line", - "将结果写入markdown文件中": "Write the result to a markdown file", - "按输入的匹配模式寻找上传的非压缩文件和已解压的文件": "Find uploaded uncompressed files and decompressed files according to the input matching mode", - "设置5秒即可": "Set for 5 seconds", - "需要安装pip install rarfile来解压rar文件": "Need to install pip install rarfile to decompress rar files", - "如API和代理网址": "Such as API and proxy URLs", - "每个子任务的输入": "Input for each subtask", - "而在上下文执行结束时": "While at the end of the context execution", - "Incorrect API key. OpenAI以提供了不正确的API_KEY为由": "Incorrect API key. 
OpenAI cites incorrect API_KEY as the reason", - "即在代码结构不变得情况下取代其他的上下文管理器": "That is, replace other context managers without changing the code structure", - "递归搜索": "Recursive search", - "找到原文本中的换行符": "Find line breaks in the original text", - "开始了吗": "Has it started?", - "地址": "Address", - "将生成的报告自动投射到文件上传区": "Automatically project the generated report to the file upload area", - "数据流的显示最后收到的多少个字符": "Display how many characters the data stream received last", - "缺少ChatGLM的依赖": "Missing dependency for ChatGLM", - "不需要修改": "No modification needed", - "正在分析一个源代码项目": "Analyzing a source code project", - "第7步": "Step 7", - "这是什么功能": "What is this function?", - "你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性": "Your task is to improve the spelling, grammar, clarity, conciseness, and overall readability of the provided text", - "不起实际作用": "Does not have any actual effect", - "不显示中间过程": "Do not display intermediate processes", - "对整个Latex项目进行翻译": "Translate the entire Latex project", - "在上下文执行开始的情况下": "When the context execution starts", - "等待ChatGLM响应中": "ChatGLMの応答を待っています", - "GPT输出格式错误": "GPTの出力形式が間違っています", - "最多同时执行5个": "同時に最大5つ実行できます", - "解析此项目本身": "このプロジェクト自体を解析する", - "肯定已经都结束了": "もう終わったに違いない", - "英文Latex项目全文润色": "英語のLatexプロジェクト全体を校正する", - "修改函数插件后": "関数プラグインを変更した後", - "请谨慎操作": "注意して操作してください", - "等待newbing回复的片段": "newbingの返信を待っているフラグメント", - "第 5 步": "5番目のステップ", - "迭代上一次的结果": "前回の結果を反復処理する", - "载入对话": "対話をロードする", - "最后": "最後に", - "在前端打印些好玩的东西": "フロントエンドで面白いものを印刷する", - "用于显示给用户": "ユーザーに表示するために使用されます", - "在界面上显示结果": "結果をインターフェースに表示する", - "检查一下是不是忘了改config": "configを変更するのを忘れていないか確認してください", - "亮色主题": "明るいテーマ", - "开始请求": "リクエストを開始する", - "若输入0": "0を入力する場合", - "清除换行符": "改行をクリアする", - "Token溢出": "トークンオーバーフロー", - "靛蓝色": "藍紫色", - "的主要内容": "の主な内容", - "执行中": "実行中", - "生成http请求": "httpリクエストを生成する", - "第一页清理后的文本内容列表": "最初のページのクリーンアップされたテキストコンテンツリスト", - "初始值是摘要": "初期値は要約です", - "Free trial users的限制是每分钟3次": "無料トライアルユーザーの制限は、1分あたり3回です", - "处理markdown文本格式的转变": "Markdownテキストのフォーマット変換", - "如没有给定输入参数": "入力パラメータが指定されていない場合", - "缺少MOSS的依赖": "MOSSの依存関係が不足しています", - "打开插件列表": "プラグインリストを開く", - "失败了": "失敗しました", - "OpenAI和API2D不会走这里": "OpenAIとAPI2Dはここを通過しません", - "解析整个前端项目": "フロントエンドプロジェクト全体を解析する", - "将要忽略匹配的文件名": "一致するファイル名を無視する予定です", - "网页的端口": "Webページのポート", - "切分和重新整合": "分割と再結合", - "有肉眼不可见的小变化": "肉眼では見えない微小な変化があります", - "实现插件的热更新": "プラグインのホット更新を実現する", - "默认值": "デフォルト値", - "字符数小于100": "文字数が100未満です", - "更新UI": "UIを更新する", - "我们剥离Introduction之后的部分": "Introductionを削除した後の部分", - "注意目前不能多人同时调用NewBing接口": "現時点では、複数のユーザーが同時にNewBing APIを呼び出すことはできません", - "黄色": "黄色", - "中提取出“标题”、“收录会议或期刊”等基本信息": "タイトル、収録会議またはジャーナルなどの基本情報を抽出する", - "NewBing响应异常": "NewBingの応答が異常です", - "\\cite和方程式": "\\citeと方程式", - "则覆盖原config文件": "元のconfigファイルを上書きする", - "Newbing失败": "Newbingが失敗しました", - "需要预先pip install py7zr": "事前にpip install py7zrが必要です", - "换行 -": "改行 -", - "然后通过getattr函数获取函数名": "その後、getattr関数を使用して関数名を取得します", - "中性色": "中性色", - "直到历史记录的标记数量降低到阈值以下": "直到履歴のマーク数が閾値以下になるまで", - "请按以下描述给我发送图片": "以下の説明に従って画像を送信してください", - "用学术性语言写一段中文摘要": "学術的な言葉で中国語の要約を書く", - "开发者们❤️": "開発者たち❤️", - "解析整个C++项目头文件": "C++プロジェクトのヘッダーファイル全体を解析する", - "将输入和输出解析为HTML格式": "入力と出力をHTML形式で解析する", - "重试一次": "もう一度やり直す", - "如1812.10695": "例えば1812.10695のように", - "当无法用标点、空行分割时": "句読点や空行で区切ることができない場合", - "第二步": "2番目のステップ", - "如果是第一次运行": "初めて実行する場合", - "第一组插件": "最初のプラグイングループ", - "其中$E$是能量": "ここで$E$はエネルギーです", - "在结束时": "終了時に", - "OpenAI拒绝了请求": "OpenAIはリクエストを拒否しました", - "则会在溢出时暴力截断": "オーバーフロー時に強制的に切り捨てられます", - "中途接收可能的终止指令": "途中で可能な終了命令を受信する", - "experiment等": "実験など", - "结束": "終了する", - "发送请求到子进程": "子プロセスにリクエストを送信する", 
- "代码已经更新": "コードはすでに更新されています", - "情况会好转": "状況は改善されます", - "请削减单次输入的文本量": "一度に入力するテキスト量を減らしてください", - "每个线程都要“喂狗”": "各スレッドは「犬に餌を与える」必要があります", - "也可以写": "書くこともできます", - "导入软件依赖失败": "ソフトウェアの依存関係のインポートに失敗しました", - "代理网络的地址": "プロキシネットワークのアドレス", - "gpt_replying_buffer也写完了": "gpt_replying_bufferも書き終わりました", - "依赖检测通过": "Dependency check passed", - "并提供改进建议": "And provide improvement suggestions", - "Call ChatGLM fail 不能正常加载ChatGLM的参数": "Call ChatGLM fail, unable to load ChatGLM parameters", - "请对下面的文章片段做一个概述": "Please summarize the following article fragment", - "建议使用docker环境!": "It is recommended to use a docker environment!", - "单线": "Single line", - "将中文句号": "Replace Chinese period", - "高级实验性功能模块调用": "Advanced experimental function module call", - "个": "pieces", - "MOSS响应异常": "MOSS response exception", - "一键更新协议": "One-click update agreement", - "最多收纳多少个网页的结果": "Maximum number of web page results to be included", - "历史上的今天": "Today in history", - "jittorllms尚未加载": "jittorllms has not been loaded", - "不输入文件名": "Do not enter file name", - "准备文件的下载": "Preparing for file download", - "找不到任何golang文件": "Cannot find any golang files", - "找不到任何rust文件": "Cannot find any rust files", - "写入文件": "Write to file", - "LLM_MODEL 格式不正确!": "LLM_MODEL format is incorrect!", - "引用次数是链接中的文本": "The reference count is the text in the link", - "则使用当前时间生成文件名": "Then use the current time to generate the file name", - "第二组插件": "Second set of plugins", - "-1代表随机端口": "-1 represents a random port", - "无代理状态下很可能无法访问OpenAI家族的模型": "It is very likely that you cannot access the OpenAI family of models without a proxy", - "分别为 __enter__": "They are __enter__ respectively", - "设定一个最小段落长度阈值": "Set a minimum paragraph length threshold", - "批量TranslateFromChiToEngInMarkdown": "Batch TranslateFromChiToEngInMarkdown", - "您若希望分享新的功能模组": "If you want to share new functional modules", - "先输入问题": "Enter the question first", - "理解PDF论文内容": "Understand the content of the PDF paper", - "质能方程可以写成$$E=mc^2$$": "The mass-energy equation can be written as $$E=mc^2$$", - "安装ChatGLM的依赖": "Install dependencies for ChatGLM", - "自动更新程序": "Automatic update program", - "备份一个文件": "Backup a file", - "并行任务数量限制": "Parallel task quantity limit", - "将y中最后一项的输入部分段落化": "Paragraphize the input part of the last item in y", - "和": "and", - "尝试Prompt": "Try Prompt", - "且没有代码段": "And there is no code segment", - "设置gradio的并行线程数": "Set the parallel thread number of gradio", - "请提取": "Please extract", - "向chatbot中添加错误信息": "Add error message to chatbot", - "处理文件的上传": "Handle file upload", - "异常": "Exception", - "此处不修改": "Do not modify here", - "*** API_KEY 导入成功": "*** API_KEY imported successfully", - "多线程方法": "Multi-threaded method", - "也可以根据之前的内容长度来判断段落是否已经足够长": "You can also judge whether the paragraph is long enough based on the length of the previous content", - "同样支持多线程": "Also supports multi-threading", - "代理所在地": "Location of the proxy", - "chatbot 为WebUI中显示的对话列表": "Chatbot is the list of conversations displayed in WebUI", - "对话窗的高度": "Height of the conversation window", - "体验gpt-4可以试试api2d": "You can try api2d to experience gpt-4", - "观察窗": "Observation window", - "Latex项目全文英译中": "Full translation of Latex project from English to Chinese", - "接下来请将以下代码中包含的所有中文转化为英文": "Next, please translate all the Chinese in the following code into English", - "以上材料已经被写入": "以上の材料が書き込まれました", - "清理规则包括": "クリーニングルールには以下が含まれます", - "展示分割效果": "分割効果を表示する", - "运行方法 python crazy_functions/crazy_functions_test.py": "python crazy_functions/crazy_functions_test.pyを実行する方法", - "不要遗漏括号": 
"括弧を省略しないでください", - "对IPynb文件进行解析": "IPynbファイルを解析する", - "它们会继续向下调用更底层的LLM模型": "それらはより低レベルのLLMモデルを呼び出し続けます", - "这个函数用于分割pdf": "この関数はPDFを分割するために使用されます", - "等待输入": "入力を待っています", - "句号": "句点", - "引入一个有cookie的chatbot": "cookieを持つchatbotを導入する", - "优先": "優先", - "没有提供高级参数功能说明": "高度なパラメータ機能の説明が提供されていません", - "找不到任何文件": "ファイルが見つかりません", - "将要忽略匹配的文件后缀": "一致するファイルの拡張子を無視する予定です", - "函数插件-固定按钮区": "関数プラグイン-固定ボタンエリア", - "如果要使用Newbing": "Newbingを使用する場合", - "缺少jittorllms的依赖": "jittorllmsの依存関係が不足しています", - "尽量是完整的一个section": "可能な限り完全なセクションであること", - "请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分": "「タイトル」、「収録会議またはジャーナル」、「著者」、「要約」、「番号」、「著者の電子メール」の6つの部分を抽出してください", - "检查USE_PROXY选项是否修改": "USE_PROXYオプションが変更されているかどうかを確認してください", - "自动截断": "自動切断", - "多线程操作已经开始": "マルチスレッド操作が開始されました", - "根据当前的模型类别": "現在のモデルタイプに基づいて", - "兼容旧版的配置": "古いバージョンの構成と互換性があります", - "找不到任何python文件": "Pythonファイルが見つかりません", - "这个bug没找到触发条件": "このバグのトリガー条件が見つかりませんでした", - "学术中英互译": "学術的な英中翻訳", - "列表递归接龙": "リストの再帰的な接続", - "新版本": "新しいバージョン", - "返回的结果是": "返された結果は", - "以免输入溢出": "オーバーフローを防ぐために", - "流式获取输出": "ストリームで出力を取得する", - "逐个文件分析": "ファイルを1つずつ分析する", - "随机负载均衡": "ランダムな負荷分散", - "高级参数输入区": "高度なパラメータ入力エリア", - "稍微留一点余地": "少し余裕を持たせる", - "并显示到聊天当中": "チャットに表示される", - "不在arxiv中无法获取完整摘要": "arxivにないと完全な要約を取得できません", - "用户反馈": "ユーザーフィードバック", - "有线程锁": "スレッドロックあり", - "一键DownloadArxivPapersAndTranslateAbstract": "一括でArxiv論文をダウンロードして要約を翻訳する", - "现在您点击任意“红颜色”标识的函数插件时": "今、あなたが任意の「赤い」関数プラグインをクリックすると", - "请从": "からお願いします", - "也支持同时填写多个api-key": "複数のAPIキーを同時に入力することもできます", - "也许等待十几秒后": "おそらく10秒以上待つ必要があります", - "第": "第", - "在函数插件中被调用": "関数プラグインで呼び出されます", - "此外我们也提供可同步处理大量文件的多线程Demo供您参考": "また、大量のファイルを同期的に処理するためのマルチスレッドデモも提供しています", - "的配置": "の設定", - "数据流的第一帧不携带content": "データストリームの最初のフレームにはcontentが含まれていません", - "老旧的Demo": "古いデモ", - "预处理一波": "事前処理を行う", - "获取所有文章的标题和作者": "すべての記事のタイトルと著者を取得する", - "输出 Returns": "Returnsを出力する", - "Reduce the length. 本次输入过长": "長さを短くしてください。入力が長すぎます", - "抽取摘要": "要約を抽出する", - "从最长的条目开始裁剪": "最長のエントリからトリミングを開始する", - "2. 替换跨行的连词": "2. 行をまたいだ接続詞を置換する", - "并且对于网络上的文件": "そして、ネットワーク上のファイルに対して", - "本地文件预览": "ローカルファイルのプレビュー", - "手动指定询问哪些模型": "手動でどのモデルを問い合わせるか指定する", - "如果有的话": "ある場合は", - "直接退出": "直接退出する", - "请提交新问题": "新しい問題を提出してください", - "您正在调用一个": "あなたは呼び出しています", - "请编辑以下文本": "以下のテキストを編集してください", - "常见协议无非socks5h/http": "一般的なプロトコルはsocks5h/http以外ありません", - "Latex英文纠错": "LatexEnglishErrorCorrection", - "连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion", - "联网的ChatGPT_bing版": "OnlineChatGPT_BingVersion", - "总结音视频": "SummarizeAudioVideo", - "动画生成": "GenerateAnimation", - "数学动画生成manim": "GenerateMathematicalAnimationManim", - "Markdown翻译指定语言": "TranslateMarkdownSpecifiedLanguage", - "知识库问答": "KnowledgeBaseQuestionAnswer", - "Langchain知识库": "LangchainKnowledgeBase", - "读取知识库作答": "ReadKnowledgeBaseAnswer", - "交互功能模板函数": "InteractiveFunctionTemplateFunction", - "交互功能函数模板": "InteractiveFunctionFunctionTemplate", - "Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison", - "Latex输出PDF": "LatexOutputPDFResult", - "Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF", - "语音助手": "VoiceAssistant", - "微调数据集生成": "FineTuneDatasetGeneration", - "chatglm微调工具": "ChatGLMFineTuningTool", - "启动微调": "StartFineTuning", - "sprint亮靛": "SprintAzureIndigo", - "专业词汇声明": "ProfessionalVocabularyDeclaration", - "Latex精细分解与转化": "LatexDetailedDecompositionAndConversion", - "编译Latex": "CompileLatex", - "将代码转为动画": "コードをアニメーションに変換する", - "解析arxiv网址失败": "arxivのURLの解析に失敗しました", - "其他模型转化效果未知": "他のモデルの変換効果は不明です", - "把文件复制过去": "ファイルをコピーする", - "!!!如果需要运行量化版本": "!!!量子化バージョンを実行する必要がある場合", - "报错信息如下. 
如果是与网络相关的问题": "エラーメッセージは次のとおりです。ネットワークに関連する問題の場合", - "请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期": "ALIYUN_TOKENとALIYUN_APPKEYの有効期限を確認してください", - "编译结束": "コンパイル終了", - "只读": "読み取り専用", - "模型选择是": "モデルの選択は", - "正在从github下载资源": "GitHubからリソースをダウンロードしています", - "同时分解长句": "同時に長い文を分解する", - "寻找主tex文件": "メインのtexファイルを検索する", - "例如您可以将以下命令复制到下方": "たとえば、以下のコマンドを下にコピーできます", - "使用中文总结音频“": "中国語で音声を要約する", - "此处填API密钥": "ここにAPIキーを入力してください", - "裁剪输入": "入力をトリミングする", - "当前语言模型温度设定": "現在の言語モデルの温度設定", - "history 是之前的对话列表": "historyは以前の対話リストです", - "对输入的word文档进行摘要生成": "入力されたWord文書の要約を生成する", - "输入问题后点击该插件": "質問を入力した後、このプラグインをクリックします", - "仅在Windows系统进行了测试": "Windowsシステムでのみテストされています", - "reverse 操作必须放在最后": "reverse操作は最後に配置する必要があります", - "即将编译PDF": "PDFをコンパイルする予定です", - "执行错误": "エラーが発生しました", - "段音频完成了吗": "セグメントのオーディオは完了しましたか", - "然后重启程序": "それからプログラムを再起動してください", - "是所有LLM的通用接口": "これはすべてのLLMの共通インターフェースです", - "当前报错的latex代码处于第": "現在のエラーのあるLaTeXコードは第", - "🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行": "🏃‍♂️🏃‍♂️🏃‍♂️ サブプロセスの実行", - "用来描述你的要求": "要求を説明するために使用されます", - "原始PDF编译是否成功": "元のPDFのコンパイルは成功しましたか", - "本地Latex论文精细翻译": "ローカルのLaTeX論文の詳細な翻訳", - "设置OpenAI密钥和模型": "OpenAIキーとモデルの設定", - "如果使用ChatGLM2微调模型": "ChatGLM2ファインチューニングモデルを使用する場合", - "项目Github地址 \\url{https": "プロジェクトのGithubアドレス \\url{https", - "将前后断行符脱离": "前後の改行文字を削除します", - "该项目的Latex主文件是": "このプロジェクトのLaTeXメインファイルは", - "编译已经开始": "コンパイルが開始されました", - "*{\\scriptsize\\textbf{警告": "*{\\scriptsize\\textbf{警告", - "从一批文件": "一連のファイルから", - "等待用户的再次调用": "ユーザーの再呼び出しを待っています", - "目前仅支持GPT3.5/GPT4": "現在、GPT3.5/GPT4のみをサポートしています", - "如果一句话小于7个字": "1つの文が7文字未満の場合", - "目前对机器学习类文献转化效果最好": "現在、機械学習の文献変換効果が最も良いです", - "寻找主文件": "メインファイルを検索中", - "解除插件状态": "プラグインの状態を解除します", - "默认为Chinese": "デフォルトはChineseです", - "依赖不足": "依存関係が不足しています", - "编译文献交叉引用": "文献の相互参照をコンパイルする", - "对不同latex源文件扣分": "異なるLaTeXソースファイルに罰則を課す", - "再列出用户可能提出的三个问题": "ユーザーが提出する可能性のある3つの問題を再リスト化する", - "建议排查": "トラブルシューティングの提案", - "生成时间戳": "タイムスタンプの生成", - "检查config中的AVAIL_LLM_MODELS选项": "configのAVAIL_LLM_MODELSオプションを確認する", - "chatglmft 没有 sys_prompt 接口": "chatglmftにはsys_promptインターフェースがありません", - "在一个异步线程中采集音频": "非同期スレッドでオーディオを収集する", - "初始化插件状态": "プラグインの状態を初期化する", - "内含已经翻译的Tex文档": "翻訳済みのTexドキュメントが含まれています", - "请注意自我隐私保护哦!": "プライバシー保護に注意してください!", - "使用正则表达式查找半行注释": "正規表現を使用して半行コメントを検索する", - "不能正常加载ChatGLMFT的参数!": "ChatGLMFTのパラメータを正常にロードできません!", - "首先你在中文语境下通读整篇论文": "まず、中国語の文脈で論文全体を読んでください", - "如 绿帽子*深蓝色衬衫*黑色运动裤": "例えば、緑の帽子*濃い青のシャツ*黒のスポーツパンツ", - "默认为default": "デフォルトはdefaultです", - "将": "置き換える", - "使用 Unsplash API": "Unsplash APIを使用する", - "会被加在你的输入之前": "あなたの入力の前に追加されます", - "还需要填写组织": "組織を入力する必要があります", - "test_LangchainKnowledgeBase读取": "test_LangchainKnowledgeBaseの読み込み", - "目前不支持历史消息查询": "現在、過去のメッセージのクエリはサポートされていません", - "临时存储用于调试": "デバッグ用の一時的なストレージ", - "提取总结": "要約を抽出する", - "每秒采样数量": "1秒あたりのサンプル数", - "但通常不会出现在正文": "ただし、通常は本文には現れません", - "通过调用conversations_open方法打开一个频道": "conversations_openメソッドを呼び出してチャンネルを開く", - "导致输出不完整": "出力が不完全になる原因となる", - "获取已打开频道的最新消息并返回消息列表": "開いているチャンネルの最新メッセージを取得し、メッセージリストを返す", - "Tex源文件缺失!": "Texソースファイルがありません!", - "如果需要使用Slack Claude": "Slack Claudeを使用する必要がある場合", - "扭转的范围": "逆転の範囲", - "使用latexdiff生成论文转化前后对比": "latexdiffを使用して論文の変換前後の比較を生成する", - "--读取文件": "--ファイルを読み込む", - "调用openai api 使用whisper-1模型": "openai apiを呼び出してwhisper-1モデルを使用する", - "避免遗忘导致死锁": "忘却によるデッドロックを回避する", - "在多Tex文档中": "複数のTexドキュメントの中で", - "失败时": "失敗したとき", - "然后转移到指定的另一个路径中": "その後、指定された別のパスに移動する", - "使用Newbing": "Newbingを使用する", - "的参数": "のパラメータ", - "后者是OPENAI的结束条件": "後者はOPENAIの終了条件です", - "构建知识库": "ナレッジベースを構築する", - "吸收匿名公式": "匿名数式を取り込む", - "前缀": "接頭辞", - "会直接转到该函数": "直接その関数に移動します", - "Claude失败": "Claudeが失敗しました", - "P.S. 但愿没人把latex模板放在里面传进来": "P.S. 誰もlatexテンプレートを中に入れて渡してこないことを願っています", - "临时地启动代理网络": "一時的にプロキシネットワークを起動する", - "读取文件内容到内存": "読み込んだファイルの内容をメモリに保存する", - "总结音频": "音声をまとめる", - "没有找到任何可读取文件": "読み込み可能なファイルが見つかりません", - "获取Slack消息失败": "Slackメッセージの取得に失敗しました", - "用黑色标注转换区": "黒い注釈で変換エリアをマークする", - "此插件处于开发阶段": "このプラグインは開発中です", - "其他操作系统表现未知": "他のオペレーティングシステムの動作は不明です", - "返回找到的第一个": "最初に見つかったものを返す", - "发现已经存在翻译好的PDF文档": "翻訳済みのPDFドキュメントが既に存在することがわかりました", - "不包含任何可用于": "使用できるものは含まれていません", - "发送到openai音频解析终端": "openai音声解析端に送信する", - "========================================= 插件主程序2 =====================================================": "========================================= プラグインメインプログラム2 =====================================================", - "正在重试": "再試行中", - "从而更全面地理解项目的整体功能": "プロジェクトの全体的な機能をより理解するために", - "正在等您说完问题": "質問が完了するのをお待ちしています", - "使用教程详情见 request_llms/README.md": "使用方法の詳細については、request_llms/README.mdを参照してください", - "6.25 加入判定latex模板的代码": "6.25 テンプレートの判定コードを追加", - "找不到任何音频或视频文件": "音声またはビデオファイルが見つかりません", - "请求GPT模型的": "GPTモデルのリクエスト", - "行": "行", - "分析上述回答": "上記の回答を分析する", - "如果要使用ChatGLMFT": "ChatGLMFTを使用する場合", - "上传Latex项目": "Latexプロジェクトをアップロードする", - "如参考文献、脚注、图注等": "参考文献、脚注、図のキャプションなど", - "未配置": "設定されていません", - "请在此处给出自定义翻译命令": "カスタム翻訳コマンドをここに入力してください", - "第二部分": "第2部分", - "解压失败! 需要安装pip install py7zr来解压7z文件": "解凍に失敗しました!7zファイルを解凍するにはpip install py7zrをインストールする必要があります", - "吸收在42行以内的begin-end组合": "42行以内のbegin-endの組み合わせを取り込む", - "Latex文件融合完成": "Latexファイルの統合が完了しました", - "输出html调试文件": "HTMLデバッグファイルの出力", - "论文概况": "論文の概要", - "修复括号": "括弧の修復", - "赋予插件状态": "プラグインの状態を付与する", - "标注节点的行数范围": "ノードの行数範囲を注釈する", - "MOSS can understand and communicate fluently in the language chosen by the user such as English and 中文. MOSS can perform any language-based tasks.": "MOSSは、ユーザーが選択した言語(英語や中文など)でスムーズに理解し、コミュニケーションすることができます。MOSSは、言語に基づくさまざまなタスクを実行できます。", - "LLM_MODEL是默认选中的模型": "LLM_MODELはデフォルトで選択されたモデルです", - "配合前缀可以把你的输入内容用引号圈起来": "接頭辞と組み合わせて、入力内容を引用符で囲むことができます", - "获取关键词": "キーワードの取得", - "本项目现已支持OpenAI和Azure的api-key": "このプロジェクトは、OpenAIおよびAzureのAPIキーをサポートしています", - "欢迎使用 MOSS 人工智能助手!": "MOSS AIアシスタントをご利用いただきありがとうございます!", - "在执行完成之后": "実行が完了した後", - "正在听您讲话": "お話をお聞きしています", - "Claude回复的片段": "Claudeの返信の一部", - "返回": "戻る", - "期望格式例如": "期待される形式の例", - "gpt 多线程请求": "GPTマルチスレッドリクエスト", - "当前工作路径为": "現在の作業パスは", - "该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成": "このPDFはGPT-Academicオープンソースプロジェクトによって大規模言語モデル+Latex翻訳プラグインを使用して一括生成されました", - "解决插件锁定时的界面显示问题": "プラグインのロック時のインターフェース表示の問題を解決する", - "默认 secondary": "デフォルトのセカンダリ", - "会把列表拆解": "リストを分解します", - "暂时不支持历史消息": "一時的に履歴メッセージはサポートされていません", - "或者重启之后再度尝试": "または再起動後に再試行してください", - "吸收其他杂项": "他の雑項を吸収する", - "双手离开鼠标键盘吧": "両手をマウスとキーボードから離してください", - "建议更换代理协议": "プロキシプロトコルの変更をお勧めします", - "音频助手": "オーディオアシスタント", - "请耐心等待": "お待ちください", - "翻译结果": "翻訳結果", - "请在此处追加更细致的矫错指令": "ここにより詳細なエラー修正命令を追加してください", - "编译原始PDF": "元のPDFをコンパイルする", - "-构建知识库": "-ナレッジベースの構築", - "删除中间文件夹": "中間フォルダを削除する", - "这段代码定义了一个名为TempProxy的空上下文管理器": "このコードはTempProxyという名前の空のコンテキストマネージャを定義しています", - "参数说明": "パラメータの説明", - "正在预热文本向量化模组": "テキストベクトル化モジュールのプリヒート中", - "函数插件": "関数プラグイン", - "右下角更换模型菜单中可切换openai": "右下のモデルメニューでopenaiを切り替えることができます", - "先上传数据集": "まずデータセットをアップロードしてください", - "LatexEnglishErrorCorrection+高亮修正位置": "LatexEnglishErrorCorrection+修正箇所のハイライト", - "正在构建知识库": "ナレッジベースを構築しています", - "用红色标注处保留区": "赤い注釈で保留エリアをマークする", - "安装Claude的依赖": "Claudeの依存関係をインストールする", - "已禁用": "無効になっています", - "是否在提交时自动清空输入框": "送信時に入力欄を自動的にクリアするかどうか", - "GPT 学术优化": "GPT 学術最適化", - "需要特殊依赖": "特殊な依存関係が必要です", - "test_联网回答问题": "test_ネットワーク接続で質問に回答する", - "除非您是论文的原作者": "あなたが論文の原作者でない限り", - "即可见": "で確認できます", - "解析为简体中文": "簡体字中国語として解析する", - "解析整个Python项目": "Pythonプロジェクト全体を解析する", - "========================================= 插件主程序1 =====================================================": "========================================= プラグインメインプログラム1 =====================================================", - "当前参数": "現在のパラメータ", - "处理个别特殊插件的锁定状态": "個別の特殊プラグインのロック状態を処理する", - "已知某些代码的局部作用是": "一部のコードの局所的な役割は以下であることが知られています", - "请务必用 pip install -r requirements.txt 指令安装依赖": "必ず pip install -r requirements.txt コマンドで依存関係をインストールしてください", - "安装": "インストール", - "请登录OpenAI查看详情 https": "OpenAIにログインして詳細を確認してください https", - "必须包含documentclass": "documentclassを含む必要があります", - "极少数情况下": "ごくまれな場合", - "并将返回的频道ID保存在属性CHANNEL_ID中": "返されたチャンネルIDを属性CHANNEL_IDに保存する", - "您的 API_KEY 不满足任何一种已知的密钥格式": "あなたの API_KEY は既知のいずれのキー形式にも一致しません", - "-预热文本向量化模组": "-テキストベクトル化モジュールのプリヒート", - "什么都没有": "何もありません", - "等待GPT响应": "GPTの応答を待っています", - "请尝试把以下指令复制到高级参数区": "以下のコマンドを高度なパラメータエリアにコピーしてみてください", - "模型参数": "モデルパラメータ", - "先删除": "まず削除する", - "响应中": "応答中", - "开始接收chatglmft的回复": "chatglmftの返信を受け取り始める", - "手动指定语言": "言語を手動で指定する", - "获取线程锁": "スレッドロックを取得する", - "当前大语言模型": "現在の大規模言語モデル", - "段音频的第": "セグメントのオーディオの第", - "正在编译对比PDF": "対比PDFをコンパイルしています", - "根据需要切换prompt": "必要に応じてpromptを切り替える", - "取评分最高者返回": "最高スコアのものを返す", - "如果您是论文原作者": "あなたが論文の原作者である場合", - "段音频的主要内容": "セグメントのオーディオの主な内容", - "为啥chatgpt会把cite里面的逗号换成中文逗号呀": "なぜchatgptはcite内のカンマを中国語のカンマに置き換えるのか", - "为每一位访问的用户赋予一个独一无二的uuid编码": "アクセスする各ユーザーに一意のuuidを割り当てる", - "将每次对话记录写入Markdown格式的文件中": "毎回の対話記録をMarkdown形式のファイルに書き込む", - "ChatGLMFT尚未加载": "ChatGLMFTはまだロードされていません", - "切割音频文件": "オーディオファイルを分割する", - "例如 f37f30e0f9934c34a992f6f64f7eba4f": "例えば f37f30e0f9934c34a992f6f64f7eba4f", - "work_folder = Latex预处理": "work_folder = Latex前処理", - "出问题了": "問題が発生しました", - "等待Claude响应中": "Claudeの応答を待っています", - "增强稳健性": "信頼性を向上させる", - "赋予插件锁定 锁定插件回调路径": "プラグインにコールバックパスをロックする", - "将多文件tex工程融合为一个巨型tex": "複数のファイルのtexプロジェクトを1つの巨大なtexに統合する", - "参考文献转Bib": "参考文献をBibに変換する", - "由于提问含不合规内容被Azure过滤": "質問が規則に違反しているため、Azureによってフィルタリングされました", - "读取优先级": "優先度を読み取る", - "格式如org-xxxxxxxxxxxxxxxxxxxxxxxx": "形式はorg-xxxxxxxxxxxxxxxxxxxxxxxxのようです", - "辅助gpt生成代码": "GPTのコード生成を補助する", - "读取音频文件": "音声ファイルを読み取る", - "输入arxivID": "arxivIDを入力する", - "转化PDF编译是否成功": "変換後のPDFのコンパイルが成功したかどうか", - "Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数": "ChatGLMFTのパラメータを正常にロードできませんでした", - "创建AcsClient实例": "AcsClientのインスタンスを作成する", - "将 chatglm 直接对齐到 chatglm2": "chatglmをchatglm2に直接整列させる", - "要求": "要求", - "子任务失败时的重试次数": "サブタスクが失敗した場合のリトライ回数", - "请求子进程": "サブプロセスを要求する", - "按钮是否可见": "ボタンが表示可能かどうか", - "将 \\include 命令转换为 \\input 命令": "\\includeコマンドを\\inputコマンドに変換する", - "用户填3": "ユーザーが3を入力する", - "后面是英文逗号": "後ろに英語のカンマがあります", - "吸收iffalse注释": "iffalseコメントを吸収する", - "请稍候": "お待ちください", - "摘要生成后的文档路径": "要約生成後のドキュメントのパス", - "主程序即将开始": "メインプログラムがすぐに開始されます", - "处理历史信息": "履歴情報の処理", - "根据给定的切割时长将音频文件切割成多个片段": "指定された分割時間に基づいてオーディオファイルを複数のセグメントに分割する", - "解决部分词汇翻译不准确的问题": "一部の用語の翻訳の不正確さを解決する", - "即将退出": "すぐに終了します", - "用于给一小段代码上代理": "一部のコードにプロキシを適用するために使用されます", - "提取文件扩展名": "ファイルの拡張子を抽出する", - "目前支持的格式": "現在サポートされている形式", - "第一次调用": "最初の呼び出し", - "异步方法": "非同期メソッド", - "P.S. 顺便把Latex的注释去除": "P.S. 
LaTeXのコメントを削除する", - "构建完成": "ビルドが完了しました", - "缺少": "不足しています", - "建议暂时不要使用": "一時的に使用しないことをお勧めします", - "对比PDF编译是否成功": "PDFのコンパイルが成功したかどうかを比較する", - "填入azure openai api的密钥": "Azure OpenAI APIのキーを入力してください", - "功能尚不稳定": "機能はまだ安定していません", - "则跳过GPT请求环节": "GPTリクエストのスキップ", - "即不处理之前的对话历史": "以前の対話履歴を処理しない", - "非Openai官方接口返回了错误": "非公式のOpenAI APIがエラーを返しました", - "其他类型文献转化效果未知": "他のタイプの文献の変換効果は不明です", - "给出一些判定模板文档的词作为扣分项": "テンプレートドキュメントの単語を減点項目として提供する", - "找 API_ORG 设置项": "API_ORGの設定項目を検索します", - "调用函数": "関数を呼び出します", - "需要手动安装新增的依赖库": "新しい依存ライブラリを手動でインストールする必要があります", - "或者使用此插件继续上传更多文件": "または、このプラグインを使用してさらにファイルをアップロードします", - "640个字节为一组": "640バイトごとにグループ化します", - "逆转出错的段落": "エラーのあるパラグラフを逆転させます", - "对话助手函数插件": "対話アシスタント関数プラグイン", - "前者是API2D的结束条件": "前者はAPI2Dの終了条件です", - "终端": "ターミナル", - "仅调试": "デバッグのみ", - "论文": "論文", - "想象一个穿着者": "着用者を想像してください", - "音频内容是": "音声の内容は", - "如果需要使用AZURE 详情请见额外文档 docs\\use_azure.md": "AZUREを使用する必要がある場合は、詳細については別のドキュメント docs\\use_azure.md を参照してください", - "请先将.doc文档转换为.docx文档": ".docドキュメントを.docxドキュメントに変換してください", - "请查看终端的输出或耐心等待": "ターミナルの出力を確認するか、お待ちください", - "初始化音频采集线程": "オーディオキャプチャスレッドを初期化します", - "用该压缩包+ConversationHistoryArchive进行反馈": "この圧縮ファイル+ConversationHistoryArchiveを使用してフィードバックします", - "阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https": "阿里云リアルタイム音声認識の設定は難しいため、上級ユーザーのみに推奨されます 参考 https", - "多线程翻译开始": "マルチスレッド翻訳が開始されました", - "只有GenerateImage和生成图像相关": "GenerateImageと関連する画像の生成のみ", - "代理数据解析失败": "プロキシデータの解析に失敗しました", - "建议使用英文单词": "英単語の使用をお勧めします", - "功能描述": "機能の説明", - "读 docs\\use_azure.md": "ドキュメントを読む", - "将消耗较长时间下载中文向量化模型": "中国語のベクトル化モデルをダウンロードするのに時間がかかります", - "表示频道ID": "チャネルIDを表示する", - "未知指令": "不明なコマンド", - "包含documentclass关键字": "documentclassキーワードを含む", - "中读取数据构建知识库": "データを読み取って知識ベースを構築する", - "远程云服务器部署": "リモートクラウドサーバーにデプロイする", - "输入部分太自由": "入力が自由すぎる", - "读取pdf文件": "PDFファイルを読み込む", - "将两个PDF拼接": "2つのPDFを結合する", - "默认值为1000": "デフォルト値は1000です", - "写出文件": "ファイルに書き出す", - "生成的视频文件路径": "生成されたビデオファイルのパス", - "Arixv论文精细翻译": "Arixv論文の詳細な翻訳", - "用latex编译为PDF对修正处做高亮": "LaTeXでコンパイルしてPDFに修正をハイライトする", - "点击“停止”键可终止程序": "「停止」ボタンをクリックしてプログラムを終了できます", - "否则将导致每个人的Claude问询历史互相渗透": "さもないと、各人のClaudeの問い合わせ履歴が相互に侵入します", - "音频文件名": "オーディオファイル名", - "的参数!": "のパラメータ!", - "对话历史": "対話履歴", - "当下一次用户提交时": "次のユーザーの提出時に", - "数学GenerateAnimation": "数学GenerateAnimation", - "如果要使用Claude": "Claudeを使用する場合は", - "请向下翻": "下にスクロールしてください", - "报告已经添加到右侧“文件上传区”": "報告は右側の「ファイルアップロードエリア」に追加されました", - "删除整行的空注释": "空のコメントを含む行を削除する", - "建议直接在API_KEY处填写": "API_KEYの場所に直接入力することをお勧めします", - "暗色模式 / 亮色模式": "ダークモード/ライトモード", - "做一些外观色彩上的调整": "外観の色調整を行う", - "请切换至“KnowledgeBaseQuestionAnswer”插件进行知识库访问": "ナレッジベースのアクセスには「KnowledgeBaseQuestionAnswer」プラグインに切り替えてください", - "它*必须*被包含在AVAIL_LLM_MODELS列表中": "それはAVAIL_LLM_MODELSリストに含まれている必要があります", - "并设置参数": "パラメータを設定する", - "待处理的word文档路径": "処理待ちのWord文書のパス", - "调用缓存": "キャッシュを呼び出す", - "片段": "フラグメント", - "否则结束循环": "それ以外の場合はループを終了する", - "请对下面的音频片段做概述": "以下のオーディオフラグメントについて概要を作成してください", - "高危设置! 常规情况下不要修改! 
通过修改此设置": "高リスクの設定!通常は変更しないでください!この設定を変更することで", - "插件锁定中": "プラグインがロックされています", - "开始": "開始", - "但请查收结果": "結果を確認してください", - "刷新Gradio前端界面": "Gradioフロントエンドインターフェースをリフレッシュする", - "批量SummarizeAudioVideo": "オーディオビデオを一括要約する", - "一个单实例装饰器": "単一のインスタンスデコレータ", - "Claude响应异常": "Claudeの応答が異常です", - "但内部用stream的方法避免中途网线被掐": "ただし、途中でネットワーク接続が切断されることを避けるために、内部ではストリームを使用しています", - "检查USE_PROXY": "USE_PROXYを確認する", - "永远给定None": "常にNoneを指定する", - "报告如何远程获取": "報告のリモート取得方法", - "您可以到Github Issue区": "GithubのIssueエリアにアクセスできます", - "如果只询问1个大语言模型": "1つの大規模言語モデルにのみ質問する場合", - "为了防止大语言模型的意外谬误产生扩散影响": "大規模言語モデルの誤った結果が広がるのを防ぐために", - "编译BibTex": "BibTexのコンパイル", - "⭐多线程方法": "マルチスレッドの方法", - "推荐http": "httpをおすすめします", - "如果要使用": "使用する場合", - "的单词": "の単語", - "如果本地使用不建议加这个": "ローカルで使用する場合はお勧めしません", - "避免线程阻塞": "スレッドのブロックを回避する", - "吸收title与作者以上的部分": "タイトルと著者以上の部分を吸収する", - "作者": "著者", - "5刀": "5ドル", - "ChatGLMFT响应异常": "ChatGLMFTの応答異常", - "才能继续下面的步骤": "次の手順に進むために", - "对这个人外貌、身处的环境、内心世界、过去经历进行描写": "この人の外見、環境、内面世界、過去の経験について描写する", - "找不到微调模型检查点": "ファインチューニングモデルのチェックポイントが見つかりません", - "请仔细鉴别并以原文为准": "注意深く確認し、元のテキストを参照してください", - "计算文件总时长和切割点": "ファイルの総時間とカットポイントを計算する", - "我将为您查找相关壁纸": "関連する壁紙を検索します", - "此插件Windows支持最佳": "このプラグインはWindowsに最適です", - "请输入关键词": "キーワードを入力してください", - "以下所有配置也都支持利用环境变量覆写": "以下のすべての設定は環境変数を使用して上書きすることもサポートしています", - "尝试第": "第#", - "开始生成动画": "アニメーションの生成を開始します", - "免费": "無料", - "我好!": "私は元気です!", - "str类型": "strタイプ", - "生成数学动画": "数学アニメーションの生成", - "GPT结果已输出": "GPTの結果が出力されました", - "PDF文件所在的路径": "PDFファイルのパス", - "源码自译解": "ソースコードの自動翻訳解析", - "格式如org-123456789abcdefghijklmno的": "org-123456789abcdefghijklmnoの形式", - "请对这部分内容进行语法矫正": "この部分の内容に文法修正を行ってください", - "调用whisper模型音频转文字": "whisperモデルを使用して音声をテキストに変換する", - "编译转化后的PDF": "変換されたPDFをコンパイルする", - "将音频解析为简体中文": "音声を簡体字中国語に解析する", - "删除或修改歧义文件": "曖昧なファイルを削除または修正する", - "ChatGLMFT消耗大量的内存": "ChatGLMFTは大量のメモリを消費します", - "图像生成所用到的提示文本": "画像生成に使用されるヒントテキスト", - "如果已经存在": "既に存在する場合", - "以下是一篇学术论文的基础信息": "以下は学術論文の基本情報です", - "解压失败! 需要安装pip install rarfile来解压rar文件": "解凍に失敗しました!rarファイルを解凍するにはpip install rarfileをインストールする必要があります", - "一般是文本过长": "通常、テキストが長すぎます", - "单线程": "シングルスレッド", - "Linux下必须使用Docker安装": "LinuxではDockerを使用してインストールする必要があります", - "请先上传文件素材": "まずファイル素材をアップロードしてください", - "如果分析错误": "もし解析エラーがある場合", - "快捷的调试函数": "便利なデバッグ関数", - "欢迎使用 MOSS 人工智能助手!输入内容即可进行对话": "MOSS AIアシスタントをご利用いただきありがとうございます!入力内容を入力すると、対話ができます", - "json等": "jsonなど", - "--读取参数": "--パラメータの読み込み", - "⭐单线程方法": "⭐シングルスレッドメソッド", - "请用一句话概括这些文件的整体功能": "これらのファイルの全体的な機能を一文で要約してください", - "用于灵活调整复杂功能的各种参数": "複雑な機能を柔軟に調整するためのさまざまなパラメータ", - "默认 False": "デフォルトはFalseです", - "生成中文PDF": "中国語のPDFを生成する", - "正在处理": "処理中", - "需要被切割的音频文件名": "分割する必要のある音声ファイル名", - "根据文本使用GPT模型生成相应的图像": "テキストに基づいてGPTモデルを使用して対応する画像を生成する", - "可选": "オプション", - "Aliyun音频服务异常": "Aliyunオーディオサービスの異常", - "尝试下载": "ダウンロードを試みる", - "需Latex": "LaTeXが必要です", - "拆分过长的Markdown文件": "長すぎるMarkdownファイルを分割する", - "当前支持的格式包括": "現在サポートされている形式には", - "=================================== 工具函数 ===============================================": "=================================== ユーティリティ関数 ===============================================", - "所有音频都总结完成了吗": "すべてのオーディオが要約されましたか", - "没有设置ANTHROPIC_API_KEY": "ANTHROPIC_API_KEYが設定されていません", - "详见项目主README.md": "詳細はプロジェクトのメインREADME.mdを参照してください", - "使用": "使用する", - "P.S. 其他可用的模型还包括": "P.S. 
其他可用的模型还包括", - "保证括号正确": "保证括号正确", - "或代理节点": "或代理节点", - "整理结果为压缩包": "整理结果为压缩包", - "实时音频采集": "实时音频采集", - "获取回复": "获取回复", - "插件可读取“输入区”文本/路径作为参数": "插件可读取“输入区”文本/路径作为参数", - "请讲话": "请讲话", - "将文件复制一份到下载区": "将文件复制一份到下载区", - "from crazy_functions.虚空终端 import 终端": "from crazy_functions.虚空终端 import 终端", - "这个paper有个input命令文件名大小写错误!": "这个paper有个input命令文件名大小写错误!", - "解除插件锁定": "解除插件锁定", - "不能加载Claude组件": "不能加载Claude组件", - "如果有必要": "如果有必要", - "禁止移除或修改此警告": "禁止移除或修改此警告", - "然后进行问答": "然后进行问答", - "响应异常": "响应异常", - "使用英文": "使用英文", - "add gpt task 创建子线程请求gpt": "add gpt task 创建子线程请求gpt", - "实际得到格式": "实际得到格式", - "请继续分析其他源代码": "请继续分析其他源代码", - "”的主要内容": "”的主要内容", - "防止proxies单独起作用": "防止proxies单独起作用", - "临时地激活代理网络": "临时地激活代理网络", - "屏蔽空行和太短的句子": "屏蔽空行和太短的句子", - "把某个路径下所有文件压缩": "把某个路径下所有文件压缩", - "您需要首先调用构建知识库": "您需要首先调用构建知识库", - "翻译-": "翻译-", - "Newbing 请求失败": "Newbing 请求失败", - "次编译": "次编译", - "后缀": "后缀", - "文本碎片重组为完整的tex片段": "文本碎片重组为完整的tex片段", - "待注入的知识库名称id": "待注入的知识库名称id", - "消耗时间的函数": "消耗时间的函数", - "You are associated with a deactivated account. OpenAI以账户失效为由": "You are associated with a deactivated account. OpenAI以账户失效为由", - "成功啦": "成功啦", - "音频文件的路径": "音频文件的路径", - "英文Latex项目全文纠错": "英文Latex项目全文纠错", - "将子线程的gpt结果写入chatbot": "将子线程的gpt结果写入chatbot", - "开始最终总结": "开始最终总结", - "调用": "调用", - "正在锁定插件": "正在锁定插件", - "记住当前的label": "记住当前的label", - "根据自然语言执行插件命令": "根据自然语言执行插件命令", - "response中会携带traceback报错信息": "response中会携带traceback报错信息", - "避免多用户干扰": "避免多用户干扰", - "顺利完成": "顺利完成", - "详情见https": "详情见https", - "清空label": "ラベルをクリアする", - "这需要一段时间计算": "これには時間がかかります", - "找不到": "見つかりません", - "消耗大量的内存": "大量のメモリを消費する", - "安装方法https": "インストール方法https", - "为发送请求做准备": "リクエストの準備をする", - "第1次尝试": "1回目の試み", - "检查结果": "結果をチェックする", - "精细切分latex文件": "LaTeXファイルを細かく分割する", - "api2d等请求源": "api2dなどのリクエストソース", - "填入你亲手写的部署名": "あなたが手書きしたデプロイ名を入力してください", - "给出指令": "指示を与える", - "请问什么是质子": "プロトンとは何ですか", - "请直接去该路径下取回翻译结果": "直接そのパスに移動して翻訳結果を取得してください", - "等待Claude回复的片段": "Claudeの返信を待っているフラグメント", - "Latex没有安装": "LaTeXがインストールされていません", - "文档越长耗时越长": "ドキュメントが長いほど時間がかかります", - "没有阿里云语音识别APPKEY和TOKEN": "阿里雲の音声認識のAPPKEYとTOKENがありません", - "分析结果": "結果を分析する", - "请立即终止程序": "プログラムを即座に終了してください", - "正在尝试自动安装": "自動インストールを試みています", - "请直接提交即可": "直接提出してください", - "将指定目录下的PDF文件从英文翻译成中文": "指定されたディレクトリ内のPDFファイルを英語から中国語に翻訳する", - "请查收结果": "結果を確認してください", - "上下布局": "上下布局", - "此处可以输入解析提示": "此处可以输入解析提示", - "前面是中文逗号": "前面是中文逗号", - "的依赖": "的依赖", - "材料如下": "材料如下", - "欢迎加REAME中的QQ联系开发者": "欢迎加REAME中的QQ联系开发者", - "开始下载": "開始ダウンロード", - "100字以内": "100文字以内", - "创建request": "リクエストの作成", - "创建存储切割音频的文件夹": "切り取られた音声を保存するフォルダの作成", - "⭐主进程执行": "⭐メインプロセスの実行", - "音频解析结果": "音声解析結果", - "Your account is not active. 
OpenAI以账户失效为由": "アカウントがアクティブではありません。OpenAIはアカウントの無効化を理由にしています", - "虽然PDF生成失败了": "PDFの生成に失敗しました", - "如果这里报错": "ここでエラーが発生した場合", - "前面是中文冒号": "前面は中国語のコロンです", - "SummarizeAudioVideo内容": "SummarizeAudioVideoの内容", - "openai的官方KEY需要伴随组织编码": "openaiの公式KEYは組織コードと一緒に必要です", - "是本次输入": "これは今回の入力です", - "色彩主体": "色彩の主体", - "Markdown翻译": "Markdownの翻訳", - "会被加在你的输入之后": "あなたの入力の後に追加されます", - "失败啦": "失敗しました", - "每个切割音频片段的时长": "各切り取り音声の長さ", - "拆分过长的latex片段": "長すぎるlatex断片を分割する", - "待提取的知识库名称id": "抽出するナレッジベースの名前id", - "在这里放一些网上搜集的demo": "ここにネットで収集したデモをいくつか置く", - "环境变量配置格式见docker-compose.yml": "環境変数の設定形式はdocker-compose.ymlを参照", - "Claude组件初始化成功": "Claudeコンポーネントの初期化に成功しました", - "尚未加载": "まだロードされていません", - "等待Claude响应": "Claudeの応答を待っています", - "重组": "再構成", - "将文件添加到chatbot cookie中": "ファイルをchatbot cookieに追加する", - "回答完问题后": "質問に回答した後", - "将根据报错信息修正tex源文件并重试": "エラー情報に基づいてtexソースファイルを修正して再試行します", - "是否在触发时清除历史": "トリガー時に履歴をクリアするかどうか", - "尝试执行Latex指令失败": "Latexコマンドの実行試行に失敗しました", - "默认 True": "デフォルトはTrue", - "文本碎片重组为完整的tex文件": "テキスト断片を完全なtexファイルに再構成する", - "注意事项": "注意事項", - "您接下来不能再使用其他插件了": "この後、他のプラグインは使用できません", - "属性": "属性", - "正在编译PDF文档": "PDFドキュメントをコンパイルしています", - "提取视频中的音频": "動画から音声を抽出する", - "正在同时咨询ChatGPT和ChatGLM……": "ChatGPTとChatGLMに同時に問い合わせています……", - "Chuanhu-Small-and-Beautiful主题": "Chuanhu-Small-and-Beautifulテーマ", - "版权归原文作者所有": "著作権は原文の著者に帰属します", - "如果程序停顿5分钟以上": "プログラムが5分以上停止した場合", - "请输入要翻译成哪种语言": "どの言語に翻訳するかを入力してください", - "以秒为单位": "秒単位で", - "请以以下方式load模型!!!": "以下の方法でモデルをロードしてください!!!", - "使用时": "使用時", - "对这个人外貌、身处的环境、内心世界、人设进行描写": "この人の外見、環境、内面世界、キャラクターを描写する", - "例如翻译、解释代码、润色等等": "例えば翻訳、コードの説明、修正など", - "多线程Demo": "マルチスレッドデモ", - "不能正常加载": "正常にロードできません", - "还原部分原文": "一部の元のテキストを復元する", - "可以将自身的状态存储到cookie中": "自身の状態をcookieに保存することができます", - "释放线程锁": "スレッドロックを解放する", - "当前知识库内的有效文件": "現在のナレッジベース内の有効なファイル", - "也是可读的": "読み取り可能です", - "等待ChatGLMFT响应中": "ChatGLMFTの応答を待っています", - "输入 stop 以终止对话": "stopを入力して対話を終了します", - "对整个Latex项目进行纠错": "全体のLatexプロジェクトを修正する", - "报错信息": "エラーメッセージ", - "下载pdf文件未成功": "PDFファイルのダウンロードに失敗しました", - "正在加载Claude组件": "Claudeコンポーネントを読み込んでいます", - "格式": "フォーマット", - "Claude响应缓慢": "Claudeの応答が遅い", - "该选项即将被弃用": "このオプションはまもなく廃止されます", - "正常状态": "正常な状態", - "中文Bing版": "中国語Bing版", - "代理网络配置": "プロキシネットワークの設定", - "Openai 限制免费用户每分钟20次请求": "Openaiは無料ユーザーに対して1分間に20回のリクエスト制限を設けています", - "gpt写的": "gptで書かれた", - "向已打开的频道发送一条文本消息": "既に開いているチャンネルにテキストメッセージを送信する", - "缺少ChatGLMFT的依赖": "ChatGLMFTの依存関係が不足しています", - "注意目前不能多人同时调用Claude接口": "現在、複数の人が同時にClaudeインターフェースを呼び出すことはできません", - "或者不在环境变量PATH中": "または環境変数PATHに存在しません", - "提问吧! 但注意": "質問してください!ただし注意してください", - "因此选择GenerateImage函数": "したがって、GenerateImage関数を選択します", - "无法找到一个主Tex文件": "メインのTexファイルが見つかりません", - "转化PDF编译已经成功": "PDF変換コンパイルが成功しました", - "因为在同一个频道里存在多人使用时历史消息渗透问题": "同じチャンネルで複数の人が使用する場合、過去のメッセージが漏洩する問題があります", - "SlackClient类用于与Slack API进行交互": "SlackClientクラスはSlack APIとのインタラクションに使用されます", - "如果存在调试缓存文件": "デバッグキャッシュファイルが存在する場合", - "举例": "例を挙げる", - "无需填写": "記入する必要はありません", - "配置教程&视频教程": "設定チュートリアル&ビデオチュートリアル", - "最后一步处理": "最後のステップの処理", - "定位主Latex文件": "メインのLatexファイルを特定する", - "暂不提交": "一時的に提出しない", - "由于最为关键的转化PDF编译失败": "最も重要なPDF変換コンパイルが失敗したため", - "用第二人称": "第二人称を使用する", - "例如 RoPlZrM88DnAFkZK": "例えば RoPlZrM88DnAFkZK", - "没有设置ANTHROPIC_API_KEY选项": "ANTHROPIC_API_KEYオプションが設定されていません", - "找不到任何.tex文件": ".texファイルが見つかりません", - "请您不要删除或修改这行警告": "この警告行を削除または変更しないでください", - "只有第二步成功": "第二ステップのみ成功した場合", - "调用Claude时": "Claudeを呼び出すとき", - "输入 clear 以清空对话历史": "clearを入力して対話履歴をクリアします", - "= 2 通过一些Latex模板中常见": "= 2 いくつかのLatexテンプレートでよく見られるものによって", - "没给定指令": "指示が与えられていません", - "还原原文": "原文を復元する", - "自定义API KEY格式": "カスタムAPI KEY形式", - "防止丢失最后一条消息": "最後のメッセージの紛失を防ぐ", - "方法": "メソッド", - "压缩包": "圧縮ファイル", - "对各个llm模型进行单元测试": "各llmモデルの単体テストを行う", - "导入依赖失败": "依存関係のインポートに失敗しました", - "详情信息见requirements.txt": "詳細はrequirements.txtを参照", - "翻译内容可靠性无保障": "翻訳内容の信頼性は保証されません", - "刷新页面即可以退出KnowledgeBaseQuestionAnswer模式": "ページを更新するとKnowledgeBaseQuestionAnswerモードを終了できます", - "上传本地文件/压缩包供函数插件调用": "ローカルファイル/圧縮ファイルをアップロードして関数プラグインから呼び出せるようにする", - "循环监听已打开频道的消息": "開いているチャンネルのメッセージをループで監視する", - "一个包含所有切割音频片段文件路径的列表": "すべての切り取り音声断片のファイルパスを含むリスト", - "检测到arxiv文档连接": "arxivドキュメントのリンクを検出しました", - "P.S. 
顺便把CTEX塞进去以支持中文": "P.S. ついでにCTEXを入れて中国語をサポートします", - "后面是英文冒号": "後ろは英語のコロンです", - "上传文件自动修正路径": "アップロードファイルのパスを自動修正する", - "实现消息发送、接收等功能": "メッセージの送受信などの機能を実現する", - "改变输入参数的顺序与结构": "入力パラメータの順序と構造を変更する", - "正在精细切分latex文件": "LaTeXファイルを細かく分割しています", - "读取文件": "ファイルを読み込んでいます" -} diff --git a/docs/translate_std.json b/docs/translate_std.json deleted file mode 100644 index 581d83e7e0b1d3911f6ce5af0a907949b06ab65c..0000000000000000000000000000000000000000 --- a/docs/translate_std.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "解析JupyterNotebook": "ParsingJupyterNotebook", - "Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF", - "联网的ChatGPT_bing版": "OnlineChatGPT_BingEdition", - "理解PDF文档内容标准文件输入": "UnderstandPdfDocumentContentStandardFileInput", - "Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison", - "下载arxiv论文并翻译摘要": "DownloadArxivPaperAndTranslateAbstract", - "Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage", - "批量翻译PDF文档_多线程": "BatchTranslatePDFDocuments_MultiThreaded", - "下载arxiv论文翻译摘要": "DownloadArxivPaperTranslateAbstract", - "解析一个Python项目": "ParsePythonProject", - "解析一个Golang项目": "ParseGolangProject", - "代码重写为全英文_多线程": "RewriteCodeToEnglish_MultiThreaded", - "解析一个CSharp项目": "ParsingCSharpProject", - "删除所有本地对话历史记录": "DeleteAllLocalConversationHistoryRecords", - "批量Markdown翻译": "BatchTranslateMarkdown", - "连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion", - "Langchain知识库": "LangchainKnowledgeBase", - "Latex输出PDF": "OutputPDFFromLatex", - "把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline", - "Latex精细分解与转化": "DecomposeAndConvertLatex", - "解析一个C项目的头文件": "ParseCProjectHeaderFiles", - "Markdown英译中": "TranslateMarkdownFromEnglishToChinese", - "Markdown中译英": "MarkdownChineseToEnglish", - "数学动画生成manim": "MathematicalAnimationGenerationManim", - "chatglm微调工具": "ChatGLMFineTuningTool", - "解析一个Rust项目": "ParseRustProject", - "解析一个Java项目": "ParseJavaProject", - "联网的ChatGPT": "ChatGPTConnectedToNetwork", - "解析任意code项目": "ParseAnyCodeProject", - "合并小写开头的段落块": "MergeLowercaseStartingParagraphBlocks", - "Latex英文润色": "EnglishProofreadingForLatex", - "Latex全文润色": "FullTextProofreadingForLatex", - "询问多个大语言模型": "InquiryMultipleLargeLanguageModels", - "解析一个Lua项目": "ParsingLuaProject", - "解析ipynb文件": "ParsingIpynbFiles", - "批量总结PDF文档": "BatchSummarizePDFDocuments", - "批量翻译PDF文档": "BatchTranslatePDFDocuments", - "理解PDF文档内容": "UnderstandPdfDocumentContent", - "Latex中文润色": "LatexChineseProofreading", - "Latex英文纠错": "LatexEnglishCorrection", - "Latex全文翻译": "LatexFullTextTranslation", - "同时问询_指定模型": "InquireSimultaneously_SpecifiedModel", - "批量生成函数注释": "BatchGenerateFunctionComments", - "解析一个前端项目": "ParseFrontendProject", - "高阶功能模板函数": "HighOrderFunctionTemplateFunctions", - "高级功能函数模板": "AdvancedFunctionTemplate", - "总结word文档": "SummarizingWordDocuments", - "载入对话历史存档": "LoadConversationHistoryArchive", - "Latex中译英": "LatexChineseToEnglish", - "Latex英译中": "LatexEnglishToChinese", - "连接网络回答问题": "ConnectToNetworkToAnswerQuestions", - "交互功能模板函数": "InteractiveFunctionTemplateFunction", - "交互功能函数模板": "InteractiveFunctionFunctionTemplate", - "sprint亮靛": "SprintIndigo", - "print亮黄": "PrintBrightYellow", - "print亮绿": "PrintBrightGreen", - "print亮红": "PrintBrightRed", - "解析项目源代码": "ParseProjectSourceCode", - "解析一个C项目": "ParseCProject", - "全项目切换英文": "SwitchToEnglishForTheWholeProject", - "谷歌检索小助手": "GoogleSearchAssistant", - "读取知识库作答": "ReadKnowledgeArchiveAnswerQuestions", - "print亮蓝": "PrintBrightBlue", - "微调数据集生成": "FineTuneDatasetGeneration", - "清理多余的空行": "CleanUpExcessBlankLines", - "编译Latex": "CompileLatex", - "解析Paper": 
"ParsePaper", - "ipynb解释": "IpynbExplanation", - "读文章写摘要": "ReadArticleWriteSummary", - "生成函数注释": "GenerateFunctionComments", - "解析项目本身": "ParseProjectItself", - "对话历史存档": "ConversationHistoryArchive", - "专业词汇声明": "ProfessionalTerminologyDeclaration", - "解析docx": "ParseDocx", - "解析源代码新": "ParsingSourceCodeNew", - "总结音视频": "SummaryAudioVideo", - "知识库问答": "UpdateKnowledgeArchive", - "多文件润色": "ProofreadMultipleFiles", - "多文件翻译": "TranslateMultipleFiles", - "解析PDF": "ParsePDF", - "同时问询": "SimultaneousInquiry", - "图片生成": "ImageGeneration", - "动画生成": "AnimationGeneration", - "语音助手": "VoiceAssistant", - "启动微调": "StartFineTuning", - "清除缓存": "ClearCache", - "辅助功能": "Accessibility", - "虚空终端": "VoidTerminal", - "解析PDF_基于GROBID": "ParsePDF_BasedOnGROBID", - "虚空终端主路由": "VoidTerminalMainRoute", - "批量翻译PDF文档_NOUGAT": "BatchTranslatePDFDocuments_NOUGAT", - "解析PDF_基于NOUGAT": "ParsePDF_NOUGAT", - "解析一个Matlab项目": "AnalyzeAMatlabProject", - "函数动态生成": "DynamicFunctionGeneration", - "多智能体终端": "MultiAgentTerminal", - "多智能体": "MultiAgent", - "图片生成_DALLE2": "ImageGeneration_DALLE2", - "图片生成_DALLE3": "ImageGeneration_DALLE3", - "图片修改_DALLE2": "ImageModification_DALLE2", - "生成多种Mermaid图表": "GenerateMultipleMermaidCharts", - "知识库文件注入": "InjectKnowledgeBaseFiles", - "PDF翻译中文并重新编译PDF": "TranslatePDFToChineseAndRecompilePDF", - "随机小游戏": "RandomMiniGame", - "互动小游戏": "InteractiveMiniGame", - "解析历史输入": "ParseHistoricalInput", - "高阶功能模板函数示意图": "HighOrderFunctionTemplateDiagram" -} \ No newline at end of file diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json deleted file mode 100644 index 3378eda74fc53180be8323cb656be37cd25d6f3d..0000000000000000000000000000000000000000 --- a/docs/translate_traditionalchinese.json +++ /dev/null @@ -1,2273 +0,0 @@ -{ - "print亮黄": "PrintBrightYellow", - "print亮绿": "PrintBrightGreen", - "print亮红": "PrintBrightRed", - "print红": "PrintRed", - "print绿": "PrintGreen", - "print黄": "PrintYellow", - "print蓝": "PrintBlue", - "print紫": "PrintPurple", - "print靛": "PrintIndigo", - "print亮蓝": "PrintBrightBlue", - "print亮紫": "PrintBrightPurple", - "print亮靛": "PrintBrightIndigo", - "读文章写摘要": "ReadArticleWriteSummary", - "批量生成函数注释": "BatchGenerateFunctionComments", - "生成函数注释": "GenerateFunctionComments", - "解析项目本身": "ParseProjectItself", - "解析项目源代码": "ParseProjectSourceCode", - "解析一个Python项目": "ParsePythonProject", - "解析一个C项目的头文件": "ParseCProjectHeaderFile", - "解析一个C项目": "ParseCProject", - "解析一个Rust项目": "ParseRustProject", - "解析一个Java项目": "ParseJavaProject", - "解析一个前端项目": "ParseAFrontEndProject", - "高阶功能模板函数": "HigherOrderFeatureTemplateFunction", - "高级功能函数模板": "AdvancedFeatureFunctionTemplate", - "全项目切换英文": "SwitchEntireProjectToEnglish", - "代码重写为全英文_多线程": "RewriteCodeToEnglishMultithreading", - "Latex英文润色": "LatexEnglishPolishing", - "Latex全文润色": "LatexWholeDocumentPolishing", - "同时问询": "InquireSimultaneously", - "询问多个大语言模型": "InquireMultipleLargeLanguageModels", - "解析一个Lua项目": "ParseALuaProject", - "解析一个CSharp项目": "ParseACSharpProject", - "总结word文档": "SummarizeWordDocument", - "解析ipynb文件": "ParseIpynbFile", - "解析JupyterNotebook": "ParseJupyterNotebook", - "对话历史存档": "ConversationHistoryArchive", - "载入对话历史存档": "LoadConversationHistoryArchive", - "删除所有本地对话历史记录": "DeleteAllLocalConversationHistoryRecords", - "Markdown英译中": "MarkdownEnglishToChinese", - "批量Markdown翻译": "BatchMarkdownTranslation", - "批量总结PDF文档": "BatchSummarizePDFDocuments", - "批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsPdfminer", - "批量翻译PDF文档": "BatchTranslatePDFDocuments", - "批量翻译PDF文档_多线程": 
"BatchTranslatePdfDocumentsMultithreaded", - "谷歌检索小助手": "GoogleSearchAssistant", - "理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPdfDocumentContent", - "理解PDF文档内容": "UnderstandingPdfDocumentContent", - "Latex中文润色": "ChineseProofreadingInLatex", - "Latex中译英": "ChineseToEnglishTranslationInLatex", - "Latex全文翻译": "FullTextTranslationInLatex", - "Latex英译中": "EnglishToChineseTranslationInLatex", - "Markdown中译英": "ChineseToEnglishTranslationInMarkdown", - "下载arxiv论文并翻译摘要": "DownloadArxivPapersAndTranslateAbstract", - "下载arxiv论文翻译摘要": "DownloadArxivPapersTranslateAbstract", - "连接网络回答问题": "ConnectToInternetToAnswerQuestions", - "联网的ChatGPT": "ChatGPTConnectedToInternet", - "解析任意code项目": "ParsingAnyCodeProject", - "同时问询_指定模型": "InquiryWithSpecifiedModelSimultaneously", - "图片生成": "ImageGeneration", - "test_解析ipynb文件": "TestParsingIpynbFile", - "把字符太少的块清除为回车": "RemoveBlocksWithTooFewCharactersToNewline", - "清理多余的空行": "CleaningUpExtraBlankLines", - "合并小写开头的段落块": "MergeParagraphBlocksStartingWithLowerCase", - "多文件润色": "ProofreadingMultipleFiles", - "多文件翻译": "TranslationOfMultipleFiles", - "解析docx": "ParseDocx", - "解析PDF": "ParsePDF", - "解析Paper": "ParsePaper", - "ipynb解释": "IpynbInterpret", - "解析源代码新": "ParseSourceCodeNew", - "输入区": "輸入區", - "获取文章meta信息": "獲取文章meta信息", - "等待": "等待", - "不能正常加载MOSS的参数!": "無法正常加載MOSS的參數!", - "橙色": "橙色", - "窗口布局": "窗口佈局", - "需要安装pip install py7zr来解压7z文件": "需要安裝pip install py7zr來解壓7z文件", - "上下布局": "上下佈局", - "打开文件": "打開文件", - "可能需要分组处理": "可能需要分組處理", - "用tex格式": "用tex格式", - "按Shift+Enter换行": "按Shift+Enter換行", - "输入路径或上传压缩包": "輸入路徑或上傳壓縮包", - "翻译成地道的中文": "翻譯成地道的中文", - "上下文": "上下文", - "请耐心完成后再提交新问题": "請耐心完成後再提交新問題", - "可以直接修改对话界面内容": "可以直接修改對話界面內容", - "检测输入参数": "檢測輸入參數", - "也许会导致低配计算机卡死 ……": "也許會導致低配計算機卡死……", - "html格式": "html格式", - "不能识别的URL!": "無法識別的URL!", - "第2步": "第2步", - "若上传压缩文件": "若上傳壓縮文件", - "多线程润色开始": "多線程潤色開始", - "警告!API_URL配置选项将被弃用": "警告!API_URL配置選項將被棄用", - "非OpenAI官方接口的出现这样的报错": "非OpenAI官方接口出現這樣的錯誤", - "如果没找到任何文件": "如果沒找到任何文件", - "生成一份任务执行报告": "生成一份任務執行報告", - "而cl**h 的默认本地协议是http": "而cl**h的默認本地協議是http", - "gpt_replying_buffer也写完了": "gpt_replying_buffer也寫完了", - "是本次输出": "是本次輸出", - "展现在报告中的输入": "展現在報告中的輸入", - "和端口": "和端口", - "Pay-as-you-go users的限制是每分钟3500次": "Pay-as-you-go用戶的限制是每分鐘3500次", - "既可以写": "既可以寫", - "输入清除键": "輸入清除鍵", - "gpt模型参数": "gpt模型參數", - "直接清除历史": "直接清除歷史", - "当前模型": "當前模型", - ";5、中文摘要翻译": ";5、中文摘要翻譯", - "将markdown转化为好看的html": "將markdown轉換為好看的html", - "谷歌学术检索助手": "谷歌學術檢索助手", - "后语": "後語", - "请确认是否满足您的需要": "請確認是否滿足您的需要", - "本地路径": "本地路徑", - "sk-此处填API密钥": "sk-此處填API密鑰", - "正常结束": "正常結束", - "排除了以上两个情况": "排除了以上兩個情況", - "把gradio的运行地址更改到指定的二次路径上": "將gradio的運行地址更改到指定的二次路徑上", - "配置其Path环境变量": "配置其Path環境變量", - "的第": "的第", - "减少重复": "減少重複", - "如果超过期限没有喂狗": "如果超過期限沒有餵狗", - "函数的说明请见 request_llms/bridge_all.py": "函數的說明請見 request_llms/bridge_all.py", - "第7步": "第7步", - "说": "說", - "中途接收可能的终止指令": "中途接收可能的終止指令", - "第5次尝试": "第5次嘗試", - "gradio可用颜色列表": "gradio可用顏色列表", - "返回的结果是": "返回的結果是", - "出现的所有文章": "所有出現的文章", - "更换LLM模型/请求源": "更換LLM模型/請求源", - "调用NewBing时": "調用NewBing時", - "AutoGPT是什么": "AutoGPT是什麼", - "则换行符更有可能表示段落分隔": "則換行符更有可能表示段落分隔", - "接收文件后与chatbot的互动": "接收文件後與chatbot的互動", - "每个子任务展现在报告中的输入": "每個子任務展現在報告中的輸入", - "按钮见functional.py": "按鈕見functional.py", - "地址🚀": "地址🚀", - "将长文本分离开来": "將長文本分離開來", - "ChatGLM消耗大量的内存": "ChatGLM消耗大量的內存", - "使用 lru缓存 加快转换速度": "使用lru緩存加快轉換速度", - "屏蔽掉 chatglm的多线程": "屏蔽掉chatglm的多線程", - "不起实际作用": "不起實際作用", - "先寻找到解压的文件夹路径": "先尋找到解壓的文件夾路徑", - "观察窗": "觀察窗", - "请解释以下代码": "請解釋以下代碼", - "使用中文回答我的问题": "使用中文回答我的問題", - "备份一个文件": "備份一個文件", - 
"未知": "未知", - "其他錯誤": "其他錯誤", - "等待NewBing响应": "等待NewBing回應", - "找不到任何CSharp文件": "找不到任何CSharp檔案", - "插件demo": "插件範例", - "1. 把input的余量留出来": "1. 留出input的餘量", - "如果文章被切分了": "如果文章被切分了", - "或者您没有获得体验资格": "或者您沒有獲得體驗資格", - "修正值": "修正值", - "正在重试": "正在重試", - "展示分割效果": "展示分割效果", - "已禁用": "已禁用", - "抽取摘要": "抽取摘要", - "下载完成": "下載完成", - "无法连接到该网页": "無法連接到該網頁", - "根据以上的对话": "根據以上的對話", - "第1次尝试": "第1次嘗試", - "我们用最暴力的方法切割": "我們用最暴力的方法切割", - "回滚代码到原始的浏览器打开函数": "回滾程式碼到原始的瀏覽器開啟函數", - "先上传存档或输入路径": "先上傳存檔或輸入路徑", - "避免代理网络产生意外污染": "避免代理網路產生意外污染", - "发送图片时": "傳送圖片時", - "第二步": "第二步", - "完成": "完成", - "搜索页面中": "搜索頁面中", - "下载中": "下載中", - "重试一次": "重試一次", - "历史上的今天": "歷史上的今天", - "2. 替换跨行的连词": "2. 替換跨行的連詞", - "协议": "協議", - "批量ChineseToEnglishTranslationInMarkdown": "批量Markdown中文轉英文翻譯", - "也可以直接是": "也可以直接是", - "插件模型的参数": "插件模型的參數", - "也可以根据之前的内容长度来判断段落是否已经足够长": "也可以根據之前的內容長度來判斷段落是否已經足夠長", - "引入一个有cookie的chatbot": "引入一個有cookie的聊天機器人", - "任何文件": "任何文件", - "代码直接生效": "代碼直接生效", - "高级实验性功能模块调用": "高級實驗性功能模塊調用", - "修改函数插件代码后": "修改函數插件代碼後", - "按Enter提交": "按Enter提交", - "天蓝色": "天藍色", - "子任务失败时的重试次数": "子任務失敗時的重試次數", - "格式须是": "請輸入正確的格式", - "调用主体": "調用主體", - "有些文章的正文部分字体大小不是100%统一的": "有些文章正文中字體大小不統一", - "线程": "執行緒", - "是否一键更新代码": "是否一鍵更新程式碼", - "除了基础的pip依赖以外": "除了基礎的pip依賴外", - "紫色": "紫色", - "同样支持多线程": "同樣支援多執行緒", - "这个中文的句号是故意的": "這個中文句號是故意的", - "获取所有文章的标题和作者": "取得所有文章的標題和作者", - "Incorrect API key. OpenAI以提供了不正确的API_KEY为由": "API金鑰錯誤。OpenAI提供了錯誤的API_KEY", - "绿色": "綠色", - "异常": "異常", - "pip install pywin32 用于doc格式": "pip install pywin32 用於doc格式", - "也可以写": "也可以寫", - "请对下面的文章片段用中文做一个概述": "請用中文對下面的文章片段做一個概述", - "上下文管理器是一种Python对象": "上下文管理器是一種Python物件", - "处理文件的上传": "處理檔案的上傳", - "尝试Prompt": "嘗試Prompt", - "检查USE_PROXY选项是否修改": "檢查USE_PROXY選項是否修改", - "改为True应用代理": "將True更改為應用代理", - "3. 如果余量太小了": "如果餘量太小", - "老旧的Demo": "舊版Demo", - "第一部分": "第一部分", - "插件参数区": "插件參數區", - "历史中哪些事件发生在": "歷史中哪些事件發生在", - "现将您的现有配置移动至config_private.py以防止配置丢失": "現在將您現有的配置移動到config_private.py以防止配置丟失", - "当你想发送一张照片时": "當你想發送一張照片時", - "接下来请将以下代码中包含的所有中文转化为英文": "接下來請將以下代碼中包含的所有中文轉化為英文", - "i_say=真正给chatgpt的提问": "i_say=真正給chatgpt的提問", - "解析整个C++项目头文件": "解析整個C++項目頭文件", - "需要安装pip install rarfile来解压rar文件": "需要安裝pip install rarfile來解壓rar文件", - "把已经获取的数据显示出去": "顯示已經獲取的數據", - "红色": "紅色", - "异步任务结束": "異步任務結束", - "进行学术解答": "進行學術解答", - "config_private.py放自己的秘密如API和代理网址": "config_private.py放自己的秘密如API和代理網址", - "学术中英互译": "學術中英互譯", - "选择处理": "選擇處理", - "利用以上信息": "利用以上信息", - "暂时先这样顶一下": "暫時先這樣頂一下", - "如果中文效果不理想": "如果中文效果不理想", - "常见协议无非socks5h/http": "常見協議無非socks5h/http", - "返回文本内容": "返回文本內容", - "用于重组输入参数": "用於重組輸入參數", - "第8步": "第8步", - "可能处于折叠状态": "可能處於折疊狀態", - "重置": "重置", - "清除": "清除", - "放到每个子线程中分别执行": "放到每個子線程中分別執行", - "载入对话历史文件": "載入對話歷史文件", - "列举两条并发送相关图片": "列舉兩條並發送相關圖片", - "然后重试": "然後重試", - "重新URL重新定向": "重新URL重新定向", - "内部函数通过使用importlib模块的reload函数和inspect模块的getmodule函数来重新加载并获取函数模块": "內部函數通過使用importlib模塊的reload函數和inspect模塊的getmodule函數來重新加載並獲取函數模塊", - "第一层列表是子任务分解": "第一層列表是子任務分解", - "为发送请求做准备": "為發送請求做準備", - "暂时没有用武之地": "暫時沒有用武之地", - "并对文件中的所有函数生成注释": "並對文件中的所有函數生成註釋", - "分解连字": "分解連字", - "不输入文件名": "不輸入檔案名稱", - "并相应地进行替换": "並相應地進行替換", - "在实验过程中发现调用predict_no_ui处理长文档时": "在實驗過程中發現調用predict_no_ui處理長文檔時", - "提取文本块主字体": "提取文本塊主字體", - "temperature是chatGPT的内部调优参数": "temperature是chatGPT的內部調優參數", - "没办法了": "沒辦法了", - "获取正文主字体": "獲取正文主字體", - "看门狗": "看門狗", - "当前版本": "當前版本", - "这个函数是用来获取指定目录下所有指定类型": "這個函數是用來獲取指定目錄下所有指定類型", - "api_key已导入": "api_key已導入", - "找不到任何.tex或.pdf文件": "找不到任何.tex或.pdf檔案", - "You exceeded your current quota. 
OpenAI以账户额度不足为由": "您超出了當前配額。OpenAI以帳戶額度不足為由", - "自动更新程序": "自動更新程式", - "并且不要有反斜线": "並且不要有反斜線", - "你必须逐个文献进行处理": "您必須逐個文獻進行處理", - "本地文件地址": "本地檔案地址", - "提取精炼信息": "提取精煉資訊", - "设置用户名和密码": "設置使用者名稱和密碼", - "请不吝PR!": "請不吝PR!", - "通过把連字": "通過將連字", - "文件路徑列表": "檔案路徑清單", - "判定為數據流的結束": "判定為資料流的結束", - "參數": "參數", - "避免不小心傳github被別人看到": "避免不小心傳到github被別人看到", - "記錄刪除註釋後的文本": "記錄刪除註釋後的文字", - "比正文字體小": "比正文字體小", - "上傳本地文件可供紅色函數插件調用": "上傳本地文件供紅色函數插件調用", - "生成圖像": "生成圖像", - "追加歷史": "追加歷史", - "網絡代理狀態": "網絡代理狀態", - "不需要再次轉化": "不需要再次轉換", - "帶超時倒計時": "帶有超時倒數計時", - "保存當前對話": "儲存目前對話", - "等待響應": "等待回應", - "依賴檢測通過": "依賴檢測通過", - "如果要使用ChatGLM": "如果要使用ChatGLM", - "對IPynb文件進行解析": "對IPynb檔案進行解析", - "先切換模型到openai或api2d": "先切換模型到openai或api2d", - "塊元提取": "區塊元素提取", - "调用路径参数已自动修正到": "調用路徑參數已自動修正到", - "且下一个字符为大写字母": "且下一個字符為大寫字母", - "无": "無", - "$c$是光速": "$c$是光速", - "发送请求到OpenAI后": "發送請求到OpenAI後", - "您也可以选择删除此行警告": "您也可以選擇刪除此行警告", - "i_say_show_user=给用户看的提问": "i_say_show_user=給用戶看的提問", - "Endpoint 重定向": "Endpoint 重定向", - "基础功能区": "基礎功能區", - "根据以上你自己的分析": "根據以上你自己的分析", - "以上文件将被作为输入参数": "以上文件將被作為輸入參數", - "已完成": "已完成", - "第2次尝试": "第2次嘗試", - "若输入0": "若輸入0", - "自动缩减文本": "自動縮減文本", - "顺利完成": "順利完成", - "收到": "收到", - "打开浏览器": "打開瀏覽器", - "第5步": "第5步", - "Free trial users的限制是每分钟3次": "Free trial users的限制是每分鐘3次", - "请用markdown格式输出": "請用 Markdown 格式輸出", - "模仿ChatPDF": "模仿 ChatPDF", - "等待多久判定为超时": "等待多久判定為超時", - "请结合互联网信息回答以下问题": "請結合互聯網信息回答以下問題", - "IP查询频率受限": "IP查詢頻率受限", - "高级参数输入区的显示提示": "高級參數輸入區的顯示提示", - "的高级参数说明": "的高級參數說明", - "默认开启": "默認開啟", - "为实现更多强大的功能做基础": "為實現更多強大的功能做基礎", - "中文学术润色": "中文學術潤色", - "注意这里的历史记录被替代了": "注意這裡的歷史記錄被替代了", - "子线程任务": "子線程任務", - "个": "個", - "正在加载tokenizer": "正在加載 tokenizer", - "生成http请求": "生成 HTTP 請求", - "从而避免解析压缩文件": "從而避免解析壓縮文件", - "加载参数": "加載參數", - "由于输入长度限制": "由於輸入長度限制", - "如果直接在海外服务器部署": "如果直接在海外伺服器部署", - "你提供了错误的API_KEY": "你提供了錯誤的API_KEY", - "history 是之前的对话列表": "history 是之前的對話列表", - "实现更换API_URL的作用": "實現更換API_URL的作用", - "Json解析不合常规": "Json解析不合常規", - "函数插件-下拉菜单与随变按钮的互动": "函數插件-下拉菜單與隨變按鈕的互動", - "则先将公式转换为HTML格式": "則先將公式轉換為HTML格式", - "1. 临时解决方案": "1. 
臨時解決方案", - "如1812.10695": "如1812.10695", - "最后用中文翻译摘要部分": "最後用中文翻譯摘要部分", - "MOSS响应异常": "MOSS響應異常", - "读取pdf文件": "讀取pdf文件", - "重试的次数限制": "重試的次數限制", - "手动指定询问哪些模型": "手動指定詢問哪些模型", - "情况会好转": "情況會好轉", - "超过512个": "超過512個", - "多线": "多線", - "合并小写字母开头的段落块并替换为空格": "合併小寫字母開頭的段落塊並替換為空格", - "暗色主题": "暗色主題", - "提高限制请查询": "提高限制請查詢", - "您还需要运行": "您還需要執行", - "将双空行": "將雙空行", - "请削减单次输入的文本量": "請減少單次輸入的文本量", - "提高语法、清晰度和整体可读性": "提高語法、清晰度和整體可讀性", - "删除其中的所有注释": "刪除其中的所有註釋", - "列表长度为子任务的数量": "列表長度為子任務的數量", - "直接在输入区键入api_key": "直接在輸入區鍵入api_key", - "方法会在代码块被执行前被调用": "方法會在代碼塊被執行前被調用", - "懂的都懂": "懂的都懂", - "加一个live2d装饰": "加一個live2d裝飾", - "请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分": "請從中提取出“標題”、“收錄會議或期刊”、“作者”、“摘要”、“編號”、“作者郵箱”這六個部分", - "聊天历史": "聊天歷史", - "将插件中出的所有问题显示在界面上": "將插件中出的所有問題顯示在界面上", - "每个子任务的输入": "每個子任務的輸入", - "yield一次以刷新前端页面": "yield一次以刷新前端頁面", - "不能自定义字体和颜色": "不能自定義字體和顏色", - "如果本地使用不建议加这个": "如果本地使用不建議加這個", - "例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "例如chatglm&gpt-3.5-turbo&api2d-gpt-4", - "尝试": "嘗試", - "什么都没有": "什麼都沒有", - "代理设置": "代理設置", - "请求处理结束": "請求處理結束", - "将结果写入markdown文件中": "將結果寫入markdown文件中", - "experiment等": "實驗等", - "添加一个萌萌的看板娘": "添加一個萌萌的看板娘", - "现在": "現在", - "当前软件运行的端口号": "當前軟件運行的端口號", - "第n组插件": "第n組插件", - "不受git管控": "不受git管控", - "基础功能区的回调函数注册": "基礎功能區的回調函數註冊", - "句子结束标志": "句子結束標誌", - "GPT参数": "GPT參數", - "按输入的匹配模式寻找上传的非压缩文件和已解压的文件": "按輸入的匹配模式尋找上傳的非壓縮文件和已解壓的文件", - "函数插件贡献者": "函數插件貢獻者", - "用户提示": "用戶提示", - "此版本使用pdfminer插件": "此版本使用pdfminer插件", - "如果换行符前为句子结束标志": "如果換行符前為句子結束標誌", - "在gpt输出代码的中途": "在gpt輸出代碼的中途", - "中转网址预览": "中轉網址預覽", - "自动截断": "自動截斷", - "当無法用標點、空行分割時": "當無法用標點、空行分割時", - "意外Json結構": "意外Json結構", - "需要讀取和清理文本的pdf文件路徑": "需要讀取和清理文本的pdf文件路徑", - "HotReload的裝飾器函數": "HotReload的裝飾器函數", - "chatGPT 分析報告": "chatGPT 分析報告", - "如參考文獻、腳註、圖註等": "如參考文獻、腳註、圖註等", - "的api-key": "的api-key", - "第二組插件": "第二組插件", - "當前代理可用性": "當前代理可用性", - "列表遞歸接龍": "列表遞歸接龍", - "這個bug沒找到觸發條件": "這個bug沒找到觸發條件", - "喚起高級參數輸入區": "喚起高級參數輸入區", - "但大部分場合下並不需要修改": "但大部分場合下並不需要修改", - "盡量是完整的一個section": "盡量選擇完整的一個章節", - "如果OpenAI不響應": "如果OpenAI不響應", - "等文本特殊符號轉換為其基本形式來對文本進行歸一化處理": "等文本特殊符號轉換為其基本形式來對文本進行歸一化處理", - "你的回答必須簡單明了": "你的回答必須簡單明了", - "對話歷史文件損壞!": "對話歷史文件損壞!", - "每一塊": "每一塊", - "如果某個子任務出錯": "如果某個子任務出錯", - "切分和重新整合": "切分和重新整合", - "Token限制下的截断与处理": "Token限制下的截斷與處理", - "仅支持Win平台": "僅支持Win平臺", - "并行任务数量限制": "並行任務數量限制", - "已重置": "已重置", - "如果要使用Newbing": "如果要使用Newbing", - "前言": "前言", - "理解PDF论文内容": "理解PDF論文內容", - "如果有的话": "如果有的話", - "功能区显示开关与功能区的互动": "功能區顯示開關與功能區的互動", - "前者API2D的": "前者API2D的", - "如果要使用MOSS": "如果要使用MOSS", - "源文件太多": "源文件太多", - "ChatGLM尚未加载": "ChatGLM尚未加載", - "不可高于3": "不可高於3", - "运行方法 python crazy_functions/crazy_functions_test.py": "運行方法 python crazy_functions/crazy_functions_test.py", - "清除历史": "清除歷史", - "如果要使用jittorllms": "如果要使用jittorllms", - "更换模型 & SysPrompt & 交互界面布局": "更換模型 & SysPrompt & 交互界面布局", - "是之前的对话列表": "是之前的對話列表", - "开始了吗": "開始了嗎", - "输入": "輸入", - "打开你的*学*网软件查看代理的协议": "打開你的*學*網軟件查看代理的協議", - "默认False": "默認False", - "获取页面上的文本信息": "獲取頁面上的文本信息", - "第一页清理后的文本内容列表": "第一頁清理後的文本內容列表", - "并定义了一个名为decorated的内部函数": "並定義了一個名為decorated的內部函數", - "你是一个学术翻译": "你是一個學術翻譯", - "OpenAI拒绝了请求": "OpenAI拒絕了請求", - "提示": "提示", - "返回重试": "返回重試", - "以下“红颜色”标识的函数插件需从输入区读取路径作为参数": "以下“紅顏色”標識的函數插件需從輸入區讀取路徑作為參數", - "这个函数用stream的方式解决这个问题": "這個函數用stream的方式解決這個問題", - "ChatGPT 学术优化": "ChatGPT 學術優化", - "去除短块": "去除短塊", - "第一组插件": "第一組插件", - "这是什么": "這是什麼", - "在传递chatbot的过程中不要将其丢弃": "在傳遞chatbot的過程中不要將其丟棄", - "下载PDF文档": "下載PDF文檔", - "以下是信息源": "以下是信息源", - "本组文件为": "本組檔案為", - "更新函数代码": "更新函數代碼", - "解析的结果如下": "解析的結果如下", - "逻辑较乱": "邏輯較亂", - 
"存入": "存入", - "具备完备的交互功能": "具備完備的交互功能", - "安装jittorllms依赖后将完全破坏现有的pytorch环境": "安裝jittorllms依賴後將完全破壞現有的pytorch環境", - "看门狗的耐心": "看門狗的耐心", - "点击展开“文件上传区”": "點擊展開“文件上傳區”", - "翻译摘要等": "翻譯摘要等", - "返回值": "返回值", - "默认允许多少路线程同时访问OpenAI": "默認允許多少路線程同時訪問OpenAI", - "这是第": "這是第", - "把本项目源代码切换成全英文": "把本項目源代碼切換成全英文", - "找不到任何html文件": "找不到任何html文件", - "假如重启失败": "假如重啟失敗", - "感谢热情的": "感謝熱情的", - "您若希望分享新的功能模组": "您若希望分享新的功能模組", - "并在新模块中重新加载函数": "並在新模塊中重新加載函數", - "则会在溢出时暴力截断": "則會在溢出時暴力截斷", - "源码自译解": "原始碼自譯解", - "开始正式执行任务": "開始正式執行任務", - "ChatGLM响应异常": "ChatGLM響應異常", - "用户界面对话窗口句柄": "用戶界面對話窗口句柄", - "左右布局": "左右佈局", - "后面两句是": "後面兩句是", - "可同时填写多个API-KEY": "可同時填寫多個API-KEY", - "对各个llm模型进行单元测试": "對各個llm模型進行單元測試", - "为了更好的效果": "為了更好的效果", - "jittorllms 没有 sys_prompt 接口": "jittorllms沒有sys_prompt接口", - "直接取出来": "直接取出來", - "不具备多线程能力的函数": "不具備多線程能力的函數", - "单行 + 字体大": "單行+字體大", - "正在分析一个源代码项目": "正在分析一個源代碼項目", - "直接退出": "直接退出", - "稍后可能需要再试一次": "稍後可能需要再試一次", - "开始重试": "開始重試", - "没有 sys_prompt 接口": "沒有sys_prompt接口", - "只保留文件名节省token": "只保留文件名節省token", - "肯定已经都结束了": "肯定已經都結束了", - "用&符號分隔": "&", - "但本地存儲了以下歷史文件": "以下是本地儲存的歷史文件清單", - "對全文進行概括": "全文概述", - "以下是一篇學術論文的基礎信息": "以下是學術論文的基本信息", - "正在提取摘要並下載PDF文檔……": "正在提取摘要並下載PDF文件……", - "1. 對原始文本進行歸一化處理": "1. 正規化原始文本", - "問題": "問題", - "用於基礎的對話功能": "用於基礎的對話功能", - "獲取設置": "獲取設置", - "如果缺少依賴": "如果缺少依賴項", - "第6步": "第6步", - "處理markdown文本格式的轉變": "處理Markdown文本格式轉換", - "功能、貢獻者": "功能、貢獻者", - "中文Latex項目全文潤色": "中文LaTeX項目全文潤色", - "等待newbing回復的片段": "等待newbing回復的片段", - "寫入文件": "寫入文件", - "下載pdf文件未成功": "下載PDF文件失敗", - "將生成的報告自動投射到文件上傳區": "將生成的報告自動上傳到文件區", - "函數插件作者": "函數插件作者", - "將要匹配的模式": "將要匹配的模式", - "正在分析一个项目的源代码": "正在分析一個專案的源代碼", - "使每个段落之间有两个换行符分隔": "使每個段落之間有兩個換行符分隔", - "并在被装饰的函数上执行": "並在被裝飾的函數上執行", - "更新完成": "更新完成", - "请先把模型切换至gpt-xxxx或者api2d-xxxx": "請先把模型切換至gpt-xxxx或者api2d-xxxx", - "结果写入文件": "結果寫入文件", - "在执行过程中遭遇问题": "在執行過程中遭遇問題", - "找不到任何文件": "找不到任何文件", - "给gpt的静默提醒": "給gpt的靜默提醒", - "远程返回错误": "遠程返回錯誤", - "例如\\section": "例如\\section", - "该函数详细注释已添加": "該函數詳細注釋已添加", - "对文本进行归一化处理": "對文本進行歸一化處理", - "注意目前不能多人同时调用NewBing接口": "注意目前不能多人同時調用NewBing接口", - "来保留函数的元信息": "來保留函數的元信息", - "一般是文本过长": "一般是文本過長", - "切割PDF": "切割PDF", - "开始下一个循环": "開始下一個循環", - "正在开始汇总": "正在開始匯總", - "建议使用docker环境!": "建議使用docker環境!", - "质能方程是描述质量与能量之间的当量关系的方程": "質能方程是描述質量與能量之間的當量關係的方程", - "子进程执行": "子進程執行", - "清理后的文本内容字符串": "清理後的文本內容字串", - "石板色": "石板色", - "Bad forward key. API2D账户额度不足": "Bad forward key. 
API2D帳戶額度不足", - "摘要在 .gs_rs 中的文本": "摘要在 .gs_rs 中的文本", - "请复制并转到以下URL": "請複製並轉到以下URL", - "然后用for+append循环重新赋值": "然後用for+append循環重新賦值", - "文章极长": "文章極長", - "请从数据中提取信息": "請從數據中提取信息", - "为了安全而隐藏绝对地址": "為了安全而隱藏絕對地址", - "OpenAI绑了信用卡的用户可以填 16 或者更高": "OpenAI綁了信用卡的用戶可以填 16 或者更高", - "gpt4现在只对申请成功的人开放": "gpt4現在只對申請成功的人開放", - "问号": "問號", - "并合并为一个字符串": "並合併為一個字串", - "文件上传区": "文件上傳區", - "这个函数运行在主进程": "這個函數運行在主進程", - "执行中": "執行中", - "修改函数插件后": "修改函數插件後", - "请你阅读以下学术论文相关的材料": "請你閱讀以下學術論文相關的材料", - "加载需要一段时间": "加載需要一段時間", - "单线程": "單線程", - "5s之后重启": "5秒後重啟", - "文件名是": "文件名是", - "主进程执行": "主進程執行", - "如何理解传奇?": "如何理解傳奇?", - "解析整个Java项目": "解析整個Java項目", - "已成功": "已成功", - "该函数面向希望实现更多有趣功能的开发者": "該函數面向希望實現更多有趣功能的開發者", - "代理所在地": "代理所在地", - "解析Jupyter Notebook文件": "解析Jupyter Notebook文件", - "观测窗": "觀測窗", - "更好的UI视觉效果": "更好的UI視覺效果", - "在此处替换您要搜索的关键词": "在此處替換您要搜索的關鍵詞", - "Token溢出": "Token溢出", - "这段代码来源 https": "這段代碼來源 https", - "请求超时": "請求超時", - "已经被转化过": "已經被轉化過", - "LLM_MODEL 格式不正确!": "LLM_MODEL 格式不正確!", - "先输入问题": "請輸入問題", - "灰色": "灰色", - "锌色": "鋅色", - "里面包含以指定类型为后缀名的所有文件的绝对路径": "包含指定類型後綴名的所有文件的絕對路徑", - "实现插件的热更新": "實現插件的熱更新", - "请对下面的文章片段用中文做概述": "請用中文概述下面的文章片段", - "如果需要在二级路径下运行": "如果需要在二級路徑下運行", - "的分析如下": "的分析如下", - "但端口号都应该在最显眼的位置上": "但端口號都應該在最顯眼的位置上", - "当输入部分的token占比小于限制的3/4时": "當輸入部分的token占比小於限制的3/4時", - "第一次运行": "第一次運行", - "失败了": "失敗了", - "如果包含数学公式": "如果包含數學公式", - "需要配合修改main.py才能生效!": "需要配合修改main.py才能生效!", - "它的作用是……额……就是不起作用": "它的作用是......额......就是不起作用", - "通过裁剪来缩短历史记录的长度": "通過裁剪來縮短歷史記錄的長度", - "chatGPT对话历史": "chatGPT對話歷史", - "它可以作为创建新功能函数的模板": "它可以作為創建新功能函數的模板", - "生成一个请求线程": "生成一個請求線程", - "$m$是质量": "$m$是質量", - ";4、引用数量": ";4、引用數量", - "NewBing响应缓慢": "NewBing響應緩慢", - "提交": "提交", - "test_联网回答问题": "test_聯網回答問題", - "加载tokenizer完毕": "加載tokenizer完畢", - "HotReload 的意思是热更新": "HotReload 的意思是熱更新", - "随便显示点什么防止卡顿的感觉": "隨便顯示點什麼防止卡頓的感覺", - "对整个Markdown项目进行翻译": "對整個Markdown項目進行翻譯", - "替换操作": "替換操作", - "然后通过getattr函数获取函数名": "然後通過getattr函數獲取函數名", - "并替换为空字符串": "並替換為空字符串", - "逐个文件分析已完成": "逐個文件分析已完成", - "填写之前不要忘记把USE_PROXY改成True": "填寫之前不要忘記把USE_PROXY改成True", - "不要遗漏括号": "不要遺漏括號", - "避免包括解释": "避免包括解釋", - "把newbing的长长的cookie放到这里": "把newbing的長長的cookie放到這裡", - "如API和代理网址": "如API和代理網址", - "模块预热": "模塊預熱", - "Latex项目全文英译中": "Latex項目全文英譯中", - "尝试计算比例": "嘗試計算比例", - "OpenAI所允許的最大並行過載": "OpenAI所允許的最大並行過載", - "向chatbot中添加簡單的意外錯誤信息": "向chatbot中添加簡單的意外錯誤信息", - "history至少釋放二分之一": "history至少釋放二分之一", - "”補上": "”補上", - "我們剝離Introduction之後的部分": "我們剝離Introduction之後的部分", - "嘗試加載": "嘗試加載", - "**函數功能**": "**函數功能**", - "藍色": "藍色", - "重置文件的創建時間": "重置文件的創建時間", - "再失敗就沒辦法了": "再失敗就沒辦法了", - "解析整個Python項目": "解析整個Python項目", - "此處不修改": "此處不修改", - "安裝ChatGLM的依賴": "安裝ChatGLM的依賴", - "使用wraps": "使用wraps", - "優先級1. 獲取環境變量作為配置": "優先級1. 
獲取環境變量作為配置", - "遞歸地切割PDF文件": "遞歸地切割PDF文件", - "隨變按鈕的回調函數註冊": "隨變按鈕的回調函數註冊", - "我們": "我們", - "然後請使用Markdown格式封裝": "然後請使用Markdown格式封裝", - "網絡的遠程文件": "網絡的遠程文件", - "主进程统一调用函数接口": "主進程統一調用函數介面", - "请按以下描述给我发送图片": "請按以下描述給我發送圖片", - "正常对话时使用": "正常對話時使用", - "不需要高级参数": "不需要高級參數", - "双换行": "雙換行", - "初始值是摘要": "初始值是摘要", - "已经对该文章的所有片段总结完毕": "已經對該文章的所有片段總結完畢", - "proxies格式错误": "proxies格式錯誤", - "一次性完成": "一次性完成", - "设置一个token上限": "設置一個token上限", - "接下来": "接下來", - "以_array结尾的输入变量都是列表": "以_array結尾的輸入變量都是列表", - "收到以下文件": "收到以下文件", - "但显示Token不足": "但顯示Token不足", - "可以多线程并行": "可以多線程並行", - "带Cookies的Chatbot类": "帶Cookies的Chatbot類", - "空空如也的输入栏": "空空如也的輸入欄", - "然后回车键提交后即可生效": "然後回車鍵提交後即可生效", - "这是必应": "這是必應", - "聊天显示框的句柄": "聊天顯示框的句柄", - "集合文件": "集合文件", - "并显示到聊天当中": "並顯示到聊天當中", - "设置5秒即可": "設置5秒即可", - "不懂就填localhost或者127.0.0.1肯定错不了": "不懂就填localhost或者127.0.0.1肯定錯不了", - "安装方法": "安裝方法", - "Openai 限制免费用户每分钟20次请求": "Openai 限制免費用戶每分鐘20次請求", - "建议": "建議", - "将普通文本转换为Markdown格式的文本": "將普通文本轉換為Markdown格式的文本", - "应急食品是“原神”游戏中的角色派蒙的外号": "應急食品是“原神”遊戲中的角色派蒙的外號", - "不要修改!!": "不要修改!!", - "注意无论是inputs还是history": "注意無論是inputs還是history", - "读取Latex文件": "讀取Latex文件", - "\\n 翻译": "\\n 翻譯", - "第 1 步": "第 1 步", - "代理配置": "代理配置", - "temperature是LLM的内部调优参数": "temperature是LLM的內部調優參數", - "解析整个Lua项目": "解析整個Lua項目", - "重试几次": "重試幾次", - "接管gradio默认的markdown处理方式": "接管gradio默認的markdown處理方式", - "请注意自我隐私保护哦!": "請注意自我隱私保護哦!", - "导入软件依赖失败": "導入軟件依賴失敗", - "方便调试和定位问题": "方便調試和定位問題", - "请用代码块输出代码": "請用代碼塊輸出代碼", - "字符数小于100": "字符數小於100", - "程序终止": "程序終止", - "处理历史信息": "處理歷史信息", - "在界面上显示结果": "在界面上顯示結果", - "自动定位": "自動定位", - "读Tex论文写摘要": "讀Tex論文寫摘要", - "截断时的颗粒度": "截斷時的顆粒度", - "第 4 步": "第 4 步", - "正在处理中": "正在處理中", - "酸橙色": "酸橙色", - "分别为 __enter__": "分別為 __enter__", - "Json异常": "Json異常", - "输入过长已放弃": "輸入過長已放棄", - "按照章节切割PDF": "按照章節切割PDF", - "作为切分点": "作為切分點", - "用一句话概括程序的整体功能": "用一句話概括程序的整體功能", - "PDF文件也已经下载": "PDF文件也已經下載", - "您可能选择了错误的模型或请求源": "您可能選擇了錯誤的模型或請求源", - "则终止": "則終止", - "完成了吗": "完成了嗎", - "表示要搜索的文件类型": "表示要搜索的文件類型", - "文件内容是": "文件內容是", - "亮色主题": "亮色主題", - "函数插件输入输出接驳区": "函數插件輸入輸出接驳區", - "异步任务开始": "異步任務開始", - "Index 2 框框": "索引 2 框框", - "方便实现复杂的功能逻辑": "方便實現複雜的功能邏輯", - "警告": "警告", - "放在这里": "放在這裡", - "处理中途中止的情况": "處理中途中止的情況", - "结尾除去一次": "結尾除去一次", - "代码开源和更新": "代碼開源和更新", - "列表": "列表", - "状态": "狀態", - "第9步": "第9步", - "的标识": "的標識", - "Call jittorllms fail 不能正常加载jittorllms的参数": "Call jittorllms 失敗 不能正常加載 jittorllms 的參數", - "中性色": "中性色", - "优先": "優先", - "读取配置": "讀取配置", - "jittorllms消耗大量的内存": "jittorllms消耗大量的內存", - "Latex项目全文中译英": "Latex項目全文中譯英", - "在代理软件的设置里找": "在代理軟件的設置裡找", - "否则将导致每个人的NewBing问询历史互相渗透": "否則將導致每個人的NewBing問詢歷史互相滲透", - "这个函数运行在子进程": "這個函數運行在子進程", - "2. 长效解决方案": "2. 長效解決方案", - "Windows上还需要安装winrar软件": "Windows上還需要安裝winrar軟件", - "正在执行一些模块的预热": "正在執行一些模塊的預熱", - "一键DownloadArxivPapersAndTranslateAbstract": "一鍵DownloadArxivPapersAndTranslateAbstract", - "完成全部响应": "完成全部響應", - "输入中可能存在乱码": "輸入中可能存在亂碼", - "用了很多trick": "用了很多trick", - "填写格式是": "填寫格式是", - "预处理一波": "預處理一波", - "如果只询问1个大语言模型": "如果只詢問1個大語言模型", - "第二部分": "第二部分", - "或历史数据过长. 历史缓存数据已部分释放": "或歷史數據過長. 
歷史緩存數據已部分釋放", - "文章内容是": "文章內容是", - "二、论文翻译": "二、論文翻譯", - "汇总报告已经添加到右侧“文件上传区”": "匯總報告已經添加到右側“檔案上傳區”", - "图像中转网址": "圖像中轉網址", - "第4次尝试": "第4次嘗試", - "越新越好": "越新越好", - "解决一个mdx_math的bug": "解決一個mdx_math的bug", - "中间过程不予显示": "中間過程不予顯示", - "路径或网址": "路徑或網址", - "您可以试试让AI写一个Related Works": "您可以試試讓AI寫一個Related Works", - "开始接收chatglm的回复": "開始接收chatglm的回覆", - "环境变量可以是": "環境變數可以是", - "请将此部分润色以满足学术标准": "請將此部分潤色以滿足學術標準", - "* 此函数未来将被弃用": "* 此函數未來將被棄用", - "替换其他特殊字符": "替換其他特殊字元", - "该模板可以实现ChatGPT联网信息综合": "該模板可以實現ChatGPT聯網資訊綜合", - "当前问答": "當前問答", - "洋红色": "洋紅色", - "不需要重启程序": "不需要重啟程式", - "所有线程同时开始执行任务函数": "所有線程同時開始執行任務函數", - "因此把prompt加入 history": "因此將prompt加入歷史", - "刷新界面": "重新整理介面", - "青色": "藍綠色", - "实时在UI上反馈远程数据流": "即時在UI上回饋遠程數據流", - "第一种情况": "第一種情況", - "的耐心": "的耐心", - "提取所有块元的文本信息": "提取所有塊元的文本信息", - "裁剪时": "裁剪時", - "对从 PDF 提取出的原始文本进行清洗和格式化处理": "對從PDF提取出的原始文本進行清洗和格式化處理", - "如果是第一次运行": "如果是第一次運行", - "程序完成": "程式完成", - "api-key不满足要求": "API金鑰不滿足要求", - "布尔值": "布林值", - "尝试导入依赖": "嘗試匯入相依性", - "逐个文件分析": "逐個檔案分析", - "详情见get_full_error的输出": "詳情見get_full_error的輸出", - "检测到": "偵測到", - "手动指定和筛选源代码文件类型": "手動指定和篩選原始程式碼檔案類型", - "进入任务等待状态": "進入任務等待狀態", - "当 输入部分的token占比 小于 全文的一半时": "當輸入部分的token佔比小於全文的一半時", - "查询代理的地理位置": "查詢代理的地理位置", - "是否在输入过长时": "是否在輸入過長時", - "chatGPT分析报告": "chatGPT分析報告", - "然后yeild出去": "然後yield出去", - "用户取消了程序": "使用者取消了程式", - "琥珀色": "琥珀色", - "这里是特殊函数插件的高级参数输入区": "這裡是特殊函數插件的高級參數輸入區", - "第 2 步": "第 2 步", - "字符串": "字串", - "检测到程序终止": "偵測到程式終止", - "对整个Latex项目进行润色": "對整個Latex專案進行潤色", - "方法则会被调用": "方法則會被調用", - "把完整输入-输出结果显示在聊天框": "把完整輸入-輸出結果顯示在聊天框", - "本地文件预览": "本地檔案預覽", - "接下来请你逐文件分析下面的论文文件": "接下來請你逐檔案分析下面的論文檔案", - "英语关键词": "英語關鍵詞", - "一-鿿": "一-鿿", - "尝试识别section": "嘗試識別section", - "用于显示给用户": "用於顯示給使用者", - "newbing回复的片段": "newbing回覆的片段", - "的转化": "的轉換", - "将要忽略匹配的文件名": "將要忽略匹配的檔案名稱", - "生成正则表达式": "生成正則表示式", - "失败时的重试次数": "失敗時的重試次數", - "亲人两行泪": "親人兩行淚", - "故可以只分析文章内容": "故可以只分析文章內容", - "然后回车提交": "然後按下Enter提交", - "并提供改进建议": "並提供改進建議", - "不可多线程": "不可多執行緒", - "这个文件用于函数插件的单元测试": "這個檔案用於函數插件的單元測試", - "用一张Markdown表格简要描述以下文件的功能": "用一張Markdown表格簡要描述以下檔案的功能", - "可用clear将其清空": "可用clear將其清空", - "发送至LLM": "發送至LLM", - "先在input输入编号": "先在input輸入編號", - "更新失败": "更新失敗", - "相关功能不稳定": "相關功能不穩定", - "自动解压": "自動解壓", - "效果奇好": "效果奇佳", - "拆分过长的IPynb文件": "拆分過長的IPynb檔案", - "份搜索结果": "搜尋結果", - "如果没有指定文件名": "如果沒有指定檔案名稱", - "有$标识的公式符号": "有$標識的公式符號", - "跨平台": "跨平台", - "最终": "最終", - "第3次尝试": "第三次嘗試", - "检查代理服务器是否可用": "檢查代理伺服器是否可用", - "再例如一个包含了待处理文件的路径": "再例如一個包含了待處理檔案的路徑", - "注意文章中的每一句话都要翻译": "注意文章中的每一句話都要翻譯", - "修改它": "修改它", - "发送 GET 请求": "發送 GET 請求", - "判定为不是正文": "判定為不是正文", - "默认是.md": "預設是.md", - "终止按钮的回调函数注册": "終止按鈕的回調函數註冊", - "搜索需要处理的文件清单": "搜尋需要處理的檔案清單", - "当历史上下文过长时": "當歷史上下文過長時", - "不包含任何可用于": "不包含任何可用於", - "本项目现已支持OpenAI和API2D的api-key": "本專案現已支援OpenAI和API2D的api-key", - "异常原因": "異常原因", - "additional_fn代表点击的哪个按钮": "additional_fn代表點擊的哪個按鈕", - "注意": "注意", - "找不到任何.docx或doc文件": "找不到任何.docx或doc文件", - "刷新用户界面": "刷新使用者介面", - "失败": "失敗", - "Index 0 文本": "索引 0 文本", - "你需要翻译以下内容": "你需要翻譯以下內容", - "chatglm 没有 sys_prompt 接口": "chatglm 沒有 sys_prompt 介面", - "您的 API_KEY 是": "您的 API_KEY 是", - "请缩减输入文件的数量": "請減少輸入檔案的數量", - "并且将结合上下文内容": "並且將結合上下文內容", - "返回当前系统中可用的未使用端口": "返回目前系統中可用的未使用埠口", - "以下配置可以优化体验": "以下配置可以優化體驗", - "常规情况下": "一般情況下", - "递归": "遞迴", - "分解代码文件": "分解程式碼檔案", - "用户反馈": "使用者回饋", - "第 0 步": "第 0 步", - "即将更新pip包依赖……": "即將更新pip套件相依性......", - "请从": "請從", - "第二种情况": "第二種情況", - "NEWBING_COOKIES未填寫或有格式錯誤": "NEWBING_COOKIES未填寫或格式錯誤", - "以上材料已經被寫入": "以上材料已經被寫入", - "找圖片": "尋找圖片", - "函數插件-固定按鈕區": "函數插件-固定按鈕區", - "該文件中主要包含三個函數": "該文件主要包含三個函數", - 
"用於與with語句一起使用": "用於與with語句一起使用", - "插件初始化中": "插件初始化中", - "文件讀取完成": "文件讀取完成", - "讀取文件": "讀取文件", - "高危設置!通過修改此設置": "高危設置!通過修改此設置", - "所有文件都總結完成了嗎": "所有文件都總結完成了嗎", - "限制的3/4時": "限制的3/4時", - "取決於": "取決於", - "預處理": "預處理", - "至少一個線程任務Token溢出而失敗": "至少一個線程任務Token溢出而失敗", - "一、論文概況": "一、論文概況", - "TGUI不支持函數插件的實現": "TGUI不支持函數插件的實現", - "拒絕服務": "拒絕服務", - "請更換為API_URL_REDIRECT配置": "請更換為API_URL_REDIRECT配置", - "是否自動處理token溢出的情況": "是否自動處理token溢出的情況", - "和": "和", - "双层列表": "雙層列表", - "做一些外观色彩上的调整": "做一些外觀色彩上的調整", - "发送请求到子进程": "發送請求到子進程", - "配置信息如下": "配置信息如下", - "从而实现分批次处理": "從而實現分批次處理", - "找不到任何.ipynb文件": "找不到任何.ipynb文件", - "代理网络的地址": "代理網絡的地址", - "新版本": "新版本", - "用于实现Python函数插件的热更新": "用於實現Python函數插件的熱更新", - "将中文句号": "將中文句號", - "警告!被保存的对话历史可以被使用该系统的任何人查阅": "警告!被保存的對話歷史可以被使用該系統的任何人查閱", - "用于数据流可视化": "用於數據流可視化", - "第三部分": "第三部分", - "界面更新": "界面更新", - "**输出参数说明**": "**輸出參數說明**", - "其中$E$是能量": "其中$E$是能量", - "这个内部函数可以将函数的原始定义更新为最新版本": "這個內部函數可以將函數的原始定義更新為最新版本", - "不要修改任何LaTeX命令": "不要修改任何LaTeX命令", - "英译中": "英譯中", - "将错误显示出来": "顯示錯誤", - "*代表通配符": "*代表通配符", - "找不到任何lua文件": "找不到任何lua文件", - "准备文件的下载": "準備下載文件", - "爬取搜索引擎的结果": "爬取搜尋引擎的結果", - "例如在windows cmd中": "例如在windows cmd中", - "一般原样传递下去就行": "一般原樣傳遞下去就行", - "免费用户填3": "免費用戶填3", - "在汇总报告中隐藏啰嗦的真实输入": "在匯總報告中隱藏啰嗦的真實輸入", - "Tiktoken未知错误": "Tiktoken未知錯誤", - "整理结果": "整理結果", - "也许等待十几秒后": "也許等待十幾秒後", - "将匹配到的数字作为替换值": "將匹配到的數字作為替換值", - "对每一个源代码文件": "對每一個源代碼文件", - "补上后面的": "補上後面的", - "调用时": "調用時", - "也支持同时填写多个api-key": "也支持同時填寫多個api-key", - "第二层列表是对话历史": "第二層列表是對話歷史", - "询问多个GPT模型": "詢問多個GPT模型", - "您可能需要手动安装新增的依赖库": "您可能需要手動安裝新增的依賴庫", - "隨機負載均衡": "隨機負載均衡", - "等待多線程操作": "等待多線程操作", - "質能方程式": "質能方程式", - "需要預先pip install py7zr": "需要預先pip install py7zr", - "是否丟棄掉 不是正文的內容": "是否丟棄掉 不是正文的內容", - "加載失敗!": "加載失敗!", - "然後再寫一段英文摘要": "然後再寫一段英文摘要", - "從以上搜索結果中抽取信息": "從以上搜索結果中抽取信息", - "response中會攜帶traceback報錯信息": "response中會攜帶traceback報錯信息", - "放到history中": "放到history中", - "不能正常加載jittorllms的參數!": "不能正常加載jittorllms的參數!", - "需要預先pip install rarfile": "需要預先pip install rarfile", - "以免輸入溢出": "以免輸入溢出", - "MOSS消耗大量的內存": "MOSS消耗大量的內存", - "獲取預處理函數": "獲取預處理函數", - "缺少MOSS的依賴": "缺少MOSS的依賴", - "多線程": "多線程", - "結束": "結束", - "請使用Markdown": "請使用Markdown", - "匹配^數字^": "匹配^數字^", - "负责把学术论文准确翻译成中文": "負責將學術論文準確翻譯成中文", - "否则可能导致显存溢出而造成卡顿": "否則可能導致顯存溢出而造成卡頓", - "不输入即全部匹配": "不輸入即全部匹配", - "下面是一些学术文献的数据": "下面是一些學術文獻的數據", - "网络卡顿、代理失败、KEY失效": "網絡卡頓、代理失敗、KEY失效", - "其他的排队等待": "其他的排隊等待", - "表示要搜索的文件或者文件夹路径或网络上的文件": "表示要搜索的文件或者文件夾路徑或網絡上的文件", - "当输入部分的token占比": "當輸入部分的token佔比", - "你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性": "你的任務是改進所提供文本的拼寫、語法、清晰、簡潔和整體可讀性", - "这是什么功能": "這是什麼功能", - "剩下的情况都开头除去": "剩下的情況都開頭除去", - "清除换行符": "清除換行符", - "请提取": "請提取", - "覆盖和重启": "覆蓋和重啟", - "发送至chatGPT": "發送至chatGPT", - "+ 已经汇总的文件组": "+ 已經匯總的文件組", - "插件": "插件", - "OpenAI模型选择是": "OpenAI模型選擇是", - "原文": "原文", - "您可以随时在history子文件夹下找回旧版的程序": "您可以隨時在history子文件夾下找回舊版的程序", - "以确保一些资源在代码块执行期间得到正确的初始化和清理": "以確保一些資源在程式碼區塊執行期間得到正確的初始化和清理", - "它们会继续向下调用更底层的LLM模型": "它們會繼續向下調用更底層的LLM模型", - "GPT输出格式错误": "GPT輸出格式錯誤", - "中译英": "中譯英", - "无代理状态下很可能无法访问OpenAI家族的模型": "無代理狀態下很可能無法訪問OpenAI家族的模型", - "已失败": "已失敗", - "最大线程数": "最大線程數", - "读取时首先看是否存在私密的config_private配置文件": "讀取時首先看是否存在私密的config_private配置文件", - "必要时": "必要時", - "在装饰器内部": "在裝飾器內部", - "api2d 正常完成": "api2d 正常完成", - "您可以调用“LoadConversationHistoryArchive”还原当下的对话": "您可以調用“LoadConversationHistoryArchive”還原當下的對話", - "找不到任何golang文件": "找不到任何golang文件", - "找不到任何rust文件": "找不到任何rust文件", - "输入了已经经过转化的字符串": "輸入了已經經過轉換的字串", - "是否在结束时": "是否在結束時", - "存档文件详情": "存檔文件詳情", - "用英文逗号分割": "用英文逗號分割", - "已删除": "已刪除", - "收到消息": "收到訊息", - "系统输入": 
"系統輸入", - "读取配置文件": "讀取配置檔", - "跨线程传递": "跨線程傳遞", - "Index 1 字体": "索引 1 字型", - "设定一个最小段落长度阈值": "設定最小段落長度閾值", - "流式获取输出": "流式取得輸出", - "默认按钮颜色是 secondary": "預設按鈕顏色為 secondary", - "请对下面的程序文件做一个概述": "請對下面的程式檔案做一個概述", - "当文件被上传时的回调函数": "當檔案被上傳時的回撥函數", - "对话窗的高度": "對話窗的高度", - "Github更新地址": "Github更新位址", - "然后在用常规的": "然後再用常規的", - "读取Markdown文件": "讀取Markdown檔案", - "会把列表拆解": "會拆解列表", - "OpenAI绑定信用卡可解除频率限制": "OpenAI綁定信用卡可解除頻率限制", - "可能需要一点时间下载参数": "可能需要一點時間下載參數", - "需要访问谷歌": "需要訪問谷歌", - "根据给定的匹配结果来判断换行符是否表示段落分隔": "根據給定的匹配結果來判斷換行符是否表示段落分隔", - "请提交新问题": "請提交新問題", - "测试功能": "測試功能", - "尚未充分测试的函数插件": "尚未充分測試的函數插件", - "解析此项目本身": "解析此專案本身", - "提取摘要": "提取摘要", - "用于输入给GPT的前提提示": "用於輸入給GPT的前提提示", - "第一步": "第一步", - "此外": "此外", - "找不到任何前端相关文件": "找不到任何前端相關檔案", - "输入其他/无输入+回车=不更新": "輸入其他/無輸入+回車=不更新", - "句号": "句號", - "如果最后成功了": "如果最後成功了", - "导致输出不完整": "導致輸出不完整", - "并修改代码拆分file_manifest列表": "並修改程式碼拆分file_manifest列表", - "在读取API_KEY时": "在讀取API_KEY時", - "迭代地历遍整个文章": "迭代地歷遍整個文章", - "存在一行极长的文本!": "存在一行極長的文字!", - "private_upload里面的文件名在解压zip后容易出现乱码": "private_upload裡面的檔案名在解壓縮zip後容易出現亂碼", - "清除当前溢出的输入": "清除當前溢出的輸入", - "只输出转化后的英文代码": "只輸出轉換後的英文程式碼", - "打开插件列表": "打開外掛程式列表", - "查询版本和用户意见": "查詢版本和使用者意見", - "需要用此选项防止高频地请求openai导致错误": "需要用此選項防止高頻地請求openai導致錯誤", - "有肉眼不可见的小变化": "有肉眼不可見的小變化", - "返回一个新的字符串": "返回一個新的字串", - "如果是.doc文件": "如果是.doc文件", - "英语学术润色": "英語學術潤色", - "已经全部完成": "已經全部完成", - "该文件中主要包含2个函数": "該文件中主要包含2個函數", - "捕捉函数f中的异常并封装到一个生成器中返回": "捕捉函數f中的異常並封裝到一個生成器中返回", - "兼容旧版的配置": "兼容舊版的配置", - "LLM的内部调优参数": "LLM的內部調優參數", - "请查收": "請查收", - "输出了前面的": "輸出了前面的", - "用多种方式组合": "用多種方式組合", - "等待中": "等待中", - "从最长的条目开始裁剪": "從最長的條目開始裁剪", - "就是临时文件夹的路径": "就是臨時文件夾的路徑", - "体验gpt-4可以试试api2d": "體驗gpt-4可以試試api2d", - "提交任务": "提交任務", - "已配置": "已配置", - "第三方库": "第三方庫", - "将y中最后一项的输入部分段落化": "將y中最後一項的輸入部分段落化", - "高级函数插件": "Advanced Function Plugin", - "等待jittorllms响应中": "Waiting for jittorllms response", - "解析整个C++项目": "Parsing the entire C++ project", - "你是一名专业的学术教授": "You are a professional academic professor", - "截断重试": "Truncated retry", - "即在代码结构不变得情况下取代其他的上下文管理器": "That is, replace other context managers without changing the code structure", - "表示函数是否成功执行": "Indicates whether the function was executed successfully", - "处理多模型并行等细节": "Handling details such as parallelism of multiple models", - "不显示中间过程": "Do not display intermediate process", - "chatGPT的内部调优参数": "Internal tuning parameters of chatGPT", - "你必须使用Markdown表格": "You must use Markdown tables", - "第 5 步": "Step 5", - "jittorllms响应异常": "jittorllms response exception", - "在项目根目录运行这两个指令": "Run these two commands in the project root directory", - "获取tokenizer": "Get tokenizer", - "chatbot 为WebUI中显示的对话列表": "chatbot is the list of conversations displayed in WebUI", - "test_解析一个Cpp项目": "test_parse a Cpp project", - "将对话记录history以Markdown格式写入文件中": "Write the conversations record history to a file in Markdown format", - "装饰器函数": "Decorator function", - "玫瑰色": "Rose color", - "将单空行": "刪除單行空白", - "祖母绿": "綠松石色", - "整合所有信息": "整合所有資訊", - "如温度和top_p等": "例如溫度和top_p等", - "重试中": "重試中", - "月": "月份", - "localhost意思是代理软件安装在本机上": "localhost意思是代理軟體安裝在本機上", - "的长度必须小于 2500 个 Token": "長度必須小於 2500 個 Token", - "抽取可用的api-key": "提取可用的api-key", - "增强报告的可读性": "增強報告的可讀性", - "对话历史": "對話歷史", - "-1代表随机端口": "-1代表隨機端口", - "在函数插件中被调用": "在函數插件中被調用", - "向chatbot中添加错误信息": "向chatbot中添加錯誤訊息", - "代理可能无效": "代理可能無效", - "比如introduction": "例如introduction", - "接下来请你逐文件分析下面的工程": "接下來請你逐文件分析下面的工程", - "任务函数": "任務函數", - "删除所有历史对话文件": "刪除所有歷史對話檔案", - "找不到任何.md文件": "找不到任何.md文件", - "给出输出文件清单": "給出輸出文件清單", - "不能正常加载ChatGLM的参数!": 
"無法正常加載ChatGLM的參數!", - "不详": "不詳", - "提取出以下内容": "提取出以下內容", - "请注意": "請注意", - "不能加载Newbing组件": "無法加載Newbing組件", - "您既可以在config.py中修改api-key": "您可以在config.py中修改api-key", - "但推荐上传压缩文件": "但建議上傳壓縮文件", - "支持任意数量的llm接口": "支持任意數量的llm接口", - "材料如下": "材料如下", - "停止": "停止", - "gradio的inbrowser触发不太稳定": "gradio的inbrowser觸發不太穩定", - "带token约简功能": "帶token約簡功能", - "解析项目": "解析項目", - "尝试识别段落": "嘗試識別段落", - "输入栏用户输入的文本": "輸入欄用戶輸入的文本", - "清理规则包括": "清理規則包括", - "新版配置": "新版配置", - "如果有": "如果有", - "Call MOSS fail 不能正常加載MOSS的參數": "Call MOSS fail 不能正常加載MOSS的參數", - "根據以上分析": "根據以上分析", - "一些普通功能模塊": "一些普通功能模塊", - "汇总报告如何远程获取": "如何遠程獲取匯總報告", - "热更新prompt": "熱更新提示", - "插件调度异常": "插件調度異常", - "英文Latex项目全文润色": "英文Latex項目全文潤色", - "此外我们也提供可同步处理大量文件的多线程Demo供您参考": "此外我們也提供可同步處理大量文件的多線程Demo供您參考", - "则不解析notebook中的Markdown块": "則不解析notebook中的Markdown塊", - "备选输入区": "備選輸入區", - "个片段": "個片段", - "总结输出": "總結輸出", - "2. 把输出用的余量留出来": "2. 把輸出用的餘量留出來", - "请对下面的文章片段做一个概述": "請對下面的文章片段做一個概述", - "多线程方法": "多線程方法", - "下面是对每个参数和返回值的说明": "下面是對每個參數和返回值的說明", - "由于请求gpt需要一段时间": "由於請求gpt需要一段時間", - "历史": "歷史", - "用空格或段落分隔符替换原换行符": "用空格或段落分隔符替換原換行符", - "查找语法错误": "查找語法錯誤", - "输出 Returns": "輸出 Returns", - "在config.py中配置": "在config.py中配置", - "找不到任何.tex文件": "找不到任何.tex文件", - "一键更新协议": "一鍵更新協議", - "gradio版本较旧": "gradio版本較舊", - "灵活而简洁": "靈活而簡潔", - "等待NewBing响应中": "等待NewBing響應中", - "更多函数插件": "更多函數插件", - "作为一个标识而存在": "作為一個標識而存在", - "GPT模型返回的回复字符串": "GPT模型返回的回復字串", - "请从给定的若干条搜索结果中抽取信息": "請從給定的若干條搜索結果中抽取信息", - "请对下面的文章片段做概述": "請對下面的文章片段做概述", - "历史对话输入": "歷史對話輸入", - "请稍等": "請稍等", - "整理报告的格式": "整理報告的格式", - "保存当前的对话": "保存當前的對話", - "代理所在地查询超时": "代理所在地查詢超時", - "inputs 是本次问询的输入": "inputs是本次問詢的輸入", - "网页的端口": "網頁的端口", - "仅仅服务于视觉效果": "僅僅服務於視覺效果", - "把结果写入文件": "把結果寫入文件", - "留空即可": "留空即可", - "按钮颜色": "按鈕顏色", - "借鉴了 https": "借鉴了 https", - "Token溢出数": "Token溢出數", - "找不到任何java文件": "找不到任何java文件", - "批量总结Word文档": "批量總結Word文檔", - "一言以蔽之": "一言以蔽之", - "提取字体大小是否近似相等": "提取字體大小是否近似相等", - "直接给定文件": "直接給定文件", - "使用该模块需要额外依赖": "使用該模塊需要額外依賴", - "的配置": "的配置", - "pip install python-docx 用于docx格式": "pip install python-docx 用於docx格式", - "正在查找对话历史文件": "正在查找對話歷史文件", - "输入已识别为openai的api_key": "輸入已識別為openai的api_key", - "对整个Latex项目进行翻译": "對整個Latex項目進行翻譯", - "Y+回车=确认": "Y+回車=確認", - "正在同时咨询ChatGPT和ChatGLM……": "正在同時諮詢ChatGPT和ChatGLM……", - "根据 heuristic 规则": "根據heuristic規則", - "如1024x1024": "如1024x1024", - "函数插件区": "函數插件區", - "*** API_KEY 导入成功": "*** API_KEY 導入成功", - "请对下面的程序文件做一个概述文件名是": "請對下面的程序文件做一個概述文件名是", - "內容太長了都會觸發token數量溢出的錯誤": "內容太長了都會觸發token數量溢出的錯誤", - "沒有提供高級參數功能說明": "未提供高級參數功能說明", - "和openai的連接容易斷掉": "和openai的連接容易斷掉", - "分组+迭代处理": "分組+迭代處理", - "安装Newbing的依赖": "安裝Newbing的依賴", - "批": "批", - "代理与自动更新": "代理與自動更新", - "读取pdf文件并清理其中的文本内容": "讀取pdf文件並清理其中的文本內容", - "多线程Demo": "多線程Demo", - "\\cite和方程式": "\\cite和方程式", - "可能会导致严重卡顿": "可能會導致嚴重卡頓", - "将Markdown格式的文本转换为HTML格式": "將Markdown格式的文本轉換為HTML格式", - "建议您复制一个config_private.py放自己的秘密": "建議您複製一個config_private.py放自己的秘密", - "质能方程可以写成$$E=mc^2$$": "質能方程可以寫成$$E=mc^2$$", - "的文件": "的文件", - "是本次问询的输入": "是本次問詢的輸入", - "第三种情况": "第三種情況", - "如果同时InquireMultipleLargeLanguageModels": "如果同時InquireMultipleLargeLanguageModels", - "小于正文的": "小於正文的", - "将输入和输出解析为HTML格式": "將輸入和輸出解析為HTML格式", - "您正在调用一个": "您正在調用一個", - "缺少jittorllms的依赖": "缺少jittorllms的依賴", - "是否重置": "是否重置", - "解析整个前端项目": "解析整個前端專案", - "是否唤起高级插件参数区": "是否喚起高級插件參數區", - "pip包依赖安装出现问题": "pip包依賴安裝出現問題", - "请先转化为.docx格式": "請先轉換為.docx格式", - "整理history": "整理歷史記錄", - "缺少api_key": "缺少api_key", - "拆分过长的latex文件": "拆分過長的latex文件", - "使用markdown表格输出结果": "使用markdown表格輸出結果", - "搜集初始信息": "搜集初始信息", - "但还没输出完后面的": "但還沒輸出完後面的", - 
"在上下文执行开始的情况下": "在上下文執行開始的情況下", - "不要用代码块": "不要用代碼塊", - "比如你是翻译官怎样怎样": "例如你是翻譯官怎樣怎樣", - "装饰器函数返回内部函数": "裝飾器函數返回內部函數", - "请你作为一个学术翻译": "請你作為一個學術翻譯", - "清除重复的换行": "清除重複的換行", - "换行 -": "換行 -", - "你好": "你好", - "触发重置": "觸發重置", - "安装MOSS的依赖": "安裝MOSS的依賴", - "首先你在英文語境下通讀整篇論文": "首先你在英文語境下通讀整篇論文", - "需要清除首尾空格": "需要清除首尾空格", - "多線程函數插件中": "多線程函數插件中", - "分析用戶提供的谷歌學術": "分析用戶提供的谷歌學術", - "基本信息": "基本信息", - "python 版本建議3.9+": "python 版本建議3.9+", - "開始請求": "開始請求", - "不會實時顯示在界面上": "不會實時顯示在界面上", - "接下來兩句話只顯示在界面上": "接下來兩句話只顯示在界面上", - "根據當前的模型類別": "根據當前的模型類別", - "10個文件為一組": "10個文件為一組", - "第三組插件": "第三組插件", - "此函數逐漸地搜索最長的條目進行剪輯": "此函數逐漸地搜索最長的條目進行剪輯", - "拆分過長的Markdown文件": "拆分過長的Markdown文件", - "最多同時執行5個": "最多同時執行5個", - "裁剪input": "裁剪input", - "現在您點擊任意“紅顏色”標識的函數插件時": "現在您點擊任意“紅顏色”標識的函數插件時", - "且沒有代碼段": "且沒有代碼段", - "建議低於1": "建議低於1", - "並且對於網絡上的文件": "並且對於網絡上的文件", - "文件代码是": "檔案代碼是", - "我上传了文件": "我上傳了檔案", - "年份获取失败": "年份獲取失敗", - "解析网页内容": "解析網頁內容", - "但内部用stream的方法避免中途网线被掐": "但內部使用stream的方法避免中途網路斷線", - "这个函数用于分割pdf": "這個函數用於分割PDF", - "概括其内容": "概括其內容", - "请谨慎操作": "請謹慎操作", - "更新UI": "更新使用者介面", - "输出": "輸出", - "请先从插件列表中选择": "請先從插件列表中選擇", - "函数插件": "函數插件", - "的方式启动": "的方式啟動", - "否则在回复时会因余量太少出问题": "否則在回覆時會因餘量太少出問題", - "并替换为回车符": "並替換為換行符號", - "Newbing失败": "Newbing失敗", - "找不到任何.h头文件": "找不到任何.h頭檔案", - "执行时": "執行時", - "不支持通过环境变量设置!": "不支持透過環境變數設置!", - "获取完整的从Openai返回的报错": "獲取完整的從Openai返回的錯誤", - "放弃": "放棄", - "系统静默prompt": "系統靜默提示", - "如果子任务非常多": "如果子任務非常多", - "打印traceback": "列印追蹤信息", - "前情提要": "前情提要", - "请在config文件中修改API密钥之后再运行": "請在config文件中修改API密鑰之後再運行", - "使用正则表达式查找注释": "使用正則表達式查找註釋", - "这段代码定义了一个名为DummyWith的空上下文管理器": "這段代碼定義了一個名為DummyWith的空上下文管理器", - "用学术性语言写一段中文摘要": "用學術性語言寫一段中文摘要", - "优先级3. 获取config中的配置": "優先級3. 獲取config中的配置", - "此key无效": "此key無效", - "对话历史列表": "對話歷史列表", - "循环轮询各个线程是否执行完毕": "循環輪詢各個線程是否執行完畢", - "处理数据流的主体": "處理數據流的主體", - "综合": "綜合", - "感叹号": "感嘆號", - "浮点数": "浮點數", - "必要时再进行切割": "必要時再進行切割", - "请注意proxies选项的格式": "請注意proxies選項的格式", - "我需要你找一张网络图片": "我需要你找一張網絡圖片", - "裁剪输入": "裁剪輸入", - "这里其实不需要join了": "這裡其實不需要join了", - "例如 v2**y 和 ss* 的默认本地协议是socks5h": "例如 v2**y 和 ss* 的默認本地協議是socks5h", - "粉红色": "粉紅色", - "llm_kwargs参数": "llm_kwargs參數", - "设置gradio的并行线程数": "設置gradio的並行線程數", - "端口": "端口", - "将每个换行符替换为两个换行符": "將每個換行符替換為兩個換行符", - "防止回答时Token溢出": "防止回答時Token溢出", - "单线": "單線", - "成功读取环境变量": "成功讀取環境變量", - "GPT返回的结果": "GPT返回的結果", - "函数插件功能": "函數插件功能", - "根据前后相邻字符的特点": "根據前後相鄰字符的特點", - "发送到chatgpt进行分析": "發送到chatgpt進行分析", - "例如": "例如", - "翻译": "翻譯", - "选择放弃": "選擇放棄", - "将输出代码片段的“后面的": "將輸出代碼片段的“後面的", - "两个指令来安装jittorllms的依赖": "兩個指令來安裝jittorllms的依賴", - "不在arxiv中无法获取完整摘要": "無法在arxiv中取得完整摘要", - "读取默认值作为数据类型转换的参考": "讀取預設值作為資料型態轉換的參考", - "最后": "最後", - "用于负责跨越线程传递已经输出的部分": "用於負責跨越線程傳遞已經輸出的部分", - "请避免混用多种jittor模型": "請避免混用多種jittor模型", - "等待输入": "等待輸入", - "默认": "預設", - "读取PDF文件": "讀取PDF文件", - "作为一名中文学术论文写作改进助理": "作為一名中文學術論文寫作改進助理", - "如果WEB_PORT是-1": "如果WEB_PORT是-1", - "虽然不同的代理软件界面不一样": "雖然不同的代理軟體介面不一樣", - "选择LLM模型": "選擇LLM模型", - "回车退出": "按Enter退出", - "第3步": "第3步", - "找到原文本中的换行符": "找到原文本中的換行符號", - "表示文件所在的文件夹路径": "表示文件所在的資料夾路徑", - "您可以请再次尝试.": "您可以請再次嘗試。", - "其他小工具": "其他小工具", - "开始问问题": "開始問問題", - "默认值": "預設值", - "正在获取文献名!": "正在獲取文獻名稱!", - "也可以在问题输入区输入临时的api-key": "也可以在問題輸入區輸入臨時的api-key", - "单$包裹begin命令时多余": "單$包裹begin命令時多餘", - "从而达到实时更新功能": "從而達到實時更新功能", - "开始接收jittorllms的回复": "開始接收jittorllms的回覆", - "防止爆token": "防止爆token", - "等待重试": "等待重試", - "解析整个Go项目": "解析整個Go項目", - "解析整个Rust项目": "解析整個Rust項目", - "则随机选取WEB端口": "則隨機選取WEB端口", - "不输入代表全部匹配": "不輸入代表全部匹配", - "在前端打印些好玩的东西": "在前端打印些好玩的東西", - "而在上下文执行结束时": "而在上下文執行結束時", - "会自动使用已配置的代理": 
"會自動使用已配置的代理", - "第 3 步": "第 3 步", - "稍微留一点余地": "稍微留一點余地", - "靛蓝色": "靛藍色", - "改变输入参数的顺序与结构": "改變輸入參數的順序與結構", - "中提取出“标题”、“收录会议或期刊”等基本信息": "中提取出“標題”、“收錄會議或期刊”等基本信息", - "刷新界面用 yield from update_ui": "刷新界面用 yield from update_ui", - "下载编号": "下載編號", - "来自EdgeGPT.py": "來自EdgeGPT.py", - "每个子任务的输出汇总": "每個子任務的輸出匯總", - "你是一位专业的中文学术论文作家": "你是一位專業的中文學術論文作家", - "加了^代表不匹配": "加了^代表不匹配", - "则覆盖原config文件": "則覆蓋原config文件", - "提交按钮、重置按钮": "提交按鈕、重置按鈕", - "对程序的整体功能和构架重新做出概括": "對程式的整體功能和架構重新做出概述", - "未配置": "未配置", - "文本过长将进行截断": "文本過長將進行截斷", - "将英文句号": "將英文句號", - "则使用当前时间生成文件名": "則使用當前時間生成檔名", - "或显存": "或顯存", - "请只提供文本的更正版本": "請只提供文本的更正版本", - "大部分时候仅仅为了fancy的视觉效果": "大部分時候僅僅為了fancy的視覺效果", - "不能达到预期效果": "不能達到預期效果", - "css等": "css等", - "该函数只有20多行代码": "該函數只有20多行程式碼", - "以下是一篇学术论文中的一段内容": "以下是一篇學術論文中的一段內容", - "Markdown/Readme英译中": "Markdown/Readme英譯中", - "递归搜索": "遞歸搜尋", - "检查一下是不是忘了改config": "檢查一下是不是忘了改config", - "不需要修改": "不需要修改", - "请求GPT模型同时维持用户界面活跃": "請求GPT模型同時維持用戶界面活躍", - "是本次输入": "是本次輸入", - "随便切一下敷衍吧": "隨便切一下敷衍吧", - "紫罗兰色": "紫羅蘭色", - "显示/隐藏功能区": "顯示/隱藏功能區", - "加入下拉菜单中": "加入下拉菜單中", - "等待ChatGLM响应中": "等待ChatGLM響應中", - "代码已经更新": "代碼已經更新", - "总结文章": "總結文章", - "正常": "正常", - "降低请求频率中": "降低請求頻率中", - "3. 根据 heuristic 规则判断换行符是否是段落分隔": "3. 根據heuristic規則判斷換行符是否是段落分隔", - "整理反复出现的控件句柄组合": "整理反復出現的控件句柄組合", - "则给出安装建议": "則給出安裝建議", - "我们先及时地做一次界面更新": "我們先及時地做一次界面更新", - "数据流的显示最后收到的多少个字符": "數據流的顯示最後收到的多少個字符", - "并将输出部分的Markdown和数学公式转换为HTML格式": "並將輸出部分的Markdown和數學公式轉換為HTML格式", - "rar和7z格式正常": "rar和7z格式正常", - "代码高亮": "程式碼高亮", - "和 __exit__": "和 __exit__", - "黄色": "黃色", - "使用线程池": "使用線程池", - "的主要内容": "的主要內容", - "定义注释的正则表达式": "定義註釋的正則表達式", - "Reduce the length. 本次输入过长": "減少長度。本次輸入過長", - "具备多线程调用能力的函数": "具備多線程調用能力的函數", - "你是一个程序架构分析师": "你是一個程式架構分析師", - "MOSS尚未加载": "MOSS尚未載入", - "环境变量": "環境變數", - "请分析此页面中出现的所有文章": "請分析此頁面中出現的所有文章", - "只裁剪历史": "只裁剪歷史", - "在结束时": "在結束時", - "缺一不可": "缺一不可", - "第10步": "第10步", - "安全第一条": "安全第一條", - "解释代码": "解釋程式碼", - "地址": "地址", - "全部文件解析完成": "全部檔案解析完成", - "乱七八糟的后处理": "亂七八糟的後處理", - "输入时用逗号隔开": "輸入時用逗號隔開", - "对最相关的两个搜索结果进行总结": "對最相關的兩個搜索結果進行總結", - "第": "第", - "清空历史": "清空歷史", - "引用次数是链接中的文本": "引用次數是鏈接中的文本", - "时": "時", - "如没有给定输入参数": "如沒有給定輸入參數", - "与gradio版本和网络都相关": "與gradio版本和網絡都相關", - "润色": "潤色", - "青蓝色": "青藍色", - "如果浏览器没有自动打开": "如果瀏覽器沒有自動打開", - "新功能": "新功能", - "会把traceback和已经接收的数据转入输出": "會把traceback和已經接收的數據轉入輸出", - "在这里输入分辨率": "在這裡輸入分辨率", - "至少一个线程任务意外失败": "至少一個線程任務意外失敗", - "子进程Worker": "子進程Worker", - "使用yield from语句返回重新加载过的函数": "使用yield from語句返回重新加載過的函數", - "网络等出问题时": "網絡等出問題時", - "does not exist. 
模型不存在": "不存在該模型", - "本地LLM模型如ChatGLM的执行方式 CPU/GPU": "本地LLM模型如ChatGLM的執行方式 CPU/GPU", - "如果选择自动处理": "如果選擇自動處理", - "找不到本地项目或无权访问": "找不到本地專案或無權訪問", - "是否在arxiv中": "是否在arxiv中", - "版": "版", - "数据流的第一帧不携带content": "數據流的第一幀不攜帶content", - "OpenAI和API2D不会走这里": "OpenAI和API2D不會走這裡", - "请编辑以下文本": "請編輯以下文本", - "尽可能多地保留文本": "盡可能多地保留文本", - "将文本按照段落分隔符分割开": "將文本按照段落分隔符分割開", - "获取成功": "獲取成功", - "然后回答问题": "然後回答問題", - "同时分解长句": "同時分解長句", - "刷新时间间隔频率": "刷新時間間隔頻率", - "您可以将任意一个文件路径粘贴到输入区": "您可以將任意一個文件路徑粘貼到輸入區", - "需要手动安装新增的依赖库": "需要手動安裝新增的依賴庫", - "的模板": "的模板", - "重命名文件": "重命名文件", - "第1步": "第1步", - "只输出代码": "只輸出代碼", - "准备对工程源代码进行汇总分析": "準備對工程源代碼進行匯總分析", - "是所有LLM的通用接口": "是所有LLM的通用接口", - "等待回复": "等待回覆", - "此线程失败前收到的回答": "此線程失敗前收到的回答", - "Call ChatGLM fail 不能正常加载ChatGLM的参数": "呼叫ChatGLM失敗,無法正常加載ChatGLM的參數", - "输入参数 Args": "輸入參數Args", - "也可以获取它": "也可以獲取它", - "请求GPT模型的": "請求GPT模型的", - "您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!": "您將把您的API-KEY和對話隱私完全暴露給您設定的中間人!", - "等待MOSS响应中": "等待MOSS響應中", - "文件保存到本地": "文件保存到本地", - "例如需要翻译的一段话": "例如需要翻譯的一段話", - "避免解析压缩文件": "避免解析壓縮文件", - "另外您可以随时在history子文件夹下找回旧版的程序": "另外您可以隨時在history子文件夾下找回舊版的程式", - "由于您没有设置config_private.py私密配置": "由於您沒有設置config_private.py私密配置", - "缺少ChatGLM的依赖": "缺少ChatGLM的依賴", - "试着补上后个": "試著補上後個", - "如果是网络上的文件": "如果是網路上的檔案", - "找不到任何.tex或pdf文件": "找不到任何.tex或pdf檔案", - "直到历史记录的标记数量降低到阈值以下": "直到歷史記錄的標記數量降低到閾值以下", - "当代码输出半截的时候": "當程式碼輸出一半時", - "输入区2": "輸入區2", - "则删除报错信息": "則刪除錯誤訊息", - "如果需要使用newbing": "如果需要使用newbing", - "迭代之前的分析": "迭代之前的分析", - "单线程方法": "單線程方法", - "装载请求内容": "載入請求內容", - "翻译为中文": "翻譯為中文", - "以及代理设置的格式是否正确": "以及代理設置的格式是否正確", - "石头色": "石頭色", - "输入谷歌学术搜索页url": "輸入谷歌學術搜索頁URL", - "可选 ↓↓↓": "可選 ↓↓↓", - "再点击按钮": "再點擊按鈕", - "开发者们❤️": "開發者們❤️", - "若再次失败则更可能是因为输入过长.": "若再次失敗則更可能是因為輸入過長。", - "载入对话": "載入對話", - "包括": "包括", - "或者": "或者", - "并执行函数的新版本": "並執行函數的新版本", - "论文": "論文", - "解析一个Golang项目": "ParseAGolangProject", - "Latex英文纠错": "LatexEnglishCorrection", - "连接bing搜索回答问题": "ConnectToBingSearchForAnswer", - "联网的ChatGPT_bing版": "ChatGPT_BingVersionOnline", - "总结音视频": "SummarizeAudioAndVideo", - "动画生成": "GenerateAnimations", - "数学动画生成manim": "GenerateMathematicalAnimationsWithManim", - "Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage", - "知识库问答": "KnowledgeBaseQA", - "Langchain知识库": "LangchainKnowledgeBase", - "读取知识库作答": "ReadKnowledgeBaseAndAnswerQuestions", - "交互功能模板函数": "InteractiveFunctionTemplateFunctions", - "交互功能函数模板": "InteractiveFunctionFunctionTemplates", - "Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison", - "Latex输出PDF": "OutputPDFFromLatex", - "Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF", - "语音助手": "VoiceAssistant", - "微调数据集生成": "FineTuneDatasetGeneration", - "chatglm微调工具": "ChatGLM_FineTuningTool", - "启动微调": "StartFineTuning", - "sprint亮靛": "SprintLiangDian", - "寻找Latex主文件": "FindLatexMainFile", - "专业词汇声明": "ProfessionalTerminologyDeclaration", - "Latex精细分解与转化": "LatexFineDecompositionAndConversion", - "编译Latex": "CompileLatex", - "正在等您说完问题": "正在等您說完問題", - "最多同时执行5个": "最多同時執行5個", - "将文件复制一份到下载区": "將檔案複製一份到下載區", - "您接下来不能再使用其他插件了": "您接下來不能再使用其他插件了", - "如 绿帽子*深蓝色衬衫*黑色运动裤": "如 綠帽子*深藍色襯衫*黑色運動褲", - "首先你在中文语境下通读整篇论文": "首先您在中文語境下通讀整篇論文", - "根据给定的切割时长将音频文件切割成多个片段": "根據給定的切割時長將音訊檔切割成多個片段", - "接下来两句话只显示在界面上": "接下來兩句話只顯示在介面上", - "清空label": "清空標籤", - "正在尝试自动安装": "正在嘗試自動安裝", - "MOSS消耗大量的内存": "MOSS消耗大量的記憶體", - "如果这里报错": "如果這裡報錯", - "其他类型文献转化效果未知": "其他類型文獻轉換效果未知", - "ChatGPT综合": "ChatGPT綜合", - "音频文件的路径": "音訊檔案的路徑", - "执行错误": "執行錯誤", - "因此选择GenerateImage函数": "因此選擇GenerateImage函數", - "从摘要中提取高价值信息": "從摘要中提取高價值資訊", - "使用英文": "使用英文", - 
"是否在提交时自动清空输入框": "是否在提交時自動清空輸入框", - "生成数学动画": "生成數學動畫", - "正在加载Claude组件": "正在載入Claude元件", - "参数说明": "參數說明", - "建议排查": "建議排查", - "将消耗较长时间下载中文向量化模型": "將消耗較長時間下載中文向量化模型", - "test_LangchainKnowledgeBase读取": "test_LangchainKnowledgeBase讀取", - "安装Claude的依赖": "安裝Claude的相依性", - "以下所有配置也都支持利用环境变量覆写": "以下所有配置也都支持利用環境變數覆寫", - "需要被切割的音频文件名": "需要被切割的音頻文件名", - "保存当前对话": "保存當前對話", - "功能、贡献者": "功能、貢獻者", - "Chuanhu-Small-and-Beautiful主题": "Chuanhu-小而美主題", - "等待Claude响应": "等待Claude響應", - "其他模型转化效果未知": "其他模型轉換效果未知", - "版权归原文作者所有": "版權歸原文作者所有", - "回答完问题后": "回答完問題後", - "请先上传文件素材": "請先上傳文件素材", - "上传本地文件/压缩包供函数插件调用": "上傳本地文件/壓縮包供函數插件調用", - "P.S. 顺便把Latex的注释去除": "P.S. 順便把Latex的註釋去除", - "您提供的api-key不满足要求": "您提供的api-key不滿足要求", - "切割音频文件": "切割音頻文件", - "对不同latex源文件扣分": "對不同latex源文件扣分", - "以下是一篇学术论文的基础信息": "以下是一篇學術論文的基礎信息", - "问题": "問題", - "待注入的知识库名称id": "待注入的知識庫名稱id", - "”的主要内容": "”的主要內容", - "获取设置": "獲取設置", - "str类型": "str類型", - "多线程": "多線程", - "尝试执行Latex指令失败": "嘗試執行Latex指令失敗", - "然后再写一段英文摘要": "然後再寫一段英文摘要", - "段音频的主要内容": "段音頻的主要內容", - "临时地激活代理网络": "臨時地激活代理網絡", - "网络的远程文件": "網絡的遠程文件", - "不能正常加载ChatGLMFT的参数!": "無法正常載入ChatGLMFT的參數!", - "正在编译PDF文档": "正在編譯PDF文件", - "等待ChatGLMFT响应中": "等待ChatGLMFT回應中", - "将": "將", - "片段": "片段", - "修复括号": "修復括號", - "条": "條", - "建议直接在API_KEY处填写": "建議直接在API_KEY處填寫", - "根据需要切换prompt": "根據需要切換prompt", - "使用": "使用", - "请输入要翻译成哪种语言": "請輸入要翻譯成哪種語言", - "实际得到格式": "實際得到格式", - "例如 f37f30e0f9934c34a992f6f64f7eba4f": "例如 f37f30e0f9934c34a992f6f64f7eba4f", - "请切换至“KnowledgeBaseQA”插件进行知识库访问": "請切換至“KnowledgeBaseQA”插件進行知識庫訪問", - "用户填3": "用戶填3", - "远程云服务器部署": "遠程雲服務器部署", - "未知指令": "未知指令", - "每个线程都要“喂狗”": "每個線程都要“喂狗”", - "该项目的Latex主文件是": "該項目的Latex主文件是", - "设置OpenAI密钥和模型": "設置OpenAI密鑰和模型", - "填入你亲手写的部署名": "填入你親手寫的部署名", - "仅调试": "僅調試", - "依赖不足": "依賴不足", - "右下角更换模型菜单中可切换openai": "右下角更換模型菜單中可切換openai", - "解析整个CSharp项目": "解析整個CSharp項目", - "唤起高级参数输入区": "喚起高級參數輸入區", - "这个bug没找到触发条件": "這個bug沒找到觸發條件", - "========================================= 插件主程序2 =====================================================": "========================================= 插件主程序2 =====================================================", - "经过充分测试": "經過充分測試", - "该文件中主要包含三个函数": "該文件中主要包含三個函數", - "您可以到Github Issue区": "您可以到Github Issue區", - "避免线程阻塞": "避免線程阻塞", - "吸收iffalse注释": "吸收iffalse註釋", - "from crazy_functions.虚空终端 import 终端": "from crazy_functions.虛空終端 import 終端", - "异步方法": "異步方法", - "块元提取": "塊元提取", - "Your account is not active. OpenAI以账户失效为由": "您的帳戶未啟用。OpenAI以帳戶失效為由", - "还原部分原文": "還原部分原文", - "如果要使用Claude": "如果要使用Claude", - "把文件复制过去": "把文件複製過去", - "解压失败! 需要安装pip install rarfile来解压rar文件": "解壓失敗!需要安裝pip install rarfile來解壓rar文件", - "正在锁定插件": "正在鎖定插件", - "输入 clear 以清空对话历史": "輸入 clear 以清空對話歷史", - "P.S. 但愿没人把latex模板放在里面传进来": "P.S. 但願沒人把latex模板放在裡面傳進來", - "实时音频采集": "實時音頻採集", - "开始最终总结": "開始最終總結", - "拒绝服务": "拒絕服務", - "配置教程&视频教程": "配置教程&視頻教程", - "所有音频都总结完成了吗": "所有音頻都總結完成了嗎", - "返回": "返回", - "避免不小心传github被别人看到": "避免不小心傳github被別人看到", - "否则将导致每个人的Claude问询历史互相渗透": "否則將導致每個人的Claude問詢歷史互相滲透", - "提问吧! 但注意": "提問吧!但注意", - "待处理的word文档路径": "待處理的word文檔路徑", - "欢迎加REAME中的QQ联系开发者": "歡迎加REAME中的QQ聯繫開發者", - "建议暂时不要使用": "建議暫時不要使用", - "Latex没有安装": "Latex沒有安裝", - "在这里放一些网上搜集的demo": "在這裡放一些網上搜集的demo", - "实现消息发送、接收等功能": "實現消息發送、接收等功能", - "用于与with语句一起使用": "用於與with語句一起使用", - "解压失败! 需要安装pip install py7zr来解压7z文件": "解壓失敗! 
需要安裝pip install py7zr來解壓7z文件", - "借助此参数": "借助此參數", - "判定为数据流的结束": "判定為數據流的結束", - "提取文件扩展名": "提取文件擴展名", - "GPT结果已输出": "GPT結果已輸出", - "读取文件": "讀取文件", - "如果OpenAI不响应": "如果OpenAI不響應", - "输入部分太自由": "輸入部分太自由", - "用于给一小段代码上代理": "用於給一小段代碼上代理", - "输入 stop 以终止对话": "輸入 stop 以終止對話", - "这个paper有个input命令文件名大小写错误!": "這個paper有個input命令文件名大小寫錯誤!", - "等待Claude回复的片段": "等待Claude回復的片段", - "开始": "開始", - "将根据报错信息修正tex源文件并重试": "將根據報錯信息修正tex源文件並重試", - "建议更换代理协议": "建議更換代理協議", - "递归地切割PDF文件": "遞歸地切割PDF文件", - "读 docs\\use_azure.md": "讀 docs\\use_azure.md", - "参数": "參數", - "屏蔽空行和太短的句子": "屏蔽空行和太短的句子", - "分析上述回答": "分析上述回答", - "因为在同一个频道里存在多人使用时历史消息渗透问题": "因為在同一個頻道裡存在多人使用時歷史消息滲透問題", - "使用latexdiff生成論文轉化前後對比": "使用latexdiff生成論文轉化前後對比", - "檢查結果": "檢查結果", - "請在此處追加更細緻的校錯指令": "請在此處追加更細緻的校錯指令", - "報告如何遠程獲取": "報告如何遠程獲取", - "發現已經存在翻譯好的PDF文檔": "發現已經存在翻譯好的PDF文檔", - "插件鎖定中": "插件鎖定中", - "正在精細切分latex文件": "正在精細切分latex文件", - "數學GenerateAnimations": "數學GenerateAnimations", - "上傳文件自動修正路徑": "上傳文件自動修正路徑", - "請檢查ALIYUN_TOKEN和ALIYUN_APPKEY是否過期": "請檢查ALIYUN_TOKEN和ALIYUN_APPKEY是否過期", - "上傳Latex項目": "上傳LaTeX項目", - "Aliyun音頻服務異常": "Aliyun音頻服務異常", - "為了防止大語言模型的意外謬誤產生擴散影響": "為了防止大語言模型的意外謬誤產生擴散影響", - "調用Claude時": "調用Claude時", - "解除插件鎖定": "解除插件鎖定", - "暗色模式 / 亮色模式": "暗色模式 / 亮色模式", - "只有第二步成功": "只有第二步成功", - "分析结果": "分析結果", - "用第二人称": "使用第二人稱", - "详情见https": "詳情請見https", - "记住当前的label": "記住當前的標籤", - "当无法用标点、空行分割时": "當無法用標點符號、空行分割時", - "如果分析错误": "如果分析錯誤", - "如果有必要": "如果有必要", - "不要修改!! 高危设置!通过修改此设置": "不要修改!! 高危設置!通過修改此設置", - "ChatGLMFT消耗大量的内存": "ChatGLMFT消耗大量的內存", - "摘要生成后的文档路径": "摘要生成後的文件路徑", - "对全文进行概括": "對全文進行概述", - "LLM_MODEL是默认选中的模型": "LLM_MODEL是默認選中的模型", - "640个字节为一组": "640個字節為一組", - "获取关键词": "獲取關鍵詞", - "解析为简体中文": "解析為簡體中文", - "将 \\include 命令转换为 \\input 命令": "將 \\include 命令轉換為 \\input 命令", - "默认值为1000": "默認值為1000", - "手动指定语言": "手動指定語言", - "请登录OpenAI查看详情 https": "請登錄OpenAI查看詳情 https", - "尝试第": "嘗試第", - "每秒采样数量": "每秒採樣數量", - "加载失败!": "加載失敗!", - "方法": "方法", - "对这个人外貌、身处的环境、内心世界、过去经历进行描写": "對這個人外貌、身處的環境、內心世界、過去經歷進行描寫", - "请先将.doc文档转换为.docx文档": "請先將.doc文檔轉換為.docx文檔", - "定位主Latex文件": "定位主Latex文件", - "批量SummarizeAudioAndVideo": "批量摘要音视频", - "终端": "終端", - "即将退出": "即將退出", - "找不到": "找不到", - "正在听您讲话": "正在聆聽您講話", - "请您不要删除或修改这行警告": "請勿刪除或修改此警告", - "没有阿里云语音识别APPKEY和TOKEN": "沒有阿里雲語音識別APPKEY和TOKEN", - "临时地启动代理网络": "臨時啟動代理網絡", - "请尝试把以下指令复制到高级参数区": "請將以下指令複製到高級參數區", - "中文Bing版": "中文Bing版", - "计算文件总时长和切割点": "計算文件總時長和切割點", - "寻找主文件": "尋找主文件", - "jittorllms尚未加载": "jittorllms尚未加載", - "使用正则表达式查找半行注释": "使用正則表達式查找半行註釋", - "文档越长耗时越长": "文檔越長耗時越長", - "生成中文PDF": "生成中文PDF", - "写入文件": "寫入文件", - "第三组插件": "第三組插件", - "开始接收chatglmft的回复": "開始接收chatglmft的回覆", - "由于提问含不合规内容被Azure过滤": "由於提問含不合規內容被Azure過濾", - "安装方法https": "安裝方法https", - "是否自动处理token溢出的情况": "是否自動處理token溢出的情況", - "如果需要使用AZURE 详情请见额外文档 docs\\use_azure.md": "如果需要使用AZURE 詳情請見額外文檔 docs\\use_azure.md", - "将要忽略匹配的文件后缀": "將要忽略匹配的文件後綴", - "authors获取失败": "authors獲取失敗", - "发送到openai音频解析终端": "發送到openai音頻解析終端", - "请开始多线程操作": "請開始多線程操作", - "对这个人外貌、身处的环境、内心世界、人设进行描写": "對這個人外貌、身處的環境、內心世界、人設進行描寫", - "MOSS can understand and communicate fluently in the language chosen by the user such as English and 中文. 
MOSS can perform any language-based tasks.": "MOSS可以流利地理解和使用用戶選擇的語言,例如英語和中文。MOSS可以執行任何基於語言的任務。", - "work_folder = Latex預處理": "設置工作目錄為Latex預處理", - "然後轉移到指定的另一個路徑中": "然後轉移到指定的另一個路徑中", - "使用Newbing": "使用Newbing", - "詳情信息見requirements.txt": "詳細信息請參閱requirements.txt", - "開始下載": "開始下載", - "多線程翻譯開始": "多線程翻譯開始", - "當前大語言模型": "當前大語言模型", - "格式如org-123456789abcdefghijklmno的": "格式如org-123456789abcdefghijklmno的", - "當下一次用戶提交時": "當下一次用戶提交時", - "需要特殊依賴": "需要特殊依賴", - "次編譯": "次編譯", - "先上傳數據集": "先上傳數據集", - "gpt寫的": "gpt寫的", - "調用緩存": "調用緩存", - "优先级1. 获取环境变量作为配置": "優先級1. 獲取環境變量作為配置", - "检查config中的AVAIL_LLM_MODELS选项": "檢查config中的AVAIL_LLM_MODELS選項", - "并且对于网络上的文件": "並且對於網絡上的文件", - "根据文本使用GPT模型生成相应的图像": "根據文本使用GPT模型生成相應的圖像", - "功能描述": "功能描述", - "翻译结果": "翻譯結果", - "需要预先pip install rarfile": "需要預先pip install rarfile", - "等待响应": "等待響應", - "我们剥离Introduction之后的部分": "我們剝離Introduction之後的部分", - "函数插件-固定按钮区": "函數插件-固定按鈕區", - "临时存储用于调试": "臨時存儲用於調試", - "比正文字体小": "比正文字體小", - "会直接转到该函数": "會直接轉到該函數", - "请以以下方式load模型!!!": "請以以下方式load模型!!!", - "请输入关键词": "請輸入關鍵詞", - "返回找到的第一个": "返回找到的第一個", - "高级参数输入区": "高級參數輸入區", - "精细切分latex文件": "精細切分latex文件", - "赋予插件锁定 锁定插件回调路径": "賦予插件鎖定 鎖定插件回調路徑", - "尝试下载": "嘗試下載", - "包含documentclass关键字": "包含documentclass關鍵字", - "在一个异步线程中采集音频": "在一個異步線程中採集音頻", - "先删除": "先刪除", - "则跳过GPT请求环节": "則跳過GPT請求環節", - "Not enough point. API2D账户点数不足": "Not enough point. API2D帳戶點數不足", - "如果一句话小于7个字": "如果一句話小於7個字", - "具备以下功能": "具備以下功能", - "请查看终端的输出或耐心等待": "請查看終端的輸出或耐心等待", - "对输入的word文档进行摘要生成": "對輸入的word文檔進行摘要生成", - "只读": "只讀", - "文本碎片重组为完整的tex文件": "文本碎片重組為完整的tex文件", - "通过调用conversations_open方法打开一个频道": "通過調用conversations_open方法打開一個頻道", - "对话历史文件损坏!": "對話歷史文件損壞!", - "再失败就没办法了": "再失敗就沒辦法了", - "原始PDF编译是否成功": "原始PDF編譯是否成功", - "不能正常加载jittorllms的参数!": "不能正常加載jittorllms的參數!", - "正在编译对比PDF": "正在編譯對比PDF", - "找不到微调模型检查点": "找不到微調模型檢查點", - "将生成的报告自动投射到文件上传区": "將生成的報告自動投射到文件上傳區", - "请对这部分内容进行语法矫正": "請對這部分內容進行語法校正", - "编译已经开始": "編譯已經開始", - "需要读取和清理文本的pdf文件路径": "需要讀取和清理文本的pdf文件路徑", - "读取文件内容到内存": "讀取文件內容到內存", - "用&符号分隔": "用&符號分隔", - "输入arxivID": "輸入arxivID", - "找 API_ORG 设置项": "找API_ORG設置項", - "分析用户提供的谷歌学术": "分析用戶提供的谷歌學術", - "欢迎使用 MOSS 人工智能助手!输入内容即可进行对话": "歡迎使用 MOSS 人工智能助手!輸入內容即可進行對話", - "段音频的第": "段音頻的第", - "没有找到任何可读取文件": "沒有找到任何可讀取文件", - "目前仅支持GPT3.5/GPT4": "目前僅支持GPT3.5/GPT4", - "为每一位访问的用户赋予一个独一无二的uuid编码": "為每一位訪問的用戶賦予一個獨一無二的uuid編碼", - "内含已经翻译的Tex文档": "內含已經翻譯的Tex文檔", - "消耗时间的函数": "消耗時間的函數", - "成功啦": "成功啦", - "环境变量配置格式见docker-compose.yml": "環境變量配置格式見docker-compose.yml", - "将每次对话记录写入Markdown格式的文件中": "將每次對話記錄寫入Markdown格式的文件中", - "报告已经添加到右侧“文件上传区”": "報告已經添加到右側“文件上傳區”", - "此处可以输入解析提示": "此處可以輸入解析提示", - "缺少MOSS的依赖": "缺少MOSS的依賴", - "仅在Windows系统进行了测试": "僅在Windows系統進行了測試", - "然后重启程序": "然後重啟程序", - "此处不修改": "此處不修改", - "输出html调试文件": "輸出html調試文件", - "6.25 加入判定latex模板的代码": "6.25 加入判定latex模板的代碼", - "提取总结": "提取總結", - "要求": "要求", - "由于最为关键的转化PDF编译失败": "由於最為關鍵的轉化PDF編譯失敗", - "除非您是论文的原作者": "除非您是論文的原作者", - "输入问题后点击该插件": "輸入問題後點擊該插件", - "该选项即将被弃用": "該選項即將被棄用", - "再列出用户可能提出的三个问题": "再列出用戶可能提出的三個問題", - "所有文件都总结完成了吗": "所有文件都總結完成了嗎", - "请稍候": "請稍候", - "向chatbot中添加简单的意外错误信息": "向chatbot中添加簡單的意外錯誤信息", - "快捷的调试函数": "快捷的調試函數", - "LatexEnglishCorrection+高亮修正位置": "Latex英文校正+高亮修正位置", - "循环监听已打开频道的消息": "循環監聽已打開頻道的消息", - "将指定目录下的PDF文件从英文翻译成中文": "將指定目錄下的PDF文件從英文翻譯成中文", - "请对下面的音频片段做概述": "請對下面的音頻片段做概述", - "openai的官方KEY需要伴隨组织编码": "openai的官方KEY需要伴隨組織編碼", - "表示频道ID": "頻道ID", - "当前支持的格式包括": "目前支援的格式包括", - "只有GenerateImage和生成图像相关": "僅限GenerateImage和生成圖像相關", - "删除中间文件夹": "刪除中間資料夾", - "解除插件状态": "解除插件狀態", - "正在预热文本向量化模组": "正在預熱文本向量化模組", - "100字以内": "限制100字內", - "如果缺少依赖": "如果缺少相依性", - 
"寻找主tex文件": "尋找主要tex檔案", - "gpt 多线程请求": "gpt 多線程請求", - "已知某些代码的局部作用是": "已知某些程式碼的局部作用是", - "--读取文件": "--讀取檔案", - "前面是中文冒号": "前面是中文冒號", - "*{\\scriptsize\\textbf{警告": "*{\\scriptsize\\textbf{警告", - "OpenAI所允许的最大并行过载": "OpenAI所允許的最大並行過載", - "请直接去该路径下取回翻译结果": "請直接前往該路徑取回翻譯結果", - "以免输入溢出": "以免輸入溢出", - "把某个路径下所有文件压缩": "壓縮某個路徑下的所有檔案", - "问询记录": "詢問記錄", - "Tex源文件缺失!": "Tex原始檔案遺失!", - "当前参数": "目前參數", - "处理markdown文本格式的转变": "處理markdown文本格式的轉換", - "尝试加载": "嘗試載入", - "请在此处给出自定义翻译命令": "請在此處提供自訂翻譯命令", - "这需要一段时间计算": "這需要一段時間計算", - "-构建知识库": "-建立知識庫", - "还需要填写组织": "還需要填寫組織", - "当前知识库内的有效文件": "當前知識庫內的有效文件", - "第一次调用": "第一次調用", - "从一批文件": "從一批文件", - "json等": "json等", - "翻译-": "翻譯-", - "编译文献交叉引用": "編譯文獻交叉引用", - "优先级2. 获取config_private中的配置": "優先級2. 獲取config_private中的配置", - "可选": "可選", - "我们": "我們", - "编译结束": "編譯結束", - "或代理节点": "或代理節點", - "chatGPT 分析报告": "chatGPT 分析報告", - "调用openai api 使用whisper-1模型": "調用openai api 使用whisper-1模型", - "这段代码定义了一个名为TempProxy的空上下文管理器": "這段代碼定義了一個名為TempProxy的空上下文管理器", - "生成的视频文件路径": "生成的視頻文件路徑", - "请直接提交即可": "請直接提交即可", - "=================================== 工具函数 ===============================================": "=================================== 工具函數 ===============================================", - "报错信息如下. 如果是与网络相关的问题": "報錯信息如下. 如果是與網絡相關的問題", - "python 版本建议3.9+": "python 版本建議3.9+", - "多线程函数插件中": "多線程函數插件中", - "对话助手函数插件": "對話助手函數插件", - "或者重启之后再度尝试": "或者重啟之後再度嘗試", - "拆分过长的latex片段": "拆分過長的latex片段", - "调用whisper模型音频转文字": "調用whisper模型音頻轉文字", - "失败啦": "失敗啦", - "正在编译PDF": "正在編譯PDF", - "请刷新界面重试": "請刷新界面重試", - "模型参数": "模型參數", - "写出文件": "寫出文件", - "第二组插件": "第二組插件", - "在多Tex文档中": "在多Tex文檔中", - "有线程锁": "有線程鎖", - "释放线程锁": "釋放線程鎖", - "读取优先级": "讀取優先級", - "Linux下必须使用Docker安装": "Linux下必須使用Docker安裝", - "例如您可以将以下命令复制到下方": "例如您可以將以下命令複製到下方", - "导入依赖失败": "導入依賴失敗", - "给出一些判定模板文档的词作为扣分项": "給出一些判定模板文檔的詞作為扣分項", - "等待Claude响应中": "等待Claude響應中", - "Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数": "Call ChatGLMFT fail 不能正常加載ChatGLMFT的參數", - "但本地存储了以下历史文件": "但本地存儲了以下歷史文件", - "如果存在调试缓存文件": "如果存在調試緩存文件", - "如果这里抛出异常": "如果這裡拋出異常", - "详见项目主README.md": "詳見項目主README.md", - "作者": "作者", - "现在您点击任意“红颜色”标识的函数插件时": "現在您點擊任意“紅顏色”標識的函數插件時", - "上下文管理器必须实现两个方法": "上下文管理器必須實現兩個方法", - "匹配^数字^": "匹配^數字^", - "也是可读的": "也是可讀的", - "将音频解析为简体中文": "將音頻解析為簡體中文", - "依次访问网页": "依次訪問網頁", - "P.S. 顺便把CTEX塞进去以支持中文": "P.S. 順便把CTEX塞進去以支持中文", - "NewBing响应异常": "NewBing響應異常", - "获取已打开频道的最新消息并返回消息列表": "獲取已打開頻道的最新消息並返回消息列表", - "请使用Markdown": "請使用Markdown", - "例如 RoPlZrM88DnAFkZK": "例如 RoPlZrM88DnAFkZK", - "编译BibTex": "編譯BibTex", - "Claude失败": "Claude失敗", - "请更换为API_URL_REDIRECT配置": "請更換為API_URL_REDIRECT配置", - "P.S. 其他可用的模型还包括": "P.S. 
其他可用的模型還包括", - "色彩主体": "色彩主體", - "后面是英文逗号": "後面是英文逗號", - "下载pdf文件未成功": "下載pdf文件未成功", - "删除整行的空注释": "刪除整行的空注釋", - "吸收匿名公式": "吸收匿名公式", - "从而更全面地理解项目的整体功能": "從而更全面地理解項目的整體功能", - "不需要再次转化": "不需要再次轉化", - "可以将自身的状态存储到cookie中": "可以將自身的狀態存儲到cookie中", - "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开": "1、英文題目;2、中文題目翻譯;3、作者;4、arxiv公開", - "GPT 学术优化": "GPT 學術優化", - "解析整个Python项目": "解析整個Python項目", - "吸收其他杂项": "吸收其他雜項", - "-预热文本向量化模组": "-預熱文本向量化模組", - "Claude组件初始化成功": "Claude組件初始化成功", - "此处填API密钥": "此處填API密鑰", - "请继续分析其他源代码": "請繼續分析其他源代碼", - "质能方程式": "質能方程式", - "功能尚不稳定": "功能尚不穩定", - "使用教程详情见 request_llms/README.md": "使用教程詳情見 request_llms/README.md", - "从以上搜索结果中抽取信息": "從以上搜索結果中抽取信息", - "虽然PDF生成失败了": "雖然PDF生成失敗了", - "找图片": "尋找圖片", - "还原原文": "還原原文", - "可调节线程池的大小避免openai的流量限制错误": "可調整線程池大小以避免openai流量限制錯誤", - "正在提取摘要并下载PDF文档……": "正在提取摘要並下載PDF文件......", - "缺少ChatGLMFT的依赖": "缺少ChatGLMFT的依賴", - "不会实时显示在界面上": "不會即時顯示在界面上", - "解决部分词汇翻译不准确的问题": "解決部分詞彙翻譯不準確的問題", - "等待多线程操作": "等待多線程操作", - "吸收title与作者以上的部分": "吸收標題與作者以上的部分", - "如果需要使用Slack Claude": "如果需要使用Slack Claude", - "一、论文概况": "一、論文概況", - "默认为Chinese": "默認為中文", - "图像生成所用到的提示文本": "圖像生成所用到的提示文本", - "向已打开的频道发送一条文本消息": "向已打開的頻道發送一條文本消息", - "如果某个子任务出错": "如果某個子任務出錯", - "chatglmft 没有 sys_prompt 接口": "chatglmft沒有sys_prompt接口", - "对比PDF编译是否成功": "對比PDF編譯是否成功", - "免费": "免費", - "请讲话": "請講話", - "安装ChatGLM的依赖": "安裝ChatGLM的依賴", - "对IPynb文件进行解析": "對IPynb文件進行解析", - "文件路径列表": "文件路徑列表", - "或者使用此插件继续上传更多文件": "或者使用此插件繼續上傳更多文件", - "随机负载均衡": "隨機負載均衡", - "!!!如果需要运行量化版本": "!!!如果需要運行量化版本", - "注意目前不能多人同时调用Claude接口": "注意目前不能多人同時調用Claude接口", - "文件读取完成": "文件讀取完成", - "用于灵活调整复杂功能的各种参数": "用於靈活調整複雜功能的各種參數", - "**函数功能**": "**函數功能**", - "先切换模型到openai或api2d": "先切換模型到openai或api2d", - "You are associated with a deactivated account. OpenAI以账户失效为由": "您的帳戶已停用。OpenAI以帳戶失效為由", - "你的回答必须简单明了": "您的回答必須簡單明了", - "是否丢弃掉 不是正文的内容": "是否丟棄掉 不是正文的內容", - "但请查收结果": "但請查收結果", - "Claude响应缓慢": "Claude響應緩慢", - "需Latex": "需Latex", - "Claude回复的片段": "Claude回復的片段", - "如果要使用ChatGLMFT": "如果要使用ChatGLMFT", - "它*必须*被包含在AVAIL_LLM_MODELS列表中": "它*必須*被包含在AVAIL_LLM_MODELS列表中", - "前面是中文逗号": "前面是中文逗號", - "需要预先pip install py7zr": "需要預先pip install py7zr", - "将前后断行符脱离": "將前後斷行符脫離", - "防止丢失最后一条消息": "防止丟失最後一條消息", - "初始化插件状态": "初始化插件狀態", - "以秒为单位": "以秒為單位", - "中文Latex项目全文润色": "中文Latex項目全文潤色", - "对整个Latex项目进行纠错": "對整個Latex項目進行校對", - "NEWBING_COOKIES未填写或有格式错误": "NEWBING_COOKIES未填寫或有格式錯誤", - "函数插件作者": "函數插件作者", - "结束": "結束", - "追加历史": "追加歷史", - "您需要首先调用构建知识库": "您需要首先調用構建知識庫", - "如果程序停顿5分钟以上": "如果程序停頓5分鐘以上", - "ChatGLMFT响应异常": "ChatGLMFT響應異常", - "根据当前的模型类别": "根據當前的模型類別", - "才能继续下面的步骤": "才能繼續下面的步驟", - "并将返回的频道ID保存在属性CHANNEL_ID中": "並將返回的頻道ID保存在屬性CHANNEL_ID中", - "请查收结果": "請查收結果", - "解决插件锁定时的界面显示问题": "解決插件鎖定時的界面顯示問題", - "待提取的知识库名称id": "待提取的知識庫名稱id", - "Claude响应异常": "Claude響應異常", - "当前代理可用性": "當前代理可用性", - "代理网络配置": "代理網絡配置", - "我将为您查找相关壁纸": "我將為您查找相關壁紙", - "没给定指令": "沒給定指令", - "音频内容是": "音頻內容是", - "用该压缩包+ConversationHistoryArchive进行反馈": "用該壓縮包+ConversationHistoryArchive進行反饋", - "总结音频": "總結音頻", - "等待用户的再次调用": "等待用戶的再次調用", - "永远给定None": "永遠給定None", - "论文概况": "論文概況", - "建议使用英文单词": "建議使用英文單詞", - "刷新Gradio前端界面": "刷新Gradio前端界面", - "列表递归接龙": "列表遞歸接龍", - "赋予插件状态": "賦予插件狀態", - "构建完成": "構建完成", - "避免多用户干扰": "避免多用戶干擾", - "当前工作路径为": "當前工作路徑為", - "用黑色标注转换区": "用黑色標注轉換區", - "压缩包": "壓縮包", - "刷新页面即可以退出KnowledgeBaseQA模式": "刷新頁面即可以退出KnowledgeBaseQA模式", - "拆分过长的Markdown文件": "拆分過長的Markdown文件", - "生成时间戳": "生成時間戳", - "尚未完成全部响应": "尚未完成全部響應", - "HotReload的装饰器函数": "HotReload的裝飾器函數", - "请务必用 pip install -r requirements.txt 指令安装依赖": "請務必用 pip install -r requirements.txt 指令安裝依賴", - 
"TGUI不支持函数插件的实现": "TGUI不支持函數插件的實現", - "音频文件名": "音頻文件名", - "找不到任何音频或视频文件": "找不到任何音頻或視頻文件", - "音频解析结果": "音頻解析結果", - "如果使用ChatGLM2微调模型": "如果使用ChatGLM2微調模型", - "限制的3/4时": "限制的3/4時", - "获取回复": "獲取回復", - "对话历史写入": "對話歷史寫入", - "记录删除注释后的文本": "記錄刪除註釋後的文本", - "整理结果为压缩包": "整理結果為壓縮包", - "注意事项": "注意事項", - "请耐心等待": "請耐心等待", - "在执行完成之后": "在執行完成之後", - "参数简单": "參數簡單", - "Arixv论文精细翻译": "Arixv論文精細翻譯", - "备份和下载": "備份和下載", - "当前报错的latex代码处于第": "當前報錯的latex代碼處於第", - "Markdown翻译": "Markdown翻譯", - "英文Latex项目全文纠错": "英文Latex項目全文校對", - "获取预处理函数": "獲取預處理函數", - "add gpt task 创建子线程请求gpt": "add gpt task 創建子線程請求gpt", - "一个包含所有切割音频片段文件路径的列表": "一個包含所有切割音頻片段文件路徑的列表", - "解析arxiv网址失败": "解析arxiv網址失敗", - "PDF文件所在的路径": "PDF文件所在路徑", - "取评分最高者返回": "取評分最高者返回", - "此插件处于开发阶段": "此插件處於開發階段", - "如果已经存在": "如果已經存在", - "或者不在环境变量PATH中": "或者不在環境變量PATH中", - "目前支持的格式": "目前支持的格式", - "将多文件tex工程融合为一个巨型tex": "將多文件tex工程融合為一個巨型tex", - "暂不提交": "暫不提交", - "调用函数": "調用函數", - "编译转化后的PDF": "編譯轉化後的PDF", - "将代码转为动画": "將代碼轉為動畫", - "本地Latex论文精细翻译": "本地Latex論文精細翻譯", - "删除或修改歧义文件": "刪除或修改歧義文件", - "其他操作系统表现未知": "其他操作系統表現未知", - "此插件Windows支持最佳": "此插件Windows支持最佳", - "构建知识库": "構建知識庫", - "每个切割音频片段的时长": "每個切割音頻片段的時長", - "用latex编译为PDF对修正处做高亮": "用latex編譯為PDF對修正處做高亮", - "行": "行", - "= 2 通过一些Latex模板中常见": "= 2 通過一些Latex模板中常見", - "如参考文献、脚注、图注等": "如參考文獻、腳註、圖註等", - "期望格式例如": "期望格式例如", - "翻译内容可靠性无保障": "翻譯內容可靠性無保障", - "请用一句话概括这些文件的整体功能": "請用一句話概括這些文件的整體功能", - "段音频完成了吗": "段音頻完成了嗎", - "填入azure openai api的密钥": "填入azure openai api的密鑰", - "文本碎片重组为完整的tex片段": "文本碎片重組為完整的tex片段", - "吸收在42行以內的begin-end組合": "吸收在42行以內的begin-end組合", - "屬性": "屬性", - "必須包含documentclass": "必須包含documentclass", - "等待GPT響應": "等待GPT響應", - "當前語言模型溫度設定": "當前語言模型溫度設定", - "模型選擇是": "選擇的模型為", - "reverse 操作必須放在最後": "reverse 操作必須放在最後", - "將子線程的gpt結果寫入chatbot": "將子線程的gpt結果寫入chatbot", - "默認為default": "默認為default", - "目前對機器學習類文獻轉化效果最好": "目前對機器學習類文獻轉化效果最好", - "主程序即將開始": "主程序即將開始", - "點擊“停止”鍵可終止程序": "點擊“停止”鍵可終止程序", - "正在處理": "正在處理", - "請立即終止程序": "請立即停止程序", - "將 chatglm 直接對齊到 chatglm2": "將 chatglm 直接對齊到 chatglm2", - "音頻助手": "音頻助手", - "正在構建知識庫": "正在構建知識庫", - "請向下翻": "請向下滾動頁面", - "後面是英文冒號": "後面是英文冒號", - "無法找到一個主Tex文件": "無法找到一個主Tex文件", - "使用中文总结音频“": "使用中文總結音頻", - "该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成": "該PDF由GPT-Academic開源項目調用大語言模型+Latex翻譯插件一鍵生成", - "开始生成动画": "開始生成動畫", - "完成情况": "完成情況", - "然后进行问答": "然後進行問答", - "为啥chatgpt会把cite里面的逗号换成中文逗号呀": "為啥chatgpt會把cite裡面的逗號換成中文逗號呀", - "暂时不支持历史消息": "暫時不支持歷史消息", - "项目Github地址 \\url{https": "項目Github地址 \\url{https", - "Newbing 请求失败": "Newbing 請求失敗", - "根据自然语言执行插件命令": "根據自然語言執行插件命令", - "迭代上一次的结果": "迭代上一次的結果", - "azure和api2d请求源": "azure和api2d請求源", - "格式如org-xxxxxxxxxxxxxxxxxxxxxxxx": "格式如org-xxxxxxxxxxxxxxxxxxxxxxxx", - "推荐http": "推薦http", - "将要匹配的模式": "將要匹配的模式", - "代理数据解析失败": "代理數據解析失敗", - "创建存储切割音频的文件夹": "創建存儲切割音頻的文件夾", - "用红色标注处保留区": "用紅色標注處保留區", - "至少一个线程任务Token溢出而失败": "至少一個線程任務Token溢出而失敗", - "获取Slack消息失败": "獲取Slack消息失敗", - "极少数情况下": "極少數情況下", - "辅助gpt生成代码": "輔助gpt生成代碼", - "生成图像": "生成圖像", - "最多收纳多少个网页的结果": "最多收納多少個網頁的結果", - "获取图片URL": "獲取圖片URL", - "正常状态": "正常狀態", - "编译原始PDF": "編譯原始PDF", - "SummarizeAudioAndVideo内容": "音視頻摘要內容", - "Latex文件融合完成": "Latex文件融合完成", - "获取线程锁": "獲取線程鎖", - "SlackClient类用于与Slack API进行交互": "SlackClient類用於與Slack API進行交互", - "检测到arxiv文档连接": "檢測到arxiv文檔連接", - "--读取参数": "--讀取參數", - "如果您是论文原作者": "如果您是論文原作者", - "5刀": "5美元", - "转化PDF编译是否成功": "轉換PDF編譯是否成功", - "生成带有段落标签的HTML代码": "生成帶有段落標籤的HTML代碼", - "目前不支持历史消息查询": "目前不支持歷史消息查詢", - "将文件添加到chatbot cookie中": "將文件添加到chatbot cookie中", - "多线程操作已经开始": "多線程操作已經開始", - "请求子进程": "請求子進程", - "将Unsplash 
API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词": "將Unsplash API中的PUT_YOUR_QUERY_HERE替換成描述該事件的一個最重要的單詞", - "不能加载Claude组件": "不能加載Claude組件", - "请仔细鉴别并以原文为准": "請仔細鑒別並以原文為準", - "否则结束循环": "否則結束循環", - "插件可读取“输入区”文本/路径作为参数": "插件可讀取“輸入區”文本/路徑作為參數", - "网络错误": "網絡錯誤", - "想象一个穿着者": "想像一個穿著者", - "避免遗忘导致死锁": "避免遺忘導致死鎖", - "保证括号正确": "保證括號正確", - "报错信息": "錯誤信息", - "提取视频中的音频": "提取視頻中的音頻", - "初始化音频采集线程": "初始化音頻採集線程", - "参考文献转Bib": "參考文獻轉Bib", - "阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https": "阿里云即時語音識別配置難度較高,僅建議高手用戶使用,參考 https", - "使用时": "使用時", - "处理个别特殊插件的锁定状态": "處理個別特殊插件的鎖定狀態", - "但通常不会出现在正文": "但通常不會出現在正文", - "此函数逐渐地搜索最长的条目进行剪辑": "此函數逐漸地搜索最長的條目進行剪輯", - "给出指令": "給出指令", - "读取音频文件": "讀取音頻文件", - "========================================= 插件主程序1 =====================================================": "========================================= 插件主程序1 =====================================================", - "带超时倒计时": "帶超時倒計時", - "禁止移除或修改此警告": "禁止移除或修改此警告", - "ChatGLMFT尚未加载": "ChatGLMFT尚未加載", - "双手离开鼠标键盘吧": "雙手離開鼠標鍵盤吧", - "缺少的依赖": "缺少的依賴", - "的单词": "的單詞", - "中读取数据构建知识库": "中讀取數據構建知識庫", - "函数热更新是指在不停止程序运行的情况下": "函數熱更新是指在不停止程序運行的情況下", - "建议低于1": "建議低於1", - "转化PDF编译已经成功": "轉換PDF編譯已經成功", - "出问题了": "出問題了", - "欢迎使用 MOSS 人工智能助手!": "歡迎使用 MOSS 人工智能助手!", - "正在精细切分latex文件": "正在精細切分LaTeX文件", - "”补上": "”補上", - "网络代理状态": "網路代理狀態", - "依赖检测通过": "依賴檢測通過", - "默认为default": "預設為default", - "Call MOSS fail 不能正常加载MOSS的参数": "呼叫MOSS失敗,無法正常載入MOSS參數", - "音频助手": "音頻助手", - "次编译": "次編譯", - "其他错误": "其他錯誤", - "属性": "屬性", - "主程序即将开始": "主程式即將開始", - "Aliyun音频服务异常": "Aliyun音頻服務異常", - "response中会携带traceback报错信息": "response中會攜帶traceback錯誤信息", - "一些普通功能模块": "一些普通功能模組", - "和openai的连接容易断掉": "和openai的連線容易斷掉", - "请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期": "請檢查ALIYUN_TOKEN和ALIYUN_APPKEY是否過期", - "调用Claude时": "呼叫Claude時", - "插件锁定中": "插件鎖定中", - "将子线程的gpt结果写入chatbot": "將子線程的gpt結果寫入chatbot", - "当下一次用户提交时": "當下一次使用者提交時", - "先上传数据集": "先上傳資料集", - "请在此处追加更细致的矫错指令": "請在此處追加更細緻的矯錯指令", - "无法找到一个主Tex文件": "無法找到一個主Tex文件", - "gpt写的": "gpt寫的", - "预处理": "預處理", - "但大部分场合下并不需要修改": "但大部分場合下並不需要修改", - "正在构建知识库": "正在建構知識庫", - "开始请求": "開始請求", - "根据以上分析": "根據以上分析", - "需要特殊依赖": "需要特殊依賴", - "用于基础的对话功能": "用於基礎的對話功能", - "且没有代码段": "且沒有程式碼段", - "取决于": "取決於", - "openai的官方KEY需要伴隨組織編碼": "請填入組織編碼", - "等待newbing回覆的片段": "等待newbing回覆的片段", - "调用缓存": "呼叫快取", - "模型选择是": "模型選擇為", - "当前大语言模型": "當前大語言模型", - "然后转移到指定的另一个路径中": "然後轉移到指定的另一個路徑中", - "请向下翻": "請向下滾動", - "内容太长了都会触发token数量溢出的错误": "內容太長會觸發token數量溢出的錯誤", - "每一块": "每一塊", - "详情信息见requirements.txt": "詳細信息見requirements.txt", - "没有提供高级参数功能说明": "沒有提供高級參數功能說明", - "上传Latex项目": "上傳Latex項目", - "请立即终止程序": "請立即終止程式", - "解除插件锁定": "解除插件鎖定", - "意外Json结构": "意外Json結構", - "必须包含documentclass": "必須包含documentclass", - "10个文件为一组": "10個文件為一組", - "openai的官方KEY需要伴随组织编码": "openai的官方KEY需要伴隨組織編碼", - "重置文件的创建时间": "重置文件的創建時間", - "尽量是完整的一个section": "盡量是完整的一個section", - "报告如何远程获取": "報告如何遠程獲取", - "work_folder = Latex预处理": "work_folder = Latex預處理", - "吸收在42行以内的begin-end组合": "吸收在42行以內的begin-end組合", - "后面是英文冒号": "後面是英文冒號", - "使用latexdiff生成论文转化前后对比": "使用latexdiff生成論文轉化前後對比", - "首先你在英文语境下通读整篇论文": "首先你在英文語境下通讀整篇論文", - "为了防止大语言模型的意外谬误产生扩散影响": "為了防止大語言模型的意外謬誤產生擴散影響", - "发现已经存在翻译好的PDF文档": "發現已經存在翻譯好的PDF文檔", - "点击“停止”键可终止程序": "點擊“停止”鍵可終止程序", - "数学GenerateAnimations": "數學GenerateAnimations", - "随变按钮的回调函数注册": "隨變按鈕的回調函數註冊", - "history至少释放二分之一": "history至少釋放二分之一", - "当前语言模型温度设定": "當前語言模型溫度設定", - "等待GPT响应": "等待GPT響應", - "正在处理": "正在處理", - "多线程翻译开始": "多線程翻譯開始", - "reverse 操作必须放在最后": "reverse 操作必須放在最後", - "等待newbing回复的片段": "等待newbing回覆的片段", - "开始下载": "開始下載", - "将 chatglm 直接对齐到 chatglm2": "將 chatglm 直接對齊到 
chatglm2", - "以上材料已经被写入": "以上材料已經被寫入", - "上传文件自动修正路径": "上傳文件自動修正路徑", - "然后请使用Markdown格式封装": "然後請使用Markdown格式封裝", - "目前对机器学习类文献转化效果最好": "目前對機器學習類文獻轉化效果最好", - "检查结果": "檢查結果", - "、地址": "地址", - "如.md": "如.md", - "使用Unsplash API": "使用Unsplash API", - "**输入参数说明**": "**輸入參數說明**", - "新版本可用": "新版本可用", - "找不到任何python文件": "找不到任何python文件", - "知乎": "知乎", - "日": "日", - "“喂狗”": "“喂狗”", - "第4步": "第4步", - "退出": "退出", - "使用 Unsplash API": "使用 Unsplash API", - "非Openai官方接口返回了错误": "非Openai官方接口返回了错误", - "用来描述你的要求": "用來描述你的要求", - "自定义API KEY格式": "自定義API KEY格式", - "前缀": "前綴", - "会被加在你的输入之前": "會被加在你的輸入之前", - "api2d等请求源": "api2d等請求源", - "高危设置! 常规情况下不要修改! 通过修改此设置": "高危設置!常規情況下不要修改!通過修改此設置", - "即将编译PDF": "即將編譯PDF", - "默认 secondary": "默認 secondary", - "正在从github下载资源": "正在從github下載資源", - "响应异常": "響應異常", - "我好!": "我好!", - "无需填写": "無需填寫", - "缺少": "缺少", - "请问什么是质子": "請問什麼是質子", - "如果要使用": "如果要使用", - "重组": "重組", - "一个单实例装饰器": "一個單實例裝飾器", - "的参数!": "的參數!", - "🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行": "🏃‍♂️🏃‍♂️🏃‍♂️ 子進程執行", - "失败时": "失敗時", - "没有设置ANTHROPIC_API_KEY选项": "沒有設置ANTHROPIC_API_KEY選項", - "并设置参数": "並設置參數", - "格式": "格式", - "按钮是否可见": "按鈕是否可見", - "即可见": "即可見", - "创建request": "創建request", - "的依赖": "的依賴", - "⭐主进程执行": "⭐主進程執行", - "最后一步处理": "最後一步處理", - "没有设置ANTHROPIC_API_KEY": "沒有設置ANTHROPIC_API_KEY", - "的参数": "的參數", - "逆转出错的段落": "逆轉出錯的段落", - "本项目现已支持OpenAI和Azure的api-key": "本項目現已支持OpenAI和Azure的api-key", - "前者是API2D的结束条件": "前者是API2D的結束條件", - "增强稳健性": "增強穩健性", - "消耗大量的内存": "消耗大量的內存", - "您的 API_KEY 不满足任何一种已知的密钥格式": "您的API_KEY不滿足任何一種已知的密鑰格式", - "⭐单线程方法": "⭐單線程方法", - "是否在触发时清除历史": "是否在觸發時清除歷史", - "⭐多线程方法": "多線程方法", - "不能正常加载": "無法正常加載", - "举例": "舉例", - "即不处理之前的对话历史": "即不處理之前的對話歷史", - "尚未加载": "尚未加載", - "防止proxies单独起作用": "防止proxies單獨起作用", - "默认 False": "默認 False", - "检查USE_PROXY": "檢查USE_PROXY", - "响应中": "響應中", - "扭转的范围": "扭轉的範圍", - "后缀": "後綴", - "调用": "調用", - "创建AcsClient实例": "創建AcsClient實例", - "安装": "安裝", - "会被加在你的输入之后": "會被加在你的輸入之後", - "配合前缀可以把你的输入内容用引号圈起来": "配合前綴可以把你的輸入內容用引號圈起來", - "例如翻译、解释代码、润色等等": "例如翻譯、解釋代碼、潤色等等", - "后者是OPENAI的结束条件": "後者是OPENAI的結束條件", - "标注节点的行数范围": "標註節點的行數範圍", - "默认 True": "默認 True", - "将两个PDF拼接": "將兩個PDF拼接" -} diff --git a/docs/use_audio.md b/docs/use_audio.md deleted file mode 100644 index 0889325c9242e78d9fbf42704b0f3d8c61b18fb4..0000000000000000000000000000000000000000 --- a/docs/use_audio.md +++ /dev/null @@ -1,63 +0,0 @@ -# 使用音频交互功能 - - -## 1. 安装额外依赖 -``` -pip install --upgrade pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git -``` - -如果因为特色网络问题导致上述命令无法执行: -1. git clone alibabacloud-nls-python-sdk这个项目(或者直接前往Github对应网址下载压缩包). -命令行输入: `git clone https://github.com/aliyun/alibabacloud-nls-python-sdk.git` -1. 进入alibabacloud-nls-python-sdk目录命令行输入:`python setup.py install` - - -## 2. 配置音频功能开关 和 阿里云APPKEY(config.py/config_private.py/环境变量) - -- 注册阿里云账号 -- 开通 智能语音交互 (有免费白嫖时长) -- 获取token和appkey -- 未来将逐步用其他更廉价的云服务取代阿里云 - -``` -ENABLE_AUDIO = True -ALIYUN_TOKEN = "554a50fcd0bb476c8d07bb630e94d20c" # 此token已经失效 -ALIYUN_APPKEY = "RoPlZrM88DnAFkZK" # 此appkey已经失效 -``` - -参考 https://help.aliyun.com/document_detail/450255.html -先有阿里云开发者账号,登录之后,需要开通 智能语音交互 的功能,可以免费获得一个token,然后在 全部项目 中,创建一个项目,可以获得一个appkey. 
## 3. Launch

Start gpt-academic: `python main.py`

## 4. Click "record from microphone" and grant audio-capture permission

I. If you want to capture your own speech (and not the computer's audio), just pick the corresponding microphone in the browser.

II. If you want to capture the computer's audio (and not your own speech), install `VB-Audio VoiceMeeter` and open the sound control panel:
- 1 `[Trap all of the computer's playback audio with VoiceMeeter]` On the playback tab, set the VoiceMeeter Input virtual device as the default playback device.
- 2 `[Release the trapped audio to gpt-academic]` Open the gpt-academic main page and grant audio capture; a microphone icon appears in the browser address bar or a similar place. Open it, follow the browser's prompt, and select the VoiceMeeter virtual microphone. Then refresh the page and grant audio capture again.
- 3 `[Also release the trapped audio to your headphones or speakers]` After step 1 you should no longer hear the computer's sound. To avoid disrupting normal use while the audio is being captured, finish this last bit of configuration: on the recording tab of the sound control panel, set the VoiceMeeter Output virtual device as default, then double-click it to open its settings.
  - 3-1 Enter the VoiceMeeter Output device's submenu and open the listen tab.
  - 3-2 Tick "Listen to this device".
  - 3-3 In the "playback through this device" drop-down menu, select your normal headphones or speakers.

III. `[Trap the playback audio of a specific application (e.g. Tencent Meeting) with VoiceMeeter]` Building on step II, open the sound menu of that application, select VoiceMeeter Input as the speaker, and select your normal headset microphone as the microphone.

IV. Switching between the two capture modes only takes effect after refreshing the page.

V. Known pitfall: recording cannot be enabled when the app runs neither on localhost nor over https: https://blog.csdn.net/weixin_39461487/article/details/109594434

## 5. Click "实时音频采集" (real-time audio capture) in the function-plugin area, or any other audio interaction feature
diff --git a/docs/use_azure.md b/docs/use_azure.md deleted file mode 100644 index 0e192ba6a215b834b5645bb8d48bba7883153240..0000000000000000000000000000000000000000 --- a/docs/use_azure.md +++ /dev/null @@ -1,164 +0,0 @@

# Microsoft Azure Cloud Access Guide

## Method 1 (old method, connects only one Azure model)

- Follow the tutorial below to obtain AZURE_ENDPOINT, AZURE_API_KEY and AZURE_ENGINE, then edit the config directly. See this project's wiki for how to modify the config.

## Method 2 (new method, connects multiple Azure models and supports switching between them dynamically)

- Building on Method 1, register and obtain several sets of AZURE_ENDPOINT, AZURE_API_KEY and AZURE_ENGINE
- Edit the AZURE_CFG_ARRAY and AVAIL_LLM_MODELS options in the config, filling in the configuration of each Azure model in the following format:

```
AZURE_CFG_ARRAY = {
    "azure-gpt-3.5":  # first model; Azure model names must start with "azure-", and note that "azure-gpt-3.5" must also be added to AVAIL_LLM_MODELS (the model drop-down menu)
    {
        "AZURE_ENDPOINT": "https://<the-api-name-you-chose>.openai.azure.com/",
        "AZURE_API_KEY": "cccccccccccccccccccccccccccccccc",
        "AZURE_ENGINE": "<the deployment name you chose, no. 1>",
        "AZURE_MODEL_MAX_TOKEN": 4096,
    },
    "azure-gpt-4":  # second model; Azure model names must start with "azure-", and note that "azure-gpt-4" must also be added to AVAIL_LLM_MODELS (the model drop-down menu)
    {
        "AZURE_ENDPOINT": "https://<the-api-name-you-chose>.openai.azure.com/",
        "AZURE_API_KEY": "dddddddddddddddddddddddddddddddd",
        "AZURE_ENGINE": "<the deployment name you chose, no. 2>",
        "AZURE_MODEL_MAX_TOKEN": 8192,
    },
    "azure-gpt-3.5-16k":  # third model; Azure model names must start with "azure-", and note that "azure-gpt-3.5-16k" must also be added to AVAIL_LLM_MODELS (the model drop-down menu)
    {
        "AZURE_ENDPOINT": "https://<the-api-name-you-chose>.openai.azure.com/",
        "AZURE_API_KEY": "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
        "AZURE_ENGINE": "<the deployment name you chose, no. 3>",
        "AZURE_MODEL_MAX_TOKEN": 16384,
    },
}
```
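To make the mapping concrete, each AZURE_CFG_ARRAY entry carries exactly what is needed to address one Azure deployment over Azure's documented chat-completions REST endpoint. A minimal sketch with placeholder values follows; it illustrates only the wire format that such an entry resolves to, not this project's internal request code:

```python
import requests

# Values as they would appear in one AZURE_CFG_ARRAY entry (placeholders).
AZURE_ENDPOINT = "https://<the-api-name-you-chose>.openai.azure.com/"
AZURE_API_KEY = "cccccccccccccccccccccccccccccccc"
AZURE_ENGINE = "<the-deployment-name-you-chose>"  # deployment name, not model name
AZURE_API_VERSION = "2023-05-15"

# Azure routes requests by deployment name and api-version query parameter.
url = (f"{AZURE_ENDPOINT}openai/deployments/{AZURE_ENGINE}"
       f"/chat/completions?api-version={AZURE_API_VERSION}")
resp = requests.post(
    url,
    headers={"api-key": AZURE_API_KEY},  # Azure uses an api-key header, not a Bearer token
    json={"messages": [{"role": "user", "content": "hello"}]},
)
print(resp.json()["choices"][0]["message"]["content"])
```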
# Applying for the OpenAI API through the Microsoft Azure cloud service

Because of the relationship between OpenAI and Microsoft, it is now possible to access the OpenAI API directly through Microsoft's Azure cloud computing service, which avoids both the registration and the network-access problems.

The official quick-start documentation is here: [Quickstart - Get started using ChatGPT and GPT-4 with Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python)

# Applying for the API

As the "Prerequisites" section of that document explains, besides a programming environment you also need the following three things:

1. An Azure account with a subscription created

2. The Azure OpenAI service added to that subscription

3. A deployed model

## Azure account and subscription

### Azure account

When creating the Azure account it is best to already have a Microsoft account; that seems to make it easier to get the free credit (200 USD for the first month; in an actual test, logging into Azure with a freshly registered Microsoft account did not receive that one-month free credit).

The page for creating an Azure account is: [Create Your Azure Free Account Today | Microsoft Azure](https://azure.microsoft.com/zh-cn/free/)

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_944786_iH6AECuZ_tY0EaBd_1685327219?w=1327\&h=695\&type=image/png)

After opening the page, clicking "Start free" jumps to a sign-in or sign-up page. If you have a Microsoft account, just sign in; if not, register one on Microsoft's site first.

Note that Azure's pages and policies change from time to time; whatever is actually shown at the moment takes precedence.

### Creating a subscription

After registering for Azure you can enter the home page:

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_444847_tk-9S-pxOYuaLs_K_1685327675?w=1865\&h=969\&type=image/png)

First you need to add a subscription; click it to enter the subscriptions page:

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_612820_z_1AlaEgnJR-rUl0_1685327892?w=1865\&h=969\&type=image/png)

The first time you come in it should be empty; click Add to create a new subscription (it can be a "free" or a "pay-as-you-go" one). The subscription ID is what you will need later when applying for Azure OpenAI.

## Adding the Azure OpenAI service to the subscription

Afterwards, return to the home page and click Azure OpenAI to enter the OpenAI service page (if it is not displayed, search for "openai" in the search bar at the top of the home page).

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_269759_nExkGcPC0EuAR5cp_1685328130?w=1865\&h=969\&type=image/png)

The service cannot be used yet, though. Before using it you must first apply at this address:

[Request Access to Azure OpenAI Service (microsoft.com)](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xUOFA5Qk1UWDRBMjg0WFhPMkIzTzhKQ1dWNyQlQCN0PWcu)

There are twenty-odd questions; fill them in according to the requirements and your actual situation.

Things to pay attention to:

1. Be absolutely sure to fill in the correct "subscription ID"

2. You need to provide a company email address (it does not have to be the one you registered with) and a company website

After that, return to the page above and click Create, which takes you to the creation page:

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_72708_9d9JYhylPVz3dFWL_1685328372?w=824\&h=590\&type=image/png)

You need to fill in a "resource group" and a "name"; choose them as you see fit.

Once that is done, the newly created "resource" appears under "Resources" on the home page; click it to carry out the final deployment.

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_871541_CGCnbgtV9Uk1Jccy_1685329861?w=1217\&h=628\&type=image/png)

## Deploying a model

After entering the resource page, before deploying a model you can first click "Develop" and write down the key and the endpoint.

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_852567_dxCZOrkMlWDSLH0d_1685330736?w=856\&h=568\&type=image/png)

After that you can deploy the model: click "Deploy" and you will be taken to Azure OpenAI Studio for the following steps:

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_169225_uWs1gMhpNbnwW4h2_1685329901?w=1865\&h=969\&type=image/png)

Inside Azure OpenAI Studio, click "New deployment" and a dialog like the following pops up:

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_391255_iXUSZAzoud5qlxjJ_1685330224?w=656\&h=641\&type=image/png)

Select gpt-35-turbo (or whichever model you need) and fill in a "deployment name" as needed to finish deploying the model.

![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_724099_vBaHcUilsm1EtPgK_1685330396?w=1869\&h=482\&type=image/png)

Write this deployment name down.

With that, the application procedure is complete. What you need to have written down is:

● the key (corresponds to AZURE_API_KEY; either key 1 or key 2 works)

● the endpoint (corresponds to AZURE_ENDPOINT)

● the deployment name (corresponds to AZURE_ENGINE; note this is not the model name)

# Editing config.py

```
LLM_MODEL = "azure-gpt-3.5" # the default model selected at startup; you can of course also pick one from the drop-down menu afterwards

AZURE_ENDPOINT = "<fill in the endpoint>" # see the screenshots above
AZURE_API_KEY = "<fill in the azure openai api key>"
AZURE_API_VERSION = "2023-05-15" # 2023-05-15 is used by default; no need to change it
AZURE_ENGINE = "<fill in the deployment name>" # see the screenshots above


# for example
API_KEY = '6424e9d19e674092815cea1cb35e67a5'
AZURE_ENDPOINT = 'https://rhtjjjjjj.openai.azure.com/'
AZURE_ENGINE = 'qqwe'
LLM_MODEL = "azure-gpt-3.5" # optional ↓↓↓
```

# About the cost

The Azure OpenAI API still costs money (the free subscription is only valid for one month).

The details are at: [Azure OpenAI Service - Pricing | Microsoft Azure](https://azure.microsoft.com/zh-cn/pricing/details/cognitive-services/openai-service/?cdn=disable)

It is not the "free for a whole year" deal that some posts online claim, but both the registration process and the network access are somewhat simpler than using the OpenAI API directly.
diff --git a/docs/waifu_plugin/autoload.js b/docs/waifu_plugin/autoload.js deleted file mode 100644 index d0648770b6d18512bfa4310508445e76cf72c11a..0000000000000000000000000000000000000000 --- a/docs/waifu_plugin/autoload.js +++ /dev/null @@ -1,30 +0,0 @@
try {
    $("<link>").attr({href:
"file=docs/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css"}).appendTo('head'); - $('body').append('
');
    $.ajax({url: "file=docs/waifu_plugin/waifu-tips.js", dataType:"script", cache: true, success: function() {
        $.ajax({url: "file=docs/waifu_plugin/live2d.js", dataType:"script", cache: true, success: function() {
            /* these settings can be edited directly */
            live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // Hitokoto ("one sentence") API
            live2d_settings['modelId'] = 5;                 // default model ID
            live2d_settings['modelTexturesId'] = 1;         // default texture ID
            live2d_settings['modelStorage'] = false;        // do not persist the model ID
            live2d_settings['waifuSize'] = '210x187';
            live2d_settings['waifuTipsSize'] = '187x52';
            live2d_settings['canSwitchModel'] = true;
            live2d_settings['canSwitchTextures'] = true;
            live2d_settings['canSwitchHitokoto'] = false;
            live2d_settings['canTakeScreenshot'] = false;
            live2d_settings['canTurnToHomePage'] = false;
            live2d_settings['canTurnToAboutPage'] = false;
            live2d_settings['showHitokoto'] = false;        // show hitokoto quotes
            live2d_settings['showF12Status'] = false;       // show loading status
            live2d_settings['showF12Message'] = false;      // show waifu messages
            live2d_settings['showF12OpenMsg'] = false;      // show the console-opened hint
            live2d_settings['showCopyMessage'] = false;     // show the "content copied" hint
            live2d_settings['showWelcomeMessage'] = true;   // show the welcome message on page entry

            /* add before initModel */
            initModel("file=docs/waifu_plugin/waifu-tips.json");
        }});
    }});
} catch(err) { console.log("[Error] JQuery is not defined.") }
diff --git a/docs/waifu_plugin/flat-ui-icons-regular.eot b/docs/waifu_plugin/flat-ui-icons-regular.eot deleted file mode 100644 index 536680e9f1070d3feb03038448f4ef4764a6784a..0000000000000000000000000000000000000000 Binary files a/docs/waifu_plugin/flat-ui-icons-regular.eot and /dev/null differ
diff --git a/docs/waifu_plugin/flat-ui-icons-regular.svg b/docs/waifu_plugin/flat-ui-icons-regular.svg deleted file mode 100644 index e05f3a0d31e417b72be11d5935cbb201729085dc..0000000000000000000000000000000000000000 --- a/docs/waifu_plugin/flat-ui-icons-regular.svg +++ /dev/null @@ -1,126 +0,0 @@
{
  "fontFamily": "flat-ui-icons",
  "majorVersion": 1,
  "minorVersion": 1,
  "fontURL": "http://designmodo.com/flat",
  "designer": "Sergey Shmidt",
  "designerURL": "http://designmodo.com",
  "license": "Attribution-NonCommercial-NoDerivs 3.0 Unported",
  "licenseURL": "http://creativecommons.org/licenses/by-nc-nd/3.0/",
  "version": "Version 1.1",
  "fontId": "flat-ui-icons",
  "psName": "flat-ui-icons",
  "subFamily": "Regular",
  "fullName": "flat-ui-icons",
  "description": "Generated by IcoMoon"
}
diff --git a/docs/waifu_plugin/flat-ui-icons-regular.ttf b/docs/waifu_plugin/flat-ui-icons-regular.ttf deleted file mode 100644 index f4933ff3590f3c3644d32fe50f5d7148c2ede9b6..0000000000000000000000000000000000000000 Binary files a/docs/waifu_plugin/flat-ui-icons-regular.ttf and /dev/null differ
diff --git a/docs/waifu_plugin/flat-ui-icons-regular.woff b/docs/waifu_plugin/flat-ui-icons-regular.woff deleted file mode 100644 index f9e9805e768525c296ed284e1bc803ca9e757679..0000000000000000000000000000000000000000 Binary files a/docs/waifu_plugin/flat-ui-icons-regular.woff and /dev/null differ
diff --git a/docs/waifu_plugin/jquery-ui.min.js b/docs/waifu_plugin/jquery-ui.min.js deleted file mode 100644 index 862a649869db80cb6c8cd6d48f63ee0b56169a2c..0000000000000000000000000000000000000000 --- a/docs/waifu_plugin/jquery-ui.min.js +++ /dev/null @@ -1,13 +0,0 @@
/*!
jQuery UI - v1.12.1 - 2016-09-14 -* http://jqueryui.com -* Includes: widget.js, position.js, data.js, disable-selection.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js, focusable.js, form-reset-mixin.js, jquery-1-7.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/draggable.js, widgets/droppable.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/resizable.js, widgets/selectable.js, widgets/selectmenu.js, widgets/slider.js, widgets/sortable.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js -* Copyright jQuery Foundation and other contributors; Licensed MIT */ - -(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):t(jQuery)})(function(t){function e(t){for(var e=t.css("visibility");"inherit"===e;)t=t.parent(),e=t.css("visibility");return"hidden"!==e}function i(t){for(var e,i;t.length&&t[0]!==document;){if(e=t.css("position"),("absolute"===e||"relative"===e||"fixed"===e)&&(i=parseInt(t.css("zIndex"),10),!isNaN(i)&&0!==i))return i;t=t.parent()}return 0}function s(){this._curInst=null,this._keyEvent=!1,this._disabledInputs=[],this._datepickerShowing=!1,this._inDialog=!1,this._mainDivId="ui-datepicker-div",this._inlineClass="ui-datepicker-inline",this._appendClass="ui-datepicker-append",this._triggerClass="ui-datepicker-trigger",this._dialogClass="ui-datepicker-dialog",this._disableClass="ui-datepicker-disabled",this._unselectableClass="ui-datepicker-unselectable",this._currentClass="ui-datepicker-current-day",this._dayOverClass="ui-datepicker-days-cell-over",this.regional=[],this.regional[""]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],weekHeader:"Wk",dateFormat:"mm/dd/yy",firstDay:0,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},this._defaults={showOn:"focus",showAnim:"fadeIn",showOptions:{},defaultDate:null,appendText:"",buttonText:"...",buttonImage:"",buttonImageOnly:!1,hideIfNoPrevNext:!1,navigationAsDateFormat:!1,gotoCurrent:!1,changeMonth:!1,changeYear:!1,yearRange:"c-10:c+10",showOtherMonths:!1,selectOtherMonths:!1,showWeek:!1,calculateWeek:this.iso8601Week,shortYearCutoff:"+10",minDate:null,maxDate:null,duration:"fast",beforeShowDay:null,beforeShow:null,onSelect:null,onChangeMonthYear:null,onClose:null,numberOfMonths:1,showCurrentAtPos:0,stepMonths:1,stepBigMonths:12,altField:"",altFormat:"",constrainInput:!0,showButtonPanel:!1,autoSize:!1,disabled:!1},t.extend(this._defaults,this.regional[""]),this.regional.en=t.extend(!0,{},this.regional[""]),this.regional["en-US"]=t.extend(!0,{},this.regional.en),this.dpDiv=n(t("
"))}function n(e){var i="button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a";return e.on("mouseout",i,function(){t(this).removeClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).removeClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).removeClass("ui-datepicker-next-hover")}).on("mouseover",i,o)}function o(){t.datepicker._isDisabledDatepicker(m.inline?m.dpDiv.parent()[0]:m.input[0])||(t(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"),t(this).addClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).addClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).addClass("ui-datepicker-next-hover"))}function a(e,i){t.extend(e,i);for(var s in i)null==i[s]&&(e[s]=i[s]);return e}function r(t){return function(){var e=this.element.val();t.apply(this,arguments),this._refresh(),e!==this.element.val()&&this._trigger("change")}}t.ui=t.ui||{},t.ui.version="1.12.1";var h=0,l=Array.prototype.slice;t.cleanData=function(e){return function(i){var s,n,o;for(o=0;null!=(n=i[o]);o++)try{s=t._data(n,"events"),s&&s.remove&&t(n).triggerHandler("remove")}catch(a){}e(i)}}(t.cleanData),t.widget=function(e,i,s){var n,o,a,r={},h=e.split(".")[0];e=e.split(".")[1];var l=h+"-"+e;return s||(s=i,i=t.Widget),t.isArray(s)&&(s=t.extend.apply(null,[{}].concat(s))),t.expr[":"][l.toLowerCase()]=function(e){return!!t.data(e,l)},t[h]=t[h]||{},n=t[h][e],o=t[h][e]=function(t,e){return this._createWidget?(arguments.length&&this._createWidget(t,e),void 0):new o(t,e)},t.extend(o,n,{version:s.version,_proto:t.extend({},s),_childConstructors:[]}),a=new i,a.options=t.widget.extend({},a.options),t.each(s,function(e,s){return t.isFunction(s)?(r[e]=function(){function t(){return i.prototype[e].apply(this,arguments)}function n(t){return i.prototype[e].apply(this,t)}return function(){var e,i=this._super,o=this._superApply;return this._super=t,this._superApply=n,e=s.apply(this,arguments),this._super=i,this._superApply=o,e}}(),void 0):(r[e]=s,void 0)}),o.prototype=t.widget.extend(a,{widgetEventPrefix:n?a.widgetEventPrefix||e:e},r,{constructor:o,namespace:h,widgetName:e,widgetFullName:l}),n?(t.each(n._childConstructors,function(e,i){var s=i.prototype;t.widget(s.namespace+"."+s.widgetName,o,i._proto)}),delete n._childConstructors):i._childConstructors.push(o),t.widget.bridge(e,o),o},t.widget.extend=function(e){for(var i,s,n=l.call(arguments,1),o=0,a=n.length;a>o;o++)for(i in n[o])s=n[o][i],n[o].hasOwnProperty(i)&&void 0!==s&&(e[i]=t.isPlainObject(s)?t.isPlainObject(e[i])?t.widget.extend({},e[i],s):t.widget.extend({},s):s);return e},t.widget.bridge=function(e,i){var s=i.prototype.widgetFullName||e;t.fn[e]=function(n){var o="string"==typeof n,a=l.call(arguments,1),r=this;return o?this.length||"instance"!==n?this.each(function(){var i,o=t.data(this,s);return"instance"===n?(r=o,!1):o?t.isFunction(o[n])&&"_"!==n.charAt(0)?(i=o[n].apply(o,a),i!==o&&void 0!==i?(r=i&&i.jquery?r.pushStack(i.get()):i,!1):void 0):t.error("no such method '"+n+"' for "+e+" widget instance"):t.error("cannot call methods on "+e+" prior to initialization; "+"attempted to call method '"+n+"'")}):r=void 0:(a.length&&(n=t.widget.extend.apply(null,[n].concat(a))),this.each(function(){var e=t.data(this,s);e?(e.option(n||{}),e._init&&e._init()):t.data(this,s,new 
i(n,this))})),r}},t.Widget=function(){},t.Widget._childConstructors=[],t.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"
",options:{classes:{},disabled:!1,create:null},_createWidget:function(e,i){i=t(i||this.defaultElement||this)[0],this.element=t(i),this.uuid=h++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=t(),this.hoverable=t(),this.focusable=t(),this.classesElementLookup={},i!==this&&(t.data(i,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===i&&this.destroy()}}),this.document=t(i.style?i.ownerDocument:i.document||i),this.window=t(this.document[0].defaultView||this.document[0].parentWindow)),this.options=t.widget.extend({},this.options,this._getCreateOptions(),e),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:t.noop,_create:t.noop,_init:t.noop,destroy:function(){var e=this;this._destroy(),t.each(this.classesElementLookup,function(t,i){e._removeClass(i,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:t.noop,widget:function(){return this.element},option:function(e,i){var s,n,o,a=e;if(0===arguments.length)return t.widget.extend({},this.options);if("string"==typeof e)if(a={},s=e.split("."),e=s.shift(),s.length){for(n=a[e]=t.widget.extend({},this.options[e]),o=0;s.length-1>o;o++)n[s[o]]=n[s[o]]||{},n=n[s[o]];if(e=s.pop(),1===arguments.length)return void 0===n[e]?null:n[e];n[e]=i}else{if(1===arguments.length)return void 0===this.options[e]?null:this.options[e];a[e]=i}return this._setOptions(a),this},_setOptions:function(t){var e;for(e in t)this._setOption(e,t[e]);return this},_setOption:function(t,e){return"classes"===t&&this._setOptionClasses(e),this.options[t]=e,"disabled"===t&&this._setOptionDisabled(e),this},_setOptionClasses:function(e){var i,s,n;for(i in e)n=this.classesElementLookup[i],e[i]!==this.options.classes[i]&&n&&n.length&&(s=t(n.get()),this._removeClass(n,i),s.addClass(this._classes({element:s,keys:i,classes:e,add:!0})))},_setOptionDisabled:function(t){this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,!!t),t&&(this._removeClass(this.hoverable,null,"ui-state-hover"),this._removeClass(this.focusable,null,"ui-state-focus"))},enable:function(){return this._setOptions({disabled:!1})},disable:function(){return this._setOptions({disabled:!0})},_classes:function(e){function i(i,o){var a,r;for(r=0;i.length>r;r++)a=n.classesElementLookup[i[r]]||t(),a=e.add?t(t.unique(a.get().concat(e.element.get()))):t(a.not(e.element).get()),n.classesElementLookup[i[r]]=a,s.push(i[r]),o&&e.classes[i[r]]&&s.push(e.classes[i[r]])}var s=[],n=this;return e=t.extend({element:this.element,classes:this.options.classes||{}},e),this._on(e.element,{remove:"_untrackClassesElement"}),e.keys&&i(e.keys.match(/\S+/g)||[],!0),e.extra&&i(e.extra.match(/\S+/g)||[]),s.join(" ")},_untrackClassesElement:function(e){var i=this;t.each(i.classesElementLookup,function(s,n){-1!==t.inArray(e.target,n)&&(i.classesElementLookup[s]=t(n.not(e.target).get()))})},_removeClass:function(t,e,i){return this._toggleClass(t,e,i,!1)},_addClass:function(t,e,i){return this._toggleClass(t,e,i,!0)},_toggleClass:function(t,e,i,s){s="boolean"==typeof s?s:i;var n="string"==typeof t||null===t,o={extra:n?e:i,keys:n?t:e,element:n?this.element:t,add:s};return o.element.toggleClass(this._classes(o),s),this},_on:function(e,i,s){var n,o=this;"boolean"!=typeof 
e&&(s=i,i=e,e=!1),s?(i=n=t(i),this.bindings=this.bindings.add(i)):(s=i,i=this.element,n=this.widget()),t.each(s,function(s,a){function r(){return e||o.options.disabled!==!0&&!t(this).hasClass("ui-state-disabled")?("string"==typeof a?o[a]:a).apply(o,arguments):void 0}"string"!=typeof a&&(r.guid=a.guid=a.guid||r.guid||t.guid++);var h=s.match(/^([\w:-]*)\s*(.*)$/),l=h[1]+o.eventNamespace,c=h[2];c?n.on(l,c,r):i.on(l,r)})},_off:function(e,i){i=(i||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,e.off(i).off(i),this.bindings=t(this.bindings.not(e).get()),this.focusable=t(this.focusable.not(e).get()),this.hoverable=t(this.hoverable.not(e).get())},_delay:function(t,e){function i(){return("string"==typeof t?s[t]:t).apply(s,arguments)}var s=this;return setTimeout(i,e||0)},_hoverable:function(e){this.hoverable=this.hoverable.add(e),this._on(e,{mouseenter:function(e){this._addClass(t(e.currentTarget),null,"ui-state-hover")},mouseleave:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-hover")}})},_focusable:function(e){this.focusable=this.focusable.add(e),this._on(e,{focusin:function(e){this._addClass(t(e.currentTarget),null,"ui-state-focus")},focusout:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-focus")}})},_trigger:function(e,i,s){var n,o,a=this.options[e];if(s=s||{},i=t.Event(i),i.type=(e===this.widgetEventPrefix?e:this.widgetEventPrefix+e).toLowerCase(),i.target=this.element[0],o=i.originalEvent)for(n in o)n in i||(i[n]=o[n]);return this.element.trigger(i,s),!(t.isFunction(a)&&a.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},t.each({show:"fadeIn",hide:"fadeOut"},function(e,i){t.Widget.prototype["_"+e]=function(s,n,o){"string"==typeof n&&(n={effect:n});var a,r=n?n===!0||"number"==typeof n?i:n.effect||i:e;n=n||{},"number"==typeof n&&(n={duration:n}),a=!t.isEmptyObject(n),n.complete=o,n.delay&&s.delay(n.delay),a&&t.effects&&t.effects.effect[r]?s[e](n):r!==e&&s[r]?s[r](n.duration,n.easing,o):s.queue(function(i){t(this)[e](),o&&o.call(s[0]),i()})}}),t.widget,function(){function e(t,e,i){return[parseFloat(t[0])*(u.test(t[0])?e/100:1),parseFloat(t[1])*(u.test(t[1])?i/100:1)]}function i(e,i){return parseInt(t.css(e,i),10)||0}function s(e){var i=e[0];return 9===i.nodeType?{width:e.width(),height:e.height(),offset:{top:0,left:0}}:t.isWindow(i)?{width:e.width(),height:e.height(),offset:{top:e.scrollTop(),left:e.scrollLeft()}}:i.preventDefault?{width:0,height:0,offset:{top:i.pageY,left:i.pageX}}:{width:e.outerWidth(),height:e.outerHeight(),offset:e.offset()}}var n,o=Math.max,a=Math.abs,r=/left|center|right/,h=/top|center|bottom/,l=/[\+\-]\d+(\.[\d]+)?%?/,c=/^\w+/,u=/%$/,d=t.fn.position;t.position={scrollbarWidth:function(){if(void 0!==n)return n;var e,i,s=t("
"),o=s.children()[0];return t("body").append(s),e=o.offsetWidth,s.css("overflow","scroll"),i=o.offsetWidth,e===i&&(i=s[0].clientWidth),s.remove(),n=e-i},getScrollInfo:function(e){var i=e.isWindow||e.isDocument?"":e.element.css("overflow-x"),s=e.isWindow||e.isDocument?"":e.element.css("overflow-y"),n="scroll"===i||"auto"===i&&e.widthi?"left":e>0?"right":"center",vertical:0>r?"top":s>0?"bottom":"middle"};l>p&&p>a(e+i)&&(u.horizontal="center"),c>f&&f>a(s+r)&&(u.vertical="middle"),u.important=o(a(e),a(i))>o(a(s),a(r))?"horizontal":"vertical",n.using.call(this,t,u)}),h.offset(t.extend(D,{using:r}))})},t.ui.position={fit:{left:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollLeft:s.offset.left,a=s.width,r=t.left-e.collisionPosition.marginLeft,h=n-r,l=r+e.collisionWidth-a-n;e.collisionWidth>a?h>0&&0>=l?(i=t.left+h+e.collisionWidth-a-n,t.left+=h-i):t.left=l>0&&0>=h?n:h>l?n+a-e.collisionWidth:n:h>0?t.left+=h:l>0?t.left-=l:t.left=o(t.left-r,t.left)},top:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollTop:s.offset.top,a=e.within.height,r=t.top-e.collisionPosition.marginTop,h=n-r,l=r+e.collisionHeight-a-n;e.collisionHeight>a?h>0&&0>=l?(i=t.top+h+e.collisionHeight-a-n,t.top+=h-i):t.top=l>0&&0>=h?n:h>l?n+a-e.collisionHeight:n:h>0?t.top+=h:l>0?t.top-=l:t.top=o(t.top-r,t.top)}},flip:{left:function(t,e){var i,s,n=e.within,o=n.offset.left+n.scrollLeft,r=n.width,h=n.isWindow?n.scrollLeft:n.offset.left,l=t.left-e.collisionPosition.marginLeft,c=l-h,u=l+e.collisionWidth-r-h,d="left"===e.my[0]?-e.elemWidth:"right"===e.my[0]?e.elemWidth:0,p="left"===e.at[0]?e.targetWidth:"right"===e.at[0]?-e.targetWidth:0,f=-2*e.offset[0];0>c?(i=t.left+d+p+f+e.collisionWidth-r-o,(0>i||a(c)>i)&&(t.left+=d+p+f)):u>0&&(s=t.left-e.collisionPosition.marginLeft+d+p+f-h,(s>0||u>a(s))&&(t.left+=d+p+f))},top:function(t,e){var i,s,n=e.within,o=n.offset.top+n.scrollTop,r=n.height,h=n.isWindow?n.scrollTop:n.offset.top,l=t.top-e.collisionPosition.marginTop,c=l-h,u=l+e.collisionHeight-r-h,d="top"===e.my[1],p=d?-e.elemHeight:"bottom"===e.my[1]?e.elemHeight:0,f="top"===e.at[1]?e.targetHeight:"bottom"===e.at[1]?-e.targetHeight:0,g=-2*e.offset[1];0>c?(s=t.top+p+f+g+e.collisionHeight-r-o,(0>s||a(c)>s)&&(t.top+=p+f+g)):u>0&&(i=t.top-e.collisionPosition.marginTop+p+f+g-h,(i>0||u>a(i))&&(t.top+=p+f+g))}},flipfit:{left:function(){t.ui.position.flip.left.apply(this,arguments),t.ui.position.fit.left.apply(this,arguments)},top:function(){t.ui.position.flip.top.apply(this,arguments),t.ui.position.fit.top.apply(this,arguments)}}}}(),t.ui.position,t.extend(t.expr[":"],{data:t.expr.createPseudo?t.expr.createPseudo(function(e){return function(i){return!!t.data(i,e)}}):function(e,i,s){return!!t.data(e,s[3])}}),t.fn.extend({disableSelection:function(){var t="onselectstart"in document.createElement("div")?"selectstart":"mousedown";return function(){return this.on(t+".ui-disableSelection",function(t){t.preventDefault()})}}(),enableSelection:function(){return this.off(".ui-disableSelection")}});var c="ui-effects-",u="ui-effects-style",d="ui-effects-animated",p=t;t.effects={effect:{}},function(t,e){function i(t,e,i){var s=u[e.type]||{};return null==t?i||!e.def?null:e.def:(t=s.floor?~~t:parseFloat(t),isNaN(t)?e.def:s.mod?(t+s.mod)%s.mod:0>t?0:t>s.max?s.max:t)}function s(i){var s=l(),n=s._rgba=[];return i=i.toLowerCase(),f(h,function(t,o){var a,r=o.re.exec(i),h=r&&o.parse(r),l=o.space||"rgba";return h?(a=s[l](h),s[c[l].cache]=a[c[l].cache],n=s._rgba=a._rgba,!1):e}),n.length?("0,0,0,0"===n.join()&&t.extend(n,o.transparent),s):o[i]}function n(t,e,i){return 
i=(i+1)%1,1>6*i?t+6*(e-t)*i:1>2*i?e:2>3*i?t+6*(e-t)*(2/3-i):t}var o,a="backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor textEmphasisColor",r=/^([\-+])=\s*(\d+\.?\d*)/,h=[{re:/rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[t[1],t[2],t[3],t[4]]}},{re:/rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[2.55*t[1],2.55*t[2],2.55*t[3],t[4]]}},{re:/#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,parse:function(t){return[parseInt(t[1],16),parseInt(t[2],16),parseInt(t[3],16)]}},{re:/#([a-f0-9])([a-f0-9])([a-f0-9])/,parse:function(t){return[parseInt(t[1]+t[1],16),parseInt(t[2]+t[2],16),parseInt(t[3]+t[3],16)]}},{re:/hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,space:"hsla",parse:function(t){return[t[1],t[2]/100,t[3]/100,t[4]]}}],l=t.Color=function(e,i,s,n){return new t.Color.fn.parse(e,i,s,n)},c={rgba:{props:{red:{idx:0,type:"byte"},green:{idx:1,type:"byte"},blue:{idx:2,type:"byte"}}},hsla:{props:{hue:{idx:0,type:"degrees"},saturation:{idx:1,type:"percent"},lightness:{idx:2,type:"percent"}}}},u={"byte":{floor:!0,max:255},percent:{max:1},degrees:{mod:360,floor:!0}},d=l.support={},p=t("

")[0],f=t.each;p.style.cssText="background-color:rgba(1,1,1,.5)",d.rgba=p.style.backgroundColor.indexOf("rgba")>-1,f(c,function(t,e){e.cache="_"+t,e.props.alpha={idx:3,type:"percent",def:1}}),l.fn=t.extend(l.prototype,{parse:function(n,a,r,h){if(n===e)return this._rgba=[null,null,null,null],this;(n.jquery||n.nodeType)&&(n=t(n).css(a),a=e);var u=this,d=t.type(n),p=this._rgba=[];return a!==e&&(n=[n,a,r,h],d="array"),"string"===d?this.parse(s(n)||o._default):"array"===d?(f(c.rgba.props,function(t,e){p[e.idx]=i(n[e.idx],e)}),this):"object"===d?(n instanceof l?f(c,function(t,e){n[e.cache]&&(u[e.cache]=n[e.cache].slice())}):f(c,function(e,s){var o=s.cache;f(s.props,function(t,e){if(!u[o]&&s.to){if("alpha"===t||null==n[t])return;u[o]=s.to(u._rgba)}u[o][e.idx]=i(n[t],e,!0)}),u[o]&&0>t.inArray(null,u[o].slice(0,3))&&(u[o][3]=1,s.from&&(u._rgba=s.from(u[o])))}),this):e},is:function(t){var i=l(t),s=!0,n=this;return f(c,function(t,o){var a,r=i[o.cache];return r&&(a=n[o.cache]||o.to&&o.to(n._rgba)||[],f(o.props,function(t,i){return null!=r[i.idx]?s=r[i.idx]===a[i.idx]:e})),s}),s},_space:function(){var t=[],e=this;return f(c,function(i,s){e[s.cache]&&t.push(i)}),t.pop()},transition:function(t,e){var s=l(t),n=s._space(),o=c[n],a=0===this.alpha()?l("transparent"):this,r=a[o.cache]||o.to(a._rgba),h=r.slice();return s=s[o.cache],f(o.props,function(t,n){var o=n.idx,a=r[o],l=s[o],c=u[n.type]||{};null!==l&&(null===a?h[o]=l:(c.mod&&(l-a>c.mod/2?a+=c.mod:a-l>c.mod/2&&(a-=c.mod)),h[o]=i((l-a)*e+a,n)))}),this[n](h)},blend:function(e){if(1===this._rgba[3])return this;var i=this._rgba.slice(),s=i.pop(),n=l(e)._rgba;return l(t.map(i,function(t,e){return(1-s)*n[e]+s*t}))},toRgbaString:function(){var e="rgba(",i=t.map(this._rgba,function(t,e){return null==t?e>2?1:0:t});return 1===i[3]&&(i.pop(),e="rgb("),e+i.join()+")"},toHslaString:function(){var e="hsla(",i=t.map(this.hsla(),function(t,e){return null==t&&(t=e>2?1:0),e&&3>e&&(t=Math.round(100*t)+"%"),t});return 1===i[3]&&(i.pop(),e="hsl("),e+i.join()+")"},toHexString:function(e){var i=this._rgba.slice(),s=i.pop();return e&&i.push(~~(255*s)),"#"+t.map(i,function(t){return t=(t||0).toString(16),1===t.length?"0"+t:t}).join("")},toString:function(){return 0===this._rgba[3]?"transparent":this.toRgbaString()}}),l.fn.parse.prototype=l.fn,c.hsla.to=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e,i,s=t[0]/255,n=t[1]/255,o=t[2]/255,a=t[3],r=Math.max(s,n,o),h=Math.min(s,n,o),l=r-h,c=r+h,u=.5*c;return e=h===r?0:s===r?60*(n-o)/l+360:n===r?60*(o-s)/l+120:60*(s-n)/l+240,i=0===l?0:.5>=u?l/c:l/(2-c),[Math.round(e)%360,i,u,null==a?1:a]},c.hsla.from=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e=t[0]/360,i=t[1],s=t[2],o=t[3],a=.5>=s?s*(1+i):s+i-s*i,r=2*s-a;return[Math.round(255*n(r,a,e+1/3)),Math.round(255*n(r,a,e)),Math.round(255*n(r,a,e-1/3)),o]},f(c,function(s,n){var o=n.props,a=n.cache,h=n.to,c=n.from;l.fn[s]=function(s){if(h&&!this[a]&&(this[a]=h(this._rgba)),s===e)return this[a].slice();var n,r=t.type(s),u="array"===r||"object"===r?s:arguments,d=this[a].slice();return f(o,function(t,e){var s=u["object"===r?t:e.idx];null==s&&(s=d[e.idx]),d[e.idx]=i(s,e)}),c?(n=l(c(d)),n[a]=d,n):l(d)},f(o,function(e,i){l.fn[e]||(l.fn[e]=function(n){var 
o,a=t.type(n),h="alpha"===e?this._hsla?"hsla":"rgba":s,l=this[h](),c=l[i.idx];return"undefined"===a?c:("function"===a&&(n=n.call(this,c),a=t.type(n)),null==n&&i.empty?this:("string"===a&&(o=r.exec(n),o&&(n=c+parseFloat(o[2])*("+"===o[1]?1:-1))),l[i.idx]=n,this[h](l)))})})}),l.hook=function(e){var i=e.split(" ");f(i,function(e,i){t.cssHooks[i]={set:function(e,n){var o,a,r="";if("transparent"!==n&&("string"!==t.type(n)||(o=s(n)))){if(n=l(o||n),!d.rgba&&1!==n._rgba[3]){for(a="backgroundColor"===i?e.parentNode:e;(""===r||"transparent"===r)&&a&&a.style;)try{r=t.css(a,"backgroundColor"),a=a.parentNode}catch(h){}n=n.blend(r&&"transparent"!==r?r:"_default")}n=n.toRgbaString()}try{e.style[i]=n}catch(h){}}},t.fx.step[i]=function(e){e.colorInit||(e.start=l(e.elem,i),e.end=l(e.end),e.colorInit=!0),t.cssHooks[i].set(e.elem,e.start.transition(e.end,e.pos))}})},l.hook(a),t.cssHooks.borderColor={expand:function(t){var e={};return f(["Top","Right","Bottom","Left"],function(i,s){e["border"+s+"Color"]=t}),e}},o=t.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(p),function(){function e(e){var i,s,n=e.ownerDocument.defaultView?e.ownerDocument.defaultView.getComputedStyle(e,null):e.currentStyle,o={};if(n&&n.length&&n[0]&&n[n[0]])for(s=n.length;s--;)i=n[s],"string"==typeof n[i]&&(o[t.camelCase(i)]=n[i]);else for(i in n)"string"==typeof n[i]&&(o[i]=n[i]);return o}function i(e,i){var s,o,a={};for(s in i)o=i[s],e[s]!==o&&(n[s]||(t.fx.step[s]||!isNaN(parseFloat(o)))&&(a[s]=o));return a}var s=["add","remove","toggle"],n={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};t.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(e,i){t.fx.step[i]=function(t){("none"!==t.end&&!t.setAttr||1===t.pos&&!t.setAttr)&&(p.style(t.elem,i,t.end),t.setAttr=!0)}}),t.fn.addBack||(t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.effects.animateClass=function(n,o,a,r){var h=t.speed(o,a,r);return this.queue(function(){var o,a=t(this),r=a.attr("class")||"",l=h.children?a.find("*").addBack():a;l=l.map(function(){var i=t(this);return{el:i,start:e(this)}}),o=function(){t.each(s,function(t,e){n[e]&&a[e+"Class"](n[e])})},o(),l=l.map(function(){return this.end=e(this.el[0]),this.diff=i(this.start,this.end),this}),a.attr("class",r),l=l.map(function(){var e=this,i=t.Deferred(),s=t.extend({},h,{queue:!1,complete:function(){i.resolve(e)}});return this.el.animate(this.diff,s),i.promise()}),t.when.apply(t,l.get()).done(function(){o(),t.each(arguments,function(){var e=this.el;t.each(this.diff,function(t){e.css(t,"")})}),h.complete.call(a[0])})})},t.fn.extend({addClass:function(e){return function(i,s,n,o){return s?t.effects.animateClass.call(this,{add:i},s,n,o):e.apply(this,arguments)}}(t.fn.addClass),removeClass:function(e){return function(i,s,n,o){return arguments.length>1?t.effects.animateClass.call(this,{remove:i},s,n,o):e.apply(this,arguments)}}(t.fn.removeClass),toggleClass:function(e){return function(i,s,n,o,a){return"boolean"==typeof s||void 
0===s?n?t.effects.animateClass.call(this,s?{add:i}:{remove:i},n,o,a):e.apply(this,arguments):t.effects.animateClass.call(this,{toggle:i},s,n,o)}}(t.fn.toggleClass),switchClass:function(e,i,s,n,o){return t.effects.animateClass.call(this,{add:i,remove:e},s,n,o)}})}(),function(){function e(e,i,s,n){return t.isPlainObject(e)&&(i=e,e=e.effect),e={effect:e},null==i&&(i={}),t.isFunction(i)&&(n=i,s=null,i={}),("number"==typeof i||t.fx.speeds[i])&&(n=s,s=i,i={}),t.isFunction(s)&&(n=s,s=null),i&&t.extend(e,i),s=s||i.duration,e.duration=t.fx.off?0:"number"==typeof s?s:s in t.fx.speeds?t.fx.speeds[s]:t.fx.speeds._default,e.complete=n||i.complete,e}function i(e){return!e||"number"==typeof e||t.fx.speeds[e]?!0:"string"!=typeof e||t.effects.effect[e]?t.isFunction(e)?!0:"object"!=typeof e||e.effect?!1:!0:!0}function s(t,e){var i=e.outerWidth(),s=e.outerHeight(),n=/^rect\((-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto)\)$/,o=n.exec(t)||["",0,i,s,0];return{top:parseFloat(o[1])||0,right:"auto"===o[2]?i:parseFloat(o[2]),bottom:"auto"===o[3]?s:parseFloat(o[3]),left:parseFloat(o[4])||0}}t.expr&&t.expr.filters&&t.expr.filters.animated&&(t.expr.filters.animated=function(e){return function(i){return!!t(i).data(d)||e(i)}}(t.expr.filters.animated)),t.uiBackCompat!==!1&&t.extend(t.effects,{save:function(t,e){for(var i=0,s=e.length;s>i;i++)null!==e[i]&&t.data(c+e[i],t[0].style[e[i]])},restore:function(t,e){for(var i,s=0,n=e.length;n>s;s++)null!==e[s]&&(i=t.data(c+e[s]),t.css(e[s],i))},setMode:function(t,e){return"toggle"===e&&(e=t.is(":hidden")?"show":"hide"),e},createWrapper:function(e){if(e.parent().is(".ui-effects-wrapper"))return e.parent();var i={width:e.outerWidth(!0),height:e.outerHeight(!0),"float":e.css("float")},s=t("
<div></div>
").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),n={width:e.width(),height:e.height()},o=document.activeElement;try{o.id}catch(a){o=document.body}return e.wrap(s),(e[0]===o||t.contains(e[0],o))&&t(o).trigger("focus"),s=e.parent(),"static"===e.css("position")?(s.css({position:"relative"}),e.css({position:"relative"})):(t.extend(i,{position:e.css("position"),zIndex:e.css("z-index")}),t.each(["top","left","bottom","right"],function(t,s){i[s]=e.css(s),isNaN(parseInt(i[s],10))&&(i[s]="auto")}),e.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),e.css(n),s.css(i).show()},removeWrapper:function(e){var i=document.activeElement;return e.parent().is(".ui-effects-wrapper")&&(e.parent().replaceWith(e),(e[0]===i||t.contains(e[0],i))&&t(i).trigger("focus")),e}}),t.extend(t.effects,{version:"1.12.1",define:function(e,i,s){return s||(s=i,i="effect"),t.effects.effect[e]=s,t.effects.effect[e].mode=i,s},scaledDimensions:function(t,e,i){if(0===e)return{height:0,width:0,outerHeight:0,outerWidth:0};var s="horizontal"!==i?(e||100)/100:1,n="vertical"!==i?(e||100)/100:1;return{height:t.height()*n,width:t.width()*s,outerHeight:t.outerHeight()*n,outerWidth:t.outerWidth()*s}},clipToBox:function(t){return{width:t.clip.right-t.clip.left,height:t.clip.bottom-t.clip.top,left:t.clip.left,top:t.clip.top}},unshift:function(t,e,i){var s=t.queue();e>1&&s.splice.apply(s,[1,0].concat(s.splice(e,i))),t.dequeue()},saveStyle:function(t){t.data(u,t[0].style.cssText)},restoreStyle:function(t){t[0].style.cssText=t.data(u)||"",t.removeData(u)},mode:function(t,e){var i=t.is(":hidden");return"toggle"===e&&(e=i?"show":"hide"),(i?"hide"===e:"show"===e)&&(e="none"),e},getBaseline:function(t,e){var i,s;switch(t[0]){case"top":i=0;break;case"middle":i=.5;break;case"bottom":i=1;break;default:i=t[0]/e.height}switch(t[1]){case"left":s=0;break;case"center":s=.5;break;case"right":s=1;break;default:s=t[1]/e.width}return{x:s,y:i}},createPlaceholder:function(e){var i,s=e.css("position"),n=e.position();return e.css({marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()),/^(static|relative)/.test(s)&&(s="absolute",i=t("<"+e[0].nodeName+">").insertAfter(e).css({display:/^(inline|ruby)/.test(e.css("display"))?"inline-block":"block",visibility:"hidden",marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight"),"float":e.css("float")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).addClass("ui-effects-placeholder"),e.data(c+"placeholder",i)),e.css({position:s,left:n.left,top:n.top}),i},removePlaceholder:function(t){var e=c+"placeholder",i=t.data(e);i&&(i.remove(),t.removeData(e))},cleanUp:function(e){t.effects.restoreStyle(e),t.effects.removePlaceholder(e)},setTransition:function(e,i,s,n){return n=n||{},t.each(i,function(t,i){var o=e.cssUnit(i);o[0]>0&&(n[i]=o[0]*s+o[1])}),n}}),t.fn.extend({effect:function(){function i(e){function i(){r.removeData(d),t.effects.cleanUp(r),"hide"===s.mode&&r.hide(),a()}function a(){t.isFunction(h)&&h.call(r[0]),t.isFunction(e)&&e()}var r=t(this);s.mode=c.shift(),t.uiBackCompat===!1||o?"none"===s.mode?(r[l](),a()):n.call(r[0],s,i):(r.is(":hidden")?"hide"===l:"show"===l)?(r[l](),a()):n.call(r[0],s,a)}var s=e.apply(this,arguments),n=t.effects.effect[s.effect],o=n.mode,a=s.queue,r=a||"fx",h=s.complete,l=s.mode,c=[],u=function(e){var 
i=t(this),s=t.effects.mode(i,l)||o;i.data(d,!0),c.push(s),o&&("show"===s||s===o&&"hide"===s)&&i.show(),o&&"none"===s||t.effects.saveStyle(i),t.isFunction(e)&&e()};return t.fx.off||!n?l?this[l](s.duration,h):this.each(function(){h&&h.call(this)}):a===!1?this.each(u).each(i):this.queue(r,u).queue(r,i)},show:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="show",this.effect.call(this,n)
}}(t.fn.show),hide:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="hide",this.effect.call(this,n)}}(t.fn.hide),toggle:function(t){return function(s){if(i(s)||"boolean"==typeof s)return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="toggle",this.effect.call(this,n)}}(t.fn.toggle),cssUnit:function(e){var i=this.css(e),s=[];return t.each(["em","px","%","pt"],function(t,e){i.indexOf(e)>0&&(s=[parseFloat(i),e])}),s},cssClip:function(t){return t?this.css("clip","rect("+t.top+"px "+t.right+"px "+t.bottom+"px "+t.left+"px)"):s(this.css("clip"),this)},transfer:function(e,i){var s=t(this),n=t(e.to),o="fixed"===n.css("position"),a=t("body"),r=o?a.scrollTop():0,h=o?a.scrollLeft():0,l=n.offset(),c={top:l.top-r,left:l.left-h,height:n.innerHeight(),width:n.innerWidth()},u=s.offset(),d=t("<div class='ui-effects-transfer'></div>
").appendTo("body").addClass(e.className).css({top:u.top-r,left:u.left-h,height:s.innerHeight(),width:s.innerWidth(),position:o?"fixed":"absolute"}).animate(c,e.duration,e.easing,function(){d.remove(),t.isFunction(i)&&i()})}}),t.fx.step.clip=function(e){e.clipInit||(e.start=t(e.elem).cssClip(),"string"==typeof e.end&&(e.end=s(e.end,e.elem)),e.clipInit=!0),t(e.elem).cssClip({top:e.pos*(e.end.top-e.start.top)+e.start.top,right:e.pos*(e.end.right-e.start.right)+e.start.right,bottom:e.pos*(e.end.bottom-e.start.bottom)+e.start.bottom,left:e.pos*(e.end.left-e.start.left)+e.start.left})}}(),function(){var e={};t.each(["Quad","Cubic","Quart","Quint","Expo"],function(t,i){e[i]=function(e){return Math.pow(e,t+2)}}),t.extend(e,{Sine:function(t){return 1-Math.cos(t*Math.PI/2)},Circ:function(t){return 1-Math.sqrt(1-t*t)},Elastic:function(t){return 0===t||1===t?t:-Math.pow(2,8*(t-1))*Math.sin((80*(t-1)-7.5)*Math.PI/15)},Back:function(t){return t*t*(3*t-2)},Bounce:function(t){for(var e,i=4;((e=Math.pow(2,--i))-1)/11>t;);return 1/Math.pow(4,3-i)-7.5625*Math.pow((3*e-2)/22-t,2)}}),t.each(e,function(e,i){t.easing["easeIn"+e]=i,t.easing["easeOut"+e]=function(t){return 1-i(1-t)},t.easing["easeInOut"+e]=function(t){return.5>t?i(2*t)/2:1-i(-2*t+2)/2}})}();var f=t.effects;t.effects.define("blind","hide",function(e,i){var s={up:["bottom","top"],vertical:["bottom","top"],down:["top","bottom"],left:["right","left"],horizontal:["right","left"],right:["left","right"]},n=t(this),o=e.direction||"up",a=n.cssClip(),r={clip:t.extend({},a)},h=t.effects.createPlaceholder(n);r.clip[s[o][0]]=r.clip[s[o][1]],"show"===e.mode&&(n.cssClip(r.clip),h&&h.css(t.effects.clipToBox(r)),r.clip=a),h&&h.animate(t.effects.clipToBox(r),e.duration,e.easing),n.animate(r,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("bounce",function(e,i){var s,n,o,a=t(this),r=e.mode,h="hide"===r,l="show"===r,c=e.direction||"up",u=e.distance,d=e.times||5,p=2*d+(l||h?1:0),f=e.duration/p,g=e.easing,m="up"===c||"down"===c?"top":"left",_="up"===c||"left"===c,v=0,b=a.queue().length;for(t.effects.createPlaceholder(a),o=a.css(m),u||(u=a["top"===m?"outerHeight":"outerWidth"]()/3),l&&(n={opacity:1},n[m]=o,a.css("opacity",0).css(m,_?2*-u:2*u).animate(n,f,g)),h&&(u/=Math.pow(2,d-1)),n={},n[m]=o;d>v;v++)s={},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g).animate(n,f,g),u=h?2*u:u/2;h&&(s={opacity:0},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g)),a.queue(i),t.effects.unshift(a,b,p+1)}),t.effects.define("clip","hide",function(e,i){var s,n={},o=t(this),a=e.direction||"vertical",r="both"===a,h=r||"horizontal"===a,l=r||"vertical"===a;s=o.cssClip(),n.clip={top:l?(s.bottom-s.top)/2:s.top,right:h?(s.right-s.left)/2:s.right,bottom:l?(s.bottom-s.top)/2:s.bottom,left:h?(s.right-s.left)/2:s.left},t.effects.createPlaceholder(o),"show"===e.mode&&(o.cssClip(n.clip),n.clip=s),o.animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("drop","hide",function(e,i){var s,n=t(this),o=e.mode,a="show"===o,r=e.direction||"left",h="up"===r||"down"===r?"top":"left",l="up"===r||"left"===r?"-=":"+=",c="+="===l?"-=":"+=",u={opacity:0};t.effects.createPlaceholder(n),s=e.distance||n["top"===h?"outerHeight":"outerWidth"](!0)/2,u[h]=l+s,a&&(n.css(u),u[h]=c+s,u.opacity=1),n.animate(u,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("explode","hide",function(e,i){function s(){b.push(this),b.length===u*d&&n()}function n(){p.css({visibility:"visible"}),t(b).remove(),i()}var 
o,a,r,h,l,c,u=e.pieces?Math.round(Math.sqrt(e.pieces)):3,d=u,p=t(this),f=e.mode,g="show"===f,m=p.show().css("visibility","hidden").offset(),_=Math.ceil(p.outerWidth()/d),v=Math.ceil(p.outerHeight()/u),b=[];for(o=0;u>o;o++)for(h=m.top+o*v,c=o-(u-1)/2,a=0;d>a;a++)r=m.left+a*_,l=a-(d-1)/2,p.clone().appendTo("body").wrap("
<div></div>
").css({position:"absolute",visibility:"visible",left:-a*_,top:-o*v}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:_,height:v,left:r+(g?l*_:0),top:h+(g?c*v:0),opacity:g?0:1}).animate({left:r+(g?0:l*_),top:h+(g?0:c*v),opacity:g?1:0},e.duration||500,e.easing,s)}),t.effects.define("fade","toggle",function(e,i){var s="show"===e.mode;t(this).css("opacity",s?0:1).animate({opacity:s?1:0},{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("fold","hide",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=e.size||15,h=/([0-9]+)%/.exec(r),l=!!e.horizFirst,c=l?["right","bottom"]:["bottom","right"],u=e.duration/2,d=t.effects.createPlaceholder(s),p=s.cssClip(),f={clip:t.extend({},p)},g={clip:t.extend({},p)},m=[p[c[0]],p[c[1]]],_=s.queue().length;h&&(r=parseInt(h[1],10)/100*m[a?0:1]),f.clip[c[0]]=r,g.clip[c[0]]=r,g.clip[c[1]]=0,o&&(s.cssClip(g.clip),d&&d.css(t.effects.clipToBox(g)),g.clip=p),s.queue(function(i){d&&d.animate(t.effects.clipToBox(f),u,e.easing).animate(t.effects.clipToBox(g),u,e.easing),i()}).animate(f,u,e.easing).animate(g,u,e.easing).queue(i),t.effects.unshift(s,_,4)}),t.effects.define("highlight","show",function(e,i){var s=t(this),n={backgroundColor:s.css("backgroundColor")};"hide"===e.mode&&(n.opacity=0),t.effects.saveStyle(s),s.css({backgroundImage:"none",backgroundColor:e.color||"#ffff99"}).animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("size",function(e,i){var s,n,o,a=t(this),r=["fontSize"],h=["borderTopWidth","borderBottomWidth","paddingTop","paddingBottom"],l=["borderLeftWidth","borderRightWidth","paddingLeft","paddingRight"],c=e.mode,u="effect"!==c,d=e.scale||"both",p=e.origin||["middle","center"],f=a.css("position"),g=a.position(),m=t.effects.scaledDimensions(a),_=e.from||m,v=e.to||t.effects.scaledDimensions(a,0);t.effects.createPlaceholder(a),"show"===c&&(o=_,_=v,v=o),n={from:{y:_.height/m.height,x:_.width/m.width},to:{y:v.height/m.height,x:v.width/m.width}},("box"===d||"both"===d)&&(n.from.y!==n.to.y&&(_=t.effects.setTransition(a,h,n.from.y,_),v=t.effects.setTransition(a,h,n.to.y,v)),n.from.x!==n.to.x&&(_=t.effects.setTransition(a,l,n.from.x,_),v=t.effects.setTransition(a,l,n.to.x,v))),("content"===d||"both"===d)&&n.from.y!==n.to.y&&(_=t.effects.setTransition(a,r,n.from.y,_),v=t.effects.setTransition(a,r,n.to.y,v)),p&&(s=t.effects.getBaseline(p,m),_.top=(m.outerHeight-_.outerHeight)*s.y+g.top,_.left=(m.outerWidth-_.outerWidth)*s.x+g.left,v.top=(m.outerHeight-v.outerHeight)*s.y+g.top,v.left=(m.outerWidth-v.outerWidth)*s.x+g.left),a.css(_),("content"===d||"both"===d)&&(h=h.concat(["marginTop","marginBottom"]).concat(r),l=l.concat(["marginLeft","marginRight"]),a.find("*[width]").each(function(){var i=t(this),s=t.effects.scaledDimensions(i),o={height:s.height*n.from.y,width:s.width*n.from.x,outerHeight:s.outerHeight*n.from.y,outerWidth:s.outerWidth*n.from.x},a={height:s.height*n.to.y,width:s.width*n.to.x,outerHeight:s.height*n.to.y,outerWidth:s.width*n.to.x};n.from.y!==n.to.y&&(o=t.effects.setTransition(i,h,n.from.y,o),a=t.effects.setTransition(i,h,n.to.y,a)),n.from.x!==n.to.x&&(o=t.effects.setTransition(i,l,n.from.x,o),a=t.effects.setTransition(i,l,n.to.x,a)),u&&t.effects.saveStyle(i),i.css(o),i.animate(a,e.duration,e.easing,function(){u&&t.effects.restoreStyle(i)})})),a.animate(v,{queue:!1,duration:e.duration,easing:e.easing,complete:function(){var 
e=a.offset();0===v.opacity&&a.css("opacity",_.opacity),u||(a.css("position","static"===f?"relative":f).offset(e),t.effects.saveStyle(a)),i()}})}),t.effects.define("scale",function(e,i){var s=t(this),n=e.mode,o=parseInt(e.percent,10)||(0===parseInt(e.percent,10)?0:"effect"!==n?0:100),a=t.extend(!0,{from:t.effects.scaledDimensions(s),to:t.effects.scaledDimensions(s,o,e.direction||"both"),origin:e.origin||["middle","center"]},e);e.fade&&(a.from.opacity=1,a.to.opacity=0),t.effects.effect.size.call(this,a,i)}),t.effects.define("puff","hide",function(e,i){var s=t.extend(!0,{},e,{fade:!0,percent:parseInt(e.percent,10)||150});t.effects.effect.scale.call(this,s,i)}),t.effects.define("pulsate","show",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=o||a,h=2*(e.times||5)+(r?1:0),l=e.duration/h,c=0,u=1,d=s.queue().length;for((o||!s.is(":visible"))&&(s.css("opacity",0).show(),c=1);h>u;u++)s.animate({opacity:c},l,e.easing),c=1-c;s.animate({opacity:c},l,e.easing),s.queue(i),t.effects.unshift(s,d,h+1)}),t.effects.define("shake",function(e,i){var s=1,n=t(this),o=e.direction||"left",a=e.distance||20,r=e.times||3,h=2*r+1,l=Math.round(e.duration/h),c="up"===o||"down"===o?"top":"left",u="up"===o||"left"===o,d={},p={},f={},g=n.queue().length;for(t.effects.createPlaceholder(n),d[c]=(u?"-=":"+=")+a,p[c]=(u?"+=":"-=")+2*a,f[c]=(u?"-=":"+=")+2*a,n.animate(d,l,e.easing);r>s;s++)n.animate(p,l,e.easing).animate(f,l,e.easing);n.animate(p,l,e.easing).animate(d,l/2,e.easing).queue(i),t.effects.unshift(n,g,h+1)}),t.effects.define("slide","show",function(e,i){var s,n,o=t(this),a={up:["bottom","top"],down:["top","bottom"],left:["right","left"],right:["left","right"]},r=e.mode,h=e.direction||"left",l="up"===h||"down"===h?"top":"left",c="up"===h||"left"===h,u=e.distance||o["top"===l?"outerHeight":"outerWidth"](!0),d={};t.effects.createPlaceholder(o),s=o.cssClip(),n=o.position()[l],d[l]=(c?-1:1)*u+n,d.clip=o.cssClip(),d.clip[a[h][1]]=d.clip[a[h][0]],"show"===r&&(o.cssClip(d.clip),o.css(l,d[l]),d.clip=s,d[l]=n),o.animate(d,{queue:!1,duration:e.duration,easing:e.easing,complete:i})});var f;t.uiBackCompat!==!1&&(f=t.effects.define("transfer",function(e,i){t(this).transfer(e,i)})),t.ui.focusable=function(i,s){var n,o,a,r,h,l=i.nodeName.toLowerCase();return"area"===l?(n=i.parentNode,o=n.name,i.href&&o&&"map"===n.nodeName.toLowerCase()?(a=t("img[usemap='#"+o+"']"),a.length>0&&a.is(":visible")):!1):(/^(input|select|textarea|button|object)$/.test(l)?(r=!i.disabled,r&&(h=t(i).closest("fieldset")[0],h&&(r=!h.disabled))):r="a"===l?i.href||s:s,r&&t(i).is(":visible")&&e(t(i)))},t.extend(t.expr[":"],{focusable:function(e){return t.ui.focusable(e,null!=t.attr(e,"tabindex"))}}),t.ui.focusable,t.fn.form=function(){return"string"==typeof this[0].form?this.closest("form"):t(this[0].form)},t.ui.formResetMixin={_formResetHandler:function(){var e=t(this);setTimeout(function(){var i=e.data("ui-form-reset-instances");t.each(i,function(){this.refresh()})})},_bindFormResetHandler:function(){if(this.form=this.element.form(),this.form.length){var t=this.form.data("ui-form-reset-instances")||[];t.length||this.form.on("reset.ui-form-reset",this._formResetHandler),t.push(this),this.form.data("ui-form-reset-instances",t)}},_unbindFormResetHandler:function(){if(this.form.length){var 
e=this.form.data("ui-form-reset-instances");e.splice(t.inArray(this,e),1),e.length?this.form.data("ui-form-reset-instances",e):this.form.removeData("ui-form-reset-instances").off("reset.ui-form-reset")}},"1.7"===t.fn.jquery.substring(0,3)&&(t.each(["Width","Height"],function(e,i){function s(e,i,s,o){return t.each(n,function(){i-=parseFloat(t.css(e,"padding"+this))||0,s&&(i-=parseFloat(t.css(e,"border"+this+"Width"))||0),o&&(i-=parseFloat(t.css(e,"margin"+this))||0)}),i}var n="Width"===i?["Left","Right"]:["Top","Bottom"],o=i.toLowerCase(),a={innerWidth:t.fn.innerWidth,innerHeight:t.fn.innerHeight,outerWidth:t.fn.outerWidth,outerHeight:t.fn.outerHeight};t.fn["inner"+i]=function(e){return void 0===e?a["inner"+i].call(this):this.each(function(){t(this).css(o,s(this,e)+"px")})},t.fn["outer"+i]=function(e,n){return"number"!=typeof e?a["outer"+i].call(this,e):this.each(function(){t(this).css(o,s(this,e,!0,n)+"px")})}}),t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.ui.keyCode={BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38},t.ui.escapeSelector=function(){var t=/([!"#$%&'()*+,.\/:;<=>?@[\]^`{|}~])/g;return function(e){return e.replace(t,"\\$1")}}(),t.fn.labels=function(){var e,i,s,n,o;return this[0].labels&&this[0].labels.length?this.pushStack(this[0].labels):(n=this.eq(0).parents("label"),s=this.attr("id"),s&&(e=this.eq(0).parents().last(),o=e.add(e.length?e.siblings():this.siblings()),i="label[for='"+t.ui.escapeSelector(s)+"']",n=n.add(o.find(i).addBack(i))),this.pushStack(n))},t.fn.scrollParent=function(e){var i=this.css("position"),s="absolute"===i,n=e?/(auto|scroll|hidden)/:/(auto|scroll)/,o=this.parents().filter(function(){var e=t(this);return s&&"static"===e.css("position")?!1:n.test(e.css("overflow")+e.css("overflow-y")+e.css("overflow-x"))}).eq(0);return"fixed"!==i&&o.length?o:t(this[0].ownerDocument||document)},t.extend(t.expr[":"],{tabbable:function(e){var i=t.attr(e,"tabindex"),s=null!=i;return(!s||i>=0)&&t.ui.focusable(e,s)}}),t.fn.extend({uniqueId:function(){var t=0;return function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++t)})}}(),removeUniqueId:function(){return this.each(function(){/^ui-id-\d+$/.test(this.id)&&t(this).removeAttr("id")})}}),t.widget("ui.accordion",{version:"1.12.1",options:{active:0,animate:{},classes:{"ui-accordion-header":"ui-corner-top","ui-accordion-header-collapsed":"ui-corner-all","ui-accordion-content":"ui-corner-bottom"},collapsible:!1,event:"click",header:"> li > :first-child, > :not(li):even",heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},hideProps:{borderTopWidth:"hide",borderBottomWidth:"hide",paddingTop:"hide",paddingBottom:"hide",height:"hide"},showProps:{borderTopWidth:"show",borderBottomWidth:"show",paddingTop:"show",paddingBottom:"show",height:"show"},_create:function(){var e=this.options;this.prevShow=this.prevHide=t(),this._addClass("ui-accordion","ui-widget ui-helper-reset"),this.element.attr("role","tablist"),e.collapsible||e.active!==!1&&null!=e.active||(e.active=0),this._processPanels(),0>e.active&&(e.active+=this.headers.length),this._refresh()},_getCreateEventData:function(){return{header:this.active,panel:this.active.length?this.active.next():t()}},_createIcons:function(){var e,i,s=this.options.icons;s&&(e=t("<span>"),this._addClass(e,"ui-accordion-header-icon","ui-icon 
"+s.header),e.prependTo(this.headers),i=this.active.children(".ui-accordion-header-icon"),this._removeClass(i,s.header)._addClass(i,null,s.activeHeader)._addClass(this.headers,"ui-accordion-icons"))},_destroyIcons:function(){this._removeClass(this.headers,"ui-accordion-icons"),this.headers.children(".ui-accordion-header-icon").remove()},_destroy:function(){var t;this.element.removeAttr("role"),this.headers.removeAttr("role aria-expanded aria-selected aria-controls tabIndex").removeUniqueId(),this._destroyIcons(),t=this.headers.next().css("display","").removeAttr("role aria-hidden aria-labelledby").removeUniqueId(),"content"!==this.options.heightStyle&&t.css("height","")},_setOption:function(t,e){return"active"===t?(this._activate(e),void 0):("event"===t&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(e)),this._super(t,e),"collapsible"!==t||e||this.options.active!==!1||this._activate(0),"icons"===t&&(this._destroyIcons(),e&&this._createIcons()),void 0)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t),this._toggleClass(this.headers.add(this.headers.next()),null,"ui-state-disabled",!!t)},_keydown:function(e){if(!e.altKey&&!e.ctrlKey){var i=t.ui.keyCode,s=this.headers.length,n=this.headers.index(e.target),o=!1;switch(e.keyCode){case i.RIGHT:case i.DOWN:o=this.headers[(n+1)%s];break;case i.LEFT:case i.UP:o=this.headers[(n-1+s)%s];break;case i.SPACE:case i.ENTER:this._eventHandler(e);break;case i.HOME:o=this.headers[0];break;case i.END:o=this.headers[s-1]}o&&(t(e.target).attr("tabIndex",-1),t(o).attr("tabIndex",0),t(o).trigger("focus"),e.preventDefault())}},_panelKeyDown:function(e){e.keyCode===t.ui.keyCode.UP&&e.ctrlKey&&t(e.currentTarget).prev().trigger("focus")},refresh:function(){var e=this.options;this._processPanels(),e.active===!1&&e.collapsible===!0||!this.headers.length?(e.active=!1,this.active=t()):e.active===!1?this._activate(0):this.active.length&&!t.contains(this.element[0],this.active[0])?this.headers.length===this.headers.find(".ui-state-disabled").length?(e.active=!1,this.active=t()):this._activate(Math.max(0,e.active-1)):e.active=this.headers.index(this.active),this._destroyIcons(),this._refresh()},_processPanels:function(){var t=this.headers,e=this.panels;this.headers=this.element.find(this.options.header),this._addClass(this.headers,"ui-accordion-header ui-accordion-header-collapsed","ui-state-default"),this.panels=this.headers.next().filter(":not(.ui-accordion-content-active)").hide(),this._addClass(this.panels,"ui-accordion-content","ui-helper-reset ui-widget-content"),e&&(this._off(t.not(this.headers)),this._off(e.not(this.panels)))},_refresh:function(){var e,i=this.options,s=i.heightStyle,n=this.element.parent();this.active=this._findActive(i.active),this._addClass(this.active,"ui-accordion-header-active","ui-state-active")._removeClass(this.active,"ui-accordion-header-collapsed"),this._addClass(this.active.next(),"ui-accordion-content-active"),this.active.next().show(),this.headers.attr("role","tab").each(function(){var 
e=t(this),i=e.uniqueId().attr("id"),s=e.next(),n=s.uniqueId().attr("id");e.attr("aria-controls",n),s.attr("aria-labelledby",i)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}).next().attr({"aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}).next().attr({"aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._createIcons(),this._setupEvents(i.event),"fill"===s?(e=n.height(),this.element.siblings(":visible").each(function(){var i=t(this),s=i.css("position");"absolute"!==s&&"fixed"!==s&&(e-=i.outerHeight(!0))}),this.headers.each(function(){e-=t(this).outerHeight(!0)}),this.headers.next().each(function(){t(this).height(Math.max(0,e-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===s&&(e=0,this.headers.next().each(function(){var i=t(this).is(":visible");i||t(this).show(),e=Math.max(e,t(this).css("height","").height()),i||t(this).hide()}).height(e))},_activate:function(e){var i=this._findActive(e)[0];i!==this.active[0]&&(i=i||this.active[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return"number"==typeof e?this.headers.eq(e):t()},_setupEvents:function(e){var i={keydown:"_keydown"};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.headers.add(this.headers.next())),this._on(this.headers,i),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._hoverable(this.headers),this._focusable(this.headers)},_eventHandler:function(e){var i,s,n=this.options,o=this.active,a=t(e.currentTarget),r=a[0]===o[0],h=r&&n.collapsible,l=h?t():a.next(),c=o.next(),u={oldHeader:o,oldPanel:c,newHeader:h?t():a,newPanel:l};e.preventDefault(),r&&!n.collapsible||this._trigger("beforeActivate",e,u)===!1||(n.active=h?!1:this.headers.index(a),this.active=r?t():a,this._toggle(u),this._removeClass(o,"ui-accordion-header-active","ui-state-active"),n.icons&&(i=o.children(".ui-accordion-header-icon"),this._removeClass(i,null,n.icons.activeHeader)._addClass(i,null,n.icons.header)),r||(this._removeClass(a,"ui-accordion-header-collapsed")._addClass(a,"ui-accordion-header-active","ui-state-active"),n.icons&&(s=a.children(".ui-accordion-header-icon"),this._removeClass(s,null,n.icons.header)._addClass(s,null,n.icons.activeHeader)),this._addClass(a.next(),"ui-accordion-content-active")))},_toggle:function(e){var i=e.newPanel,s=this.prevShow.length?this.prevShow:e.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=i,this.prevHide=s,this.options.animate?this._animate(i,s,e):(s.hide(),i.show(),this._toggleComplete(e)),s.attr({"aria-hidden":"true"}),s.prev().attr({"aria-selected":"false","aria-expanded":"false"}),i.length&&s.length?s.prev().attr({tabIndex:-1,"aria-expanded":"false"}):i.length&&this.headers.filter(function(){return 0===parseInt(t(this).attr("tabIndex"),10)}).attr("tabIndex",-1),i.attr("aria-hidden","false").prev().attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_animate:function(t,e,i){var s,n,o,a=this,r=0,h=t.css("box-sizing"),l=t.length&&(!e.length||t.index()",delay:300,options:{icons:{submenu:"ui-icon-caret-1-e"},items:"> *",menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.element.uniqueId().attr({role:this.options.role,tabIndex:0}),this._addClass("ui-menu","ui-widget 
ui-widget-content"),this._on({"mousedown .ui-menu-item":function(t){t.preventDefault()},"click .ui-menu-item":function(e){var i=t(e.target),s=t(t.ui.safeActiveElement(this.document[0]));!this.mouseHandled&&i.not(".ui-state-disabled").length&&(this.select(e),e.isPropagationStopped()||(this.mouseHandled=!0),i.has(".ui-menu").length?this.expand(e):!this.element.is(":focus")&&s.closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":function(e){if(!this.previousFilter){var i=t(e.target).closest(".ui-menu-item"),s=t(e.currentTarget);i[0]===s[0]&&(this._removeClass(s.siblings().children(".ui-state-active"),null,"ui-state-active"),this.focus(e,s))}},mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(t,e){var i=this.active||this.element.find(this.options.items).eq(0);e||this.focus(t,i)},blur:function(e){this._delay(function(){var i=!t.contains(this.element[0],t.ui.safeActiveElement(this.document[0]));i&&this.collapseAll(e)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){this._closeOnDocumentClick(t)&&this.collapseAll(t),this.mouseHandled=!1}})},_destroy:function(){var e=this.element.find(".ui-menu-item").removeAttr("role aria-disabled"),i=e.children(".ui-menu-item-wrapper").removeUniqueId().removeAttr("tabIndex role aria-haspopup");this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeAttr("role aria-labelledby aria-expanded aria-hidden aria-disabled tabIndex").removeUniqueId().show(),i.children().each(function(){var e=t(this);e.data("ui-menu-submenu-caret")&&e.remove()})},_keydown:function(e){var i,s,n,o,a=!0;switch(e.keyCode){case t.ui.keyCode.PAGE_UP:this.previousPage(e);break;case t.ui.keyCode.PAGE_DOWN:this.nextPage(e);break;case t.ui.keyCode.HOME:this._move("first","first",e);break;case t.ui.keyCode.END:this._move("last","last",e);break;case t.ui.keyCode.UP:this.previous(e);break;case t.ui.keyCode.DOWN:this.next(e);break;case t.ui.keyCode.LEFT:this.collapse(e);break;case t.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(e);break;case t.ui.keyCode.ENTER:case t.ui.keyCode.SPACE:this._activate(e);break;case t.ui.keyCode.ESCAPE:this.collapse(e);break;default:a=!1,s=this.previousFilter||"",o=!1,n=e.keyCode>=96&&105>=e.keyCode?""+(e.keyCode-96):String.fromCharCode(e.keyCode),clearTimeout(this.filterTimer),n===s?o=!0:n=s+n,i=this._filterMenuItems(n),i=o&&-1!==i.index(this.active.next())?this.active.nextAll(".ui-menu-item"):i,i.length||(n=String.fromCharCode(e.keyCode),i=this._filterMenuItems(n)),i.length?(this.focus(e,i),this.previousFilter=n,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter}a&&e.preventDefault()},_activate:function(t){this.active&&!this.active.is(".ui-state-disabled")&&(this.active.children("[aria-haspopup='true']").length?this.expand(t):this.select(t))},refresh:function(){var e,i,s,n,o,a=this,r=this.options.icons.submenu,h=this.element.find(this.options.menus);this._toggleClass("ui-menu-icons",null,!!this.element.find(".ui-icon").length),s=h.filter(":not(.ui-menu)").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var e=t(this),i=e.prev(),s=t("").data("ui-menu-submenu-caret",!0);a._addClass(s,"ui-menu-icon","ui-icon "+r),i.attr("aria-haspopup","true").prepend(s),e.attr("aria-labelledby",i.attr("id"))}),this._addClass(s,"ui-menu","ui-widget 
ui-widget-content ui-front"),e=h.add(this.element),i=e.find(this.options.items),i.not(".ui-menu-item").each(function(){var e=t(this);a._isDivider(e)&&a._addClass(e,"ui-menu-divider","ui-widget-content")}),n=i.not(".ui-menu-item, .ui-menu-divider"),o=n.children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(n,"ui-menu-item")._addClass(o,"ui-menu-item-wrapper"),i.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!t.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){if("icons"===t){var i=this.element.find(".ui-menu-icon");this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)}this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t+""),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var i,s,n;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),s=this.active.children(".ui-menu-item-wrapper"),this._addClass(s,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",s.attr("id")),n=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(n,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),i=e.children(".ui-menu"),i.length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(e){var i,s,n,o,a,r;this._hasScroll()&&(i=parseFloat(t.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(t.css(this.activeMenu[0],"paddingTop"))||0,n=e.offset().top-this.activeMenu.offset().top-i-s,o=this.activeMenu.scrollTop(),a=this.activeMenu.height(),r=e.outerHeight(),0>n?this.activeMenu.scrollTop(o+n):n+r>a&&this.activeMenu.scrollTop(o+n-a+r))},blur:function(t,e){e||clearTimeout(this.timer),this.active&&(this._removeClass(this.active.children(".ui-menu-item-wrapper"),null,"ui-state-active"),this._trigger("blur",t,{item:this.active}),this.active=null)},_startOpening:function(t){clearTimeout(this.timer),"true"===t.attr("aria-hidden")&&(this.timer=this._delay(function(){this._close(),this._open(t)},this.delay))},_open:function(e){var i=t.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(e.parents(".ui-menu")).hide().attr("aria-hidden","true"),e.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(i)},collapseAll:function(e,i){clearTimeout(this.timer),this.timer=this._delay(function(){var s=i?this.element:t(e&&e.target).closest(this.element.find(".ui-menu"));s.length||(s=this.element),this._close(s),this.blur(e),this._removeClass(s.find(".ui-state-active"),null,"ui-state-active"),this.activeMenu=s},this.delay)},_close:function(t){t||(t=this.active?this.active.parent():this.element),t.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false")},_closeOnDocumentClick:function(e){return!t(e.target).closest(".ui-menu").length},_isDivider:function(t){return!/[^\-\u2014\u2013\s]/.test(t.text())},collapse:function(t){var e=this.active&&this.active.parent().closest(".ui-menu-item",this.element);e&&e.length&&(this._close(),this.focus(t,e))},expand:function(t){var e=this.active&&this.active.children(".ui-menu 
").find(this.options.items).first();e&&e.length&&(this._open(e.parent()),this._delay(function(){this.focus(t,e)}))},next:function(t){this._move("next","first",t)},previous:function(t){this._move("prev","last",t)},isFirstItem:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(t,e,i){var s;this.active&&(s="first"===t||"last"===t?this.active["first"===t?"prevAll":"nextAll"](".ui-menu-item").eq(-1):this.active[t+"All"](".ui-menu-item").eq(0)),s&&s.length&&this.active||(s=this.activeMenu.find(this.options.items)[e]()),this.focus(i,s)},nextPage:function(e){var i,s,n;return this.active?(this.isLastItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return i=t(this),0>i.offset().top-s-n}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items)[this.active?"last":"first"]())),void 0):(this.next(e),void 0)},previousPage:function(e){var i,s,n;return this.active?(this.isFirstItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return i=t(this),i.offset().top-s+n>0}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items).first())),void 0):(this.next(e),void 0)},_hasScroll:function(){return this.element.outerHeight()",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,_create:function(){var e,i,s,n=this.element[0].nodeName.toLowerCase(),o="textarea"===n,a="input"===n; -this.isMultiLine=o||!a&&this._isContentEditable(this.element),this.valueMethod=this.element[o||a?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(n){if(this.element.prop("readOnly"))return e=!0,s=!0,i=!0,void 0;e=!1,s=!1,i=!1;var o=t.ui.keyCode;switch(n.keyCode){case o.PAGE_UP:e=!0,this._move("previousPage",n);break;case o.PAGE_DOWN:e=!0,this._move("nextPage",n);break;case o.UP:e=!0,this._keyEvent("previous",n);break;case o.DOWN:e=!0,this._keyEvent("next",n);break;case o.ENTER:this.menu.active&&(e=!0,n.preventDefault(),this.menu.select(n));break;case o.TAB:this.menu.active&&this.menu.select(n);break;case o.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(n),n.preventDefault());break;default:i=!0,this._searchTimeout(n)}},keypress:function(s){if(e)return e=!1,(!this.isMultiLine||this.menu.element.is(":visible"))&&s.preventDefault(),void 0;if(!i){var n=t.ui.keyCode;switch(s.keyCode){case n.PAGE_UP:this._move("previousPage",s);break;case n.PAGE_DOWN:this._move("nextPage",s);break;case n.UP:this._keyEvent("previous",s);break;case n.DOWN:this._keyEvent("next",s)}}},input:function(t){return s?(s=!1,t.preventDefault(),void 0):(this._searchTimeout(t),void 0)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(clearTimeout(this.searching),this.close(t),this._change(t),void 0)}}),this._initSource(),this.menu=t("
    ").appendTo(this._appendTo()).menu({role:null}).hide().menu("instance"),this._addClass(this.menu.element,"ui-autocomplete","ui-front"),this._on(this.menu.element,{mousedown:function(e){e.preventDefault(),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,this.element[0]!==t.ui.safeActiveElement(this.document[0])&&this.element.trigger("focus")})},menufocus:function(e,i){var s,n;return this.isNewMenu&&(this.isNewMenu=!1,e.originalEvent&&/^mouse/.test(e.originalEvent.type))?(this.menu.blur(),this.document.one("mousemove",function(){t(e.target).trigger(e.originalEvent)}),void 0):(n=i.item.data("ui-autocomplete-item"),!1!==this._trigger("focus",e,{item:n})&&e.originalEvent&&/^key/.test(e.originalEvent.type)&&this._value(n.value),s=i.item.attr("aria-label")||n.value,s&&t.trim(s).length&&(this.liveRegion.children().hide(),t("
    ").text(s).appendTo(this.liveRegion)),void 0)},menuselect:function(e,i){var s=i.item.data("ui-autocomplete-item"),n=this.previous;this.element[0]!==t.ui.safeActiveElement(this.document[0])&&(this.element.trigger("focus"),this.previous=n,this._delay(function(){this.previous=n,this.selectedItem=s})),!1!==this._trigger("select",e,{item:s})&&this._value(s.value),this.term=this._value(),this.close(e),this.selectedItem=s}}),this.liveRegion=t("
    ",{role:"status","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_destroy:function(){clearTimeout(this.searching),this.element.removeAttr("autocomplete"),this.menu.element.remove(),this.liveRegion.remove()},_setOption:function(t,e){this._super(t,e),"source"===t&&this._initSource(),"appendTo"===t&&this.menu.element.appendTo(this._appendTo()),"disabled"===t&&e&&this.xhr&&this.xhr.abort()},_isEventTargetInWidget:function(e){var i=this.menu.element[0];return e.target===this.element[0]||e.target===i||t.contains(i,e.target)},_closeOnClickOutside:function(t){this._isEventTargetInWidget(t)||this.close()},_appendTo:function(){var e=this.options.appendTo;return e&&(e=e.jquery||e.nodeType?t(e):this.document.find(e).eq(0)),e&&e[0]||(e=this.element.closest(".ui-front, dialog")),e.length||(e=this.document[0].body),e},_initSource:function(){var e,i,s=this;t.isArray(this.options.source)?(e=this.options.source,this.source=function(i,s){s(t.ui.autocomplete.filter(e,i.term))}):"string"==typeof this.options.source?(i=this.options.source,this.source=function(e,n){s.xhr&&s.xhr.abort(),s.xhr=t.ajax({url:i,data:e,dataType:"json",success:function(t){n(t)},error:function(){n([])}})}):this.source=this.options.source},_searchTimeout:function(t){clearTimeout(this.searching),this.searching=this._delay(function(){var e=this.term===this._value(),i=this.menu.element.is(":visible"),s=t.altKey||t.ctrlKey||t.metaKey||t.shiftKey;(!e||e&&!i&&!s)&&(this.selectedItem=null,this.search(null,t))},this.options.delay)},search:function(t,e){return t=null!=t?t:this._value(),this.term=this._value(),t.length").append(t("
    ").text(i.label)).appendTo(e)},_move:function(t,e){return this.menu.element.is(":visible")?this.menu.isFirstItem()&&/^previous/.test(t)||this.menu.isLastItem()&&/^next/.test(t)?(this.isMultiLine||this._value(this.term),this.menu.blur(),void 0):(this.menu[t](e),void 0):(this.search(null,e),void 0)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(t,e){(!this.isMultiLine||this.menu.element.is(":visible"))&&(this._move(t,e),e.preventDefault())},_isContentEditable:function(t){if(!t.length)return!1;var e=t.prop("contentEditable");return"inherit"===e?this._isContentEditable(t.parent()):"true"===e}}),t.extend(t.ui.autocomplete,{escapeRegex:function(t){return t.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(e,i){var s=RegExp(t.ui.autocomplete.escapeRegex(i),"i");return t.grep(e,function(t){return s.test(t.label||t.value||t)})}}),t.widget("ui.autocomplete",t.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(t){return t+(t>1?" results are":" result is")+" available, use up and down arrow keys to navigate."}}},__response:function(e){var i;this._superApply(arguments),this.options.disabled||this.cancelSearch||(i=e&&e.length?this.options.messages.results(e.length):this.options.messages.noResults,this.liveRegion.children().hide(),t("
    ").text(i).appendTo(this.liveRegion))}}),t.ui.autocomplete;var g=/ui-corner-([a-z]){2,6}/g;t.widget("ui.controlgroup",{version:"1.12.1",defaultElement:"
    ",options:{direction:"horizontal",disabled:null,onlyVisible:!0,items:{button:"input[type=button], input[type=submit], input[type=reset], button, a",controlgroupLabel:".ui-controlgroup-label",checkboxradio:"input[type='checkbox'], input[type='radio']",selectmenu:"select",spinner:".ui-spinner-input"}},_create:function(){this._enhance()},_enhance:function(){this.element.attr("role","toolbar"),this.refresh()},_destroy:function(){this._callChildMethod("destroy"),this.childWidgets.removeData("ui-controlgroup-data"),this.element.removeAttr("role"),this.options.items.controlgroupLabel&&this.element.find(this.options.items.controlgroupLabel).find(".ui-controlgroup-label-contents").contents().unwrap()},_initWidgets:function(){var e=this,i=[];t.each(this.options.items,function(s,n){var o,a={};return n?"controlgroupLabel"===s?(o=e.element.find(n),o.each(function(){var e=t(this);e.children(".ui-controlgroup-label-contents").length||e.contents().wrapAll("")}),e._addClass(o,null,"ui-widget ui-widget-content ui-state-default"),i=i.concat(o.get()),void 0):(t.fn[s]&&(a=e["_"+s+"Options"]?e["_"+s+"Options"]("middle"):{classes:{}},e.element.find(n).each(function(){var n=t(this),o=n[s]("instance"),r=t.widget.extend({},a);if("button"!==s||!n.parent(".ui-spinner").length){o||(o=n[s]()[s]("instance")),o&&(r.classes=e._resolveClassesValues(r.classes,o)),n[s](r);var h=n[s]("widget");t.data(h[0],"ui-controlgroup-data",o?o:n[s]("instance")),i.push(h[0])}})),void 0):void 0}),this.childWidgets=t(t.unique(i)),this._addClass(this.childWidgets,"ui-controlgroup-item")},_callChildMethod:function(e){this.childWidgets.each(function(){var i=t(this),s=i.data("ui-controlgroup-data");s&&s[e]&&s[e]()})},_updateCornerClass:function(t,e){var i="ui-corner-top ui-corner-bottom ui-corner-left ui-corner-right ui-corner-all",s=this._buildSimpleOptions(e,"label").classes.label;this._removeClass(t,null,i),this._addClass(t,null,s)},_buildSimpleOptions:function(t,e){var i="vertical"===this.options.direction,s={classes:{}};return s.classes[e]={middle:"",first:"ui-corner-"+(i?"top":"left"),last:"ui-corner-"+(i?"bottom":"right"),only:"ui-corner-all"}[t],s},_spinnerOptions:function(t){var e=this._buildSimpleOptions(t,"ui-spinner");return e.classes["ui-spinner-up"]="",e.classes["ui-spinner-down"]="",e},_buttonOptions:function(t){return this._buildSimpleOptions(t,"ui-button")},_checkboxradioOptions:function(t){return this._buildSimpleOptions(t,"ui-checkboxradio-label")},_selectmenuOptions:function(t){var e="vertical"===this.options.direction;return{width:e?"auto":!1,classes:{middle:{"ui-selectmenu-button-open":"","ui-selectmenu-button-closed":""},first:{"ui-selectmenu-button-open":"ui-corner-"+(e?"top":"tl"),"ui-selectmenu-button-closed":"ui-corner-"+(e?"top":"left")},last:{"ui-selectmenu-button-open":e?"":"ui-corner-tr","ui-selectmenu-button-closed":"ui-corner-"+(e?"bottom":"right")},only:{"ui-selectmenu-button-open":"ui-corner-top","ui-selectmenu-button-closed":"ui-corner-all"}}[t]}},_resolveClassesValues:function(e,i){var s={};return t.each(e,function(n){var o=i.options.classes[n]||"";o=t.trim(o.replace(g,"")),s[n]=(o+" "+e[n]).replace(/\s+/g," ")}),s},_setOption:function(t,e){return"direction"===t&&this._removeClass("ui-controlgroup-"+this.options.direction),this._super(t,e),"disabled"===t?(this._callChildMethod(e?"disable":"enable"),void 0):(this.refresh(),void 0)},refresh:function(){var e,i=this;this._addClass("ui-controlgroup 
ui-controlgroup-"+this.options.direction),"horizontal"===this.options.direction&&this._addClass(null,"ui-helper-clearfix"),this._initWidgets(),e=this.childWidgets,this.options.onlyVisible&&(e=e.filter(":visible")),e.length&&(t.each(["first","last"],function(t,s){var n=e[s]().data("ui-controlgroup-data");if(n&&i["_"+n.widgetName+"Options"]){var o=i["_"+n.widgetName+"Options"](1===e.length?"only":s);o.classes=i._resolveClassesValues(o.classes,n),n.element[n.widgetName](o)}else i._updateCornerClass(e[s](),s)}),this._callChildMethod("refresh"))}}),t.widget("ui.checkboxradio",[t.ui.formResetMixin,{version:"1.12.1",options:{disabled:null,label:null,icon:!0,classes:{"ui-checkboxradio-label":"ui-corner-all","ui-checkboxradio-icon":"ui-corner-all"}},_getCreateOptions:function(){var e,i,s=this,n=this._super()||{};return this._readType(),i=this.element.labels(),this.label=t(i[i.length-1]),this.label.length||t.error("No label found for checkboxradio widget"),this.originalLabel="",this.label.contents().not(this.element[0]).each(function(){s.originalLabel+=3===this.nodeType?t(this).text():this.outerHTML}),this.originalLabel&&(n.label=this.originalLabel),e=this.element[0].disabled,null!=e&&(n.disabled=e),n},_create:function(){var t=this.element[0].checked;this._bindFormResetHandler(),null==this.options.disabled&&(this.options.disabled=this.element[0].disabled),this._setOption("disabled",this.options.disabled),this._addClass("ui-checkboxradio","ui-helper-hidden-accessible"),this._addClass(this.label,"ui-checkboxradio-label","ui-button ui-widget"),"radio"===this.type&&this._addClass(this.label,"ui-checkboxradio-radio-label"),this.options.label&&this.options.label!==this.originalLabel?this._updateLabel():this.originalLabel&&(this.options.label=this.originalLabel),this._enhance(),t&&(this._addClass(this.label,"ui-checkboxradio-checked","ui-state-active"),this.icon&&this._addClass(this.icon,null,"ui-state-hover")),this._on({change:"_toggleClasses",focus:function(){this._addClass(this.label,null,"ui-state-focus ui-visual-focus")},blur:function(){this._removeClass(this.label,null,"ui-state-focus ui-visual-focus")}})},_readType:function(){var e=this.element[0].nodeName.toLowerCase();this.type=this.element[0].type,"input"===e&&/radio|checkbox/.test(this.type)||t.error("Can't create checkboxradio on element.nodeName="+e+" and element.type="+this.type)},_enhance:function(){this._updateIcon(this.element[0].checked)},widget:function(){return this.label},_getRadioGroup:function(){var e,i=this.element[0].name,s="input[name='"+t.ui.escapeSelector(i)+"']";return i?(e=this.form.length?t(this.form[0].elements).filter(s):t(s).filter(function(){return 0===t(this).form().length}),e.not(this.element)):t([])},_toggleClasses:function(){var e=this.element[0].checked;this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",e),this.options.icon&&"checkbox"===this.type&&this._toggleClass(this.icon,null,"ui-icon-check ui-state-checked",e)._toggleClass(this.icon,null,"ui-icon-blank",!e),"radio"===this.type&&this._getRadioGroup().each(function(){var e=t(this).checkboxradio("instance");e&&e._removeClass(e.label,"ui-checkboxradio-checked","ui-state-active")})},_destroy:function(){this._unbindFormResetHandler(),this.icon&&(this.icon.remove(),this.iconSpace.remove())},_setOption:function(t,e){return"label"!==t||e?(this._super(t,e),"disabled"===t?(this._toggleClass(this.label,null,"ui-state-disabled",e),this.element[0].disabled=e,void 0):(this.refresh(),void 0)):void 0},_updateIcon:function(e){var i="ui-icon 
ui-icon-background ";this.options.icon?(this.icon||(this.icon=t(""),this.iconSpace=t(" "),this._addClass(this.iconSpace,"ui-checkboxradio-icon-space")),"checkbox"===this.type?(i+=e?"ui-icon-check ui-state-checked":"ui-icon-blank",this._removeClass(this.icon,null,e?"ui-icon-blank":"ui-icon-check")):i+="ui-icon-blank",this._addClass(this.icon,"ui-checkboxradio-icon",i),e||this._removeClass(this.icon,null,"ui-icon-check ui-state-checked"),this.icon.prependTo(this.label).after(this.iconSpace)):void 0!==this.icon&&(this.icon.remove(),this.iconSpace.remove(),delete this.icon)},_updateLabel:function(){var t=this.label.contents().not(this.element[0]);this.icon&&(t=t.not(this.icon[0])),this.iconSpace&&(t=t.not(this.iconSpace[0])),t.remove(),this.label.append(this.options.label)},refresh:function(){var t=this.element[0].checked,e=this.element[0].disabled;this._updateIcon(t),this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",t),null!==this.options.label&&this._updateLabel(),e!==this.options.disabled&&this._setOptions({disabled:e})}}]),t.ui.checkboxradio,t.widget("ui.button",{version:"1.12.1",defaultElement:"").addClass(this._triggerClass).html(o?t("").attr({src:o,alt:n,title:n}):n)),e[r?"before":"after"](i.trigger),i.trigger.on("click",function(){return t.datepicker._datepickerShowing&&t.datepicker._lastInput===e[0]?t.datepicker._hideDatepicker():t.datepicker._datepickerShowing&&t.datepicker._lastInput!==e[0]?(t.datepicker._hideDatepicker(),t.datepicker._showDatepicker(e[0])):t.datepicker._showDatepicker(e[0]),!1}))},_autoSize:function(t){if(this._get(t,"autoSize")&&!t.inline){var e,i,s,n,o=new Date(2009,11,20),a=this._get(t,"dateFormat");a.match(/[DM]/)&&(e=function(t){for(i=0,s=0,n=0;t.length>n;n++)t[n].length>i&&(i=t[n].length,s=n);return s},o.setMonth(e(this._get(t,a.match(/MM/)?"monthNames":"monthNamesShort"))),o.setDate(e(this._get(t,a.match(/DD/)?"dayNames":"dayNamesShort"))+20-o.getDay())),t.input.attr("size",this._formatDate(t,o).length)}},_inlineDatepicker:function(e,i){var s=t(e);s.hasClass(this.markerClassName)||(s.addClass(this.markerClassName).append(i.dpDiv),t.data(e,"datepicker",i),this._setDate(i,this._getDefaultDate(i),!0),this._updateDatepicker(i),this._updateAlternate(i),i.settings.disabled&&this._disableDatepicker(e),i.dpDiv.css("display","block"))},_dialogDatepicker:function(e,i,s,n,o){var r,h,l,c,u,d=this._dialogInst;return d||(this.uuid+=1,r="dp"+this.uuid,this._dialogInput=t(""),this._dialogInput.on("keydown",this._doKeyDown),t("body").append(this._dialogInput),d=this._dialogInst=this._newInst(this._dialogInput,!1),d.settings={},t.data(this._dialogInput[0],"datepicker",d)),a(d.settings,n||{}),i=i&&i.constructor===Date?this._formatDate(d,i):i,this._dialogInput.val(i),this._pos=o?o.length?o:[o.pageX,o.pageY]:null,this._pos||(h=document.documentElement.clientWidth,l=document.documentElement.clientHeight,c=document.documentElement.scrollLeft||document.body.scrollLeft,u=document.documentElement.scrollTop||document.body.scrollTop,this._pos=[h/2-100+c,l/2-150+u]),this._dialogInput.css("left",this._pos[0]+20+"px").css("top",this._pos[1]+"px"),d.settings.onSelect=s,this._inDialog=!0,this.dpDiv.addClass(this._dialogClass),this._showDatepicker(this._dialogInput[0]),t.blockUI&&t.blockUI(this.dpDiv),t.data(this._dialogInput[0],"datepicker",d),this},_destroyDatepicker:function(e){var 
i,s=t(e),n=t.data(e,"datepicker");s.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),t.removeData(e,"datepicker"),"input"===i?(n.append.remove(),n.trigger.remove(),s.removeClass(this.markerClassName).off("focus",this._showDatepicker).off("keydown",this._doKeyDown).off("keypress",this._doKeyPress).off("keyup",this._doKeyUp)):("div"===i||"span"===i)&&s.removeClass(this.markerClassName).empty(),m===n&&(m=null))},_enableDatepicker:function(e){var i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!1,o.trigger.filter("button").each(function(){this.disabled=!1}).end().filter("img").css({opacity:"1.0",cursor:""})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().removeClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!1)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}))},_disableDatepicker:function(e){var i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!0,o.trigger.filter("button").each(function(){this.disabled=!0}).end().filter("img").css({opacity:"0.5",cursor:"default"})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().addClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!0)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}),this._disabledInputs[this._disabledInputs.length]=e)},_isDisabledDatepicker:function(t){if(!t)return!1;for(var e=0;this._disabledInputs.length>e;e++)if(this._disabledInputs[e]===t)return!0;return!1},_getInst:function(e){try{return t.data(e,"datepicker")}catch(i){throw"Missing instance data for this datepicker"}},_optionDatepicker:function(e,i,s){var n,o,r,h,l=this._getInst(e);return 2===arguments.length&&"string"==typeof i?"defaults"===i?t.extend({},t.datepicker._defaults):l?"all"===i?t.extend({},l.settings):this._get(l,i):null:(n=i||{},"string"==typeof i&&(n={},n[i]=s),l&&(this._curInst===l&&this._hideDatepicker(),o=this._getDateDatepicker(e,!0),r=this._getMinMaxDate(l,"min"),h=this._getMinMaxDate(l,"max"),a(l.settings,n),null!==r&&void 0!==n.dateFormat&&void 0===n.minDate&&(l.settings.minDate=this._formatDate(l,r)),null!==h&&void 0!==n.dateFormat&&void 0===n.maxDate&&(l.settings.maxDate=this._formatDate(l,h)),"disabled"in n&&(n.disabled?this._disableDatepicker(e):this._enableDatepicker(e)),this._attachments(t(e),l),this._autoSize(l),this._setDate(l,o),this._updateAlternate(l),this._updateDatepicker(l)),void 0)},_changeDatepicker:function(t,e,i){this._optionDatepicker(t,e,i)},_refreshDatepicker:function(t){var e=this._getInst(t);e&&this._updateDatepicker(e)},_setDateDatepicker:function(t,e){var i=this._getInst(t);i&&(this._setDate(i,e),this._updateDatepicker(i),this._updateAlternate(i))},_getDateDatepicker:function(t,e){var i=this._getInst(t);return i&&!i.inline&&this._setDateFromField(i,e),i?this._getDate(i):null},_doKeyDown:function(e){var i,s,n,o=t.datepicker._getInst(e.target),a=!0,r=o.dpDiv.is(".ui-datepicker-rtl");if(o._keyEvent=!0,t.datepicker._datepickerShowing)switch(e.keyCode){case 9:t.datepicker._hideDatepicker(),a=!1;break;case 13:return 
n=t("td."+t.datepicker._dayOverClass+":not(."+t.datepicker._currentClass+")",o.dpDiv),n[0]&&t.datepicker._selectDay(e.target,o.selectedMonth,o.selectedYear,n[0]),i=t.datepicker._get(o,"onSelect"),i?(s=t.datepicker._formatDate(o),i.apply(o.input?o.input[0]:null,[s,o])):t.datepicker._hideDatepicker(),!1;case 27:t.datepicker._hideDatepicker();break;case 33:t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 34:t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 35:(e.ctrlKey||e.metaKey)&&t.datepicker._clearDate(e.target),a=e.ctrlKey||e.metaKey;break;case 36:(e.ctrlKey||e.metaKey)&&t.datepicker._gotoToday(e.target),a=e.ctrlKey||e.metaKey;break;case 37:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?1:-1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 38:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,-7,"D"),a=e.ctrlKey||e.metaKey;break;case 39:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?-1:1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 40:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,7,"D"),a=e.ctrlKey||e.metaKey;break;default:a=!1}else 36===e.keyCode&&e.ctrlKey?t.datepicker._showDatepicker(this):a=!1;a&&(e.preventDefault(),e.stopPropagation())},_doKeyPress:function(e){var i,s,n=t.datepicker._getInst(e.target);return t.datepicker._get(n,"constrainInput")?(i=t.datepicker._possibleChars(t.datepicker._get(n,"dateFormat")),s=String.fromCharCode(null==e.charCode?e.keyCode:e.charCode),e.ctrlKey||e.metaKey||" ">s||!i||i.indexOf(s)>-1):void 0},_doKeyUp:function(e){var i,s=t.datepicker._getInst(e.target);if(s.input.val()!==s.lastVal)try{i=t.datepicker.parseDate(t.datepicker._get(s,"dateFormat"),s.input?s.input.val():null,t.datepicker._getFormatConfig(s)),i&&(t.datepicker._setDateFromField(s),t.datepicker._updateAlternate(s),t.datepicker._updateDatepicker(s))}catch(n){}return!0},_showDatepicker:function(e){if(e=e.target||e,"input"!==e.nodeName.toLowerCase()&&(e=t("input",e.parentNode)[0]),!t.datepicker._isDisabledDatepicker(e)&&t.datepicker._lastInput!==e){var s,n,o,r,h,l,c;s=t.datepicker._getInst(e),t.datepicker._curInst&&t.datepicker._curInst!==s&&(t.datepicker._curInst.dpDiv.stop(!0,!0),s&&t.datepicker._datepickerShowing&&t.datepicker._hideDatepicker(t.datepicker._curInst.input[0])),n=t.datepicker._get(s,"beforeShow"),o=n?n.apply(e,[e,s]):{},o!==!1&&(a(s.settings,o),s.lastVal=null,t.datepicker._lastInput=e,t.datepicker._setDateFromField(s),t.datepicker._inDialog&&(e.value=""),t.datepicker._pos||(t.datepicker._pos=t.datepicker._findPos(e),t.datepicker._pos[1]+=e.offsetHeight),r=!1,t(e).parents().each(function(){return 
r|="fixed"===t(this).css("position"),!r}),h={left:t.datepicker._pos[0],top:t.datepicker._pos[1]},t.datepicker._pos=null,s.dpDiv.empty(),s.dpDiv.css({position:"absolute",display:"block",top:"-1000px"}),t.datepicker._updateDatepicker(s),h=t.datepicker._checkOffset(s,h,r),s.dpDiv.css({position:t.datepicker._inDialog&&t.blockUI?"static":r?"fixed":"absolute",display:"none",left:h.left+"px",top:h.top+"px"}),s.inline||(l=t.datepicker._get(s,"showAnim"),c=t.datepicker._get(s,"duration"),s.dpDiv.css("z-index",i(t(e))+1),t.datepicker._datepickerShowing=!0,t.effects&&t.effects.effect[l]?s.dpDiv.show(l,t.datepicker._get(s,"showOptions"),c):s.dpDiv[l||"show"](l?c:null),t.datepicker._shouldFocusInput(s)&&s.input.trigger("focus"),t.datepicker._curInst=s)) -}},_updateDatepicker:function(e){this.maxRows=4,m=e,e.dpDiv.empty().append(this._generateHTML(e)),this._attachHandlers(e);var i,s=this._getNumberOfMonths(e),n=s[1],a=17,r=e.dpDiv.find("."+this._dayOverClass+" a");r.length>0&&o.apply(r.get(0)),e.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width(""),n>1&&e.dpDiv.addClass("ui-datepicker-multi-"+n).css("width",a*n+"em"),e.dpDiv[(1!==s[0]||1!==s[1]?"add":"remove")+"Class"]("ui-datepicker-multi"),e.dpDiv[(this._get(e,"isRTL")?"add":"remove")+"Class"]("ui-datepicker-rtl"),e===t.datepicker._curInst&&t.datepicker._datepickerShowing&&t.datepicker._shouldFocusInput(e)&&e.input.trigger("focus"),e.yearshtml&&(i=e.yearshtml,setTimeout(function(){i===e.yearshtml&&e.yearshtml&&e.dpDiv.find("select.ui-datepicker-year:first").replaceWith(e.yearshtml),i=e.yearshtml=null},0))},_shouldFocusInput:function(t){return t.input&&t.input.is(":visible")&&!t.input.is(":disabled")&&!t.input.is(":focus")},_checkOffset:function(e,i,s){var n=e.dpDiv.outerWidth(),o=e.dpDiv.outerHeight(),a=e.input?e.input.outerWidth():0,r=e.input?e.input.outerHeight():0,h=document.documentElement.clientWidth+(s?0:t(document).scrollLeft()),l=document.documentElement.clientHeight+(s?0:t(document).scrollTop());return i.left-=this._get(e,"isRTL")?n-a:0,i.left-=s&&i.left===e.input.offset().left?t(document).scrollLeft():0,i.top-=s&&i.top===e.input.offset().top+r?t(document).scrollTop():0,i.left-=Math.min(i.left,i.left+n>h&&h>n?Math.abs(i.left+n-h):0),i.top-=Math.min(i.top,i.top+o>l&&l>o?Math.abs(o+r):0),i},_findPos:function(e){for(var i,s=this._getInst(e),n=this._get(s,"isRTL");e&&("hidden"===e.type||1!==e.nodeType||t.expr.filters.hidden(e));)e=e[n?"previousSibling":"nextSibling"];return i=t(e).offset(),[i.left,i.top]},_hideDatepicker:function(e){var i,s,n,o,a=this._curInst;!a||e&&a!==t.data(e,"datepicker")||this._datepickerShowing&&(i=this._get(a,"showAnim"),s=this._get(a,"duration"),n=function(){t.datepicker._tidyDialog(a)},t.effects&&(t.effects.effect[i]||t.effects[i])?a.dpDiv.hide(i,t.datepicker._get(a,"showOptions"),s,n):a.dpDiv["slideDown"===i?"slideUp":"fadeIn"===i?"fadeOut":"hide"](i?s:null,n),i||n(),this._datepickerShowing=!1,o=this._get(a,"onClose"),o&&o.apply(a.input?a.input[0]:null,[a.input?a.input.val():"",a]),this._lastInput=null,this._inDialog&&(this._dialogInput.css({position:"absolute",left:"0",top:"-100px"}),t.blockUI&&(t.unblockUI(),t("body").append(this.dpDiv))),this._inDialog=!1)},_tidyDialog:function(t){t.dpDiv.removeClass(this._dialogClass).off(".ui-datepicker-calendar")},_checkExternalClick:function(e){if(t.datepicker._curInst){var 
i=t(e.target),s=t.datepicker._getInst(i[0]);(i[0].id!==t.datepicker._mainDivId&&0===i.parents("#"+t.datepicker._mainDivId).length&&!i.hasClass(t.datepicker.markerClassName)&&!i.closest("."+t.datepicker._triggerClass).length&&t.datepicker._datepickerShowing&&(!t.datepicker._inDialog||!t.blockUI)||i.hasClass(t.datepicker.markerClassName)&&t.datepicker._curInst!==s)&&t.datepicker._hideDatepicker()}},_adjustDate:function(e,i,s){var n=t(e),o=this._getInst(n[0]);this._isDisabledDatepicker(n[0])||(this._adjustInstDate(o,i+("M"===s?this._get(o,"showCurrentAtPos"):0),s),this._updateDatepicker(o))},_gotoToday:function(e){var i,s=t(e),n=this._getInst(s[0]);this._get(n,"gotoCurrent")&&n.currentDay?(n.selectedDay=n.currentDay,n.drawMonth=n.selectedMonth=n.currentMonth,n.drawYear=n.selectedYear=n.currentYear):(i=new Date,n.selectedDay=i.getDate(),n.drawMonth=n.selectedMonth=i.getMonth(),n.drawYear=n.selectedYear=i.getFullYear()),this._notifyChange(n),this._adjustDate(s)},_selectMonthYear:function(e,i,s){var n=t(e),o=this._getInst(n[0]);o["selected"+("M"===s?"Month":"Year")]=o["draw"+("M"===s?"Month":"Year")]=parseInt(i.options[i.selectedIndex].value,10),this._notifyChange(o),this._adjustDate(n)},_selectDay:function(e,i,s,n){var o,a=t(e);t(n).hasClass(this._unselectableClass)||this._isDisabledDatepicker(a[0])||(o=this._getInst(a[0]),o.selectedDay=o.currentDay=t("a",n).html(),o.selectedMonth=o.currentMonth=i,o.selectedYear=o.currentYear=s,this._selectDate(e,this._formatDate(o,o.currentDay,o.currentMonth,o.currentYear)))},_clearDate:function(e){var i=t(e);this._selectDate(i,"")},_selectDate:function(e,i){var s,n=t(e),o=this._getInst(n[0]);i=null!=i?i:this._formatDate(o),o.input&&o.input.val(i),this._updateAlternate(o),s=this._get(o,"onSelect"),s?s.apply(o.input?o.input[0]:null,[i,o]):o.input&&o.input.trigger("change"),o.inline?this._updateDatepicker(o):(this._hideDatepicker(),this._lastInput=o.input[0],"object"!=typeof o.input[0]&&o.input.trigger("focus"),this._lastInput=null)},_updateAlternate:function(e){var i,s,n,o=this._get(e,"altField");o&&(i=this._get(e,"altFormat")||this._get(e,"dateFormat"),s=this._getDate(e),n=this.formatDate(i,s,this._getFormatConfig(e)),t(o).val(n))},noWeekends:function(t){var e=t.getDay();return[e>0&&6>e,""]},iso8601Week:function(t){var e,i=new Date(t.getTime());return i.setDate(i.getDate()+4-(i.getDay()||7)),e=i.getTime(),i.setMonth(0),i.setDate(1),Math.floor(Math.round((e-i)/864e5)/7)+1},parseDate:function(e,i,s){if(null==e||null==i)throw"Invalid arguments";if(i="object"==typeof i?""+i:i+"",""===i)return null;var n,o,a,r,h=0,l=(s?s.shortYearCutoff:null)||this._defaults.shortYearCutoff,c="string"!=typeof l?l:(new Date).getFullYear()%100+parseInt(l,10),u=(s?s.dayNamesShort:null)||this._defaults.dayNamesShort,d=(s?s.dayNames:null)||this._defaults.dayNames,p=(s?s.monthNamesShort:null)||this._defaults.monthNamesShort,f=(s?s.monthNames:null)||this._defaults.monthNames,g=-1,m=-1,_=-1,v=-1,b=!1,y=function(t){var i=e.length>n+1&&e.charAt(n+1)===t;return i&&n++,i},w=function(t){var e=y(t),s="@"===t?14:"!"===t?20:"y"===t&&e?4:"o"===t?3:2,n="y"===t?s:1,o=RegExp("^\\d{"+n+","+s+"}"),a=i.substring(h).match(o);if(!a)throw"Missing number at position "+h;return h+=a[0].length,parseInt(a[0],10)},k=function(e,s,n){var o=-1,a=t.map(y(e)?n:s,function(t,e){return[[e,t]]}).sort(function(t,e){return-(t[1].length-e[1].length)});if(t.each(a,function(t,e){var s=e[1];return i.substr(h,s.length).toLowerCase()===s.toLowerCase()?(o=e[0],h+=s.length,!1):void 0}),-1!==o)return o+1;throw"Unknown name at 
position "+h},x=function(){if(i.charAt(h)!==e.charAt(n))throw"Unexpected literal at position "+h;h++};for(n=0;e.length>n;n++)if(b)"'"!==e.charAt(n)||y("'")?x():b=!1;else switch(e.charAt(n)){case"d":_=w("d");break;case"D":k("D",u,d);break;case"o":v=w("o");break;case"m":m=w("m");break;case"M":m=k("M",p,f);break;case"y":g=w("y");break;case"@":r=new Date(w("@")),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"!":r=new Date((w("!")-this._ticksTo1970)/1e4),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"'":y("'")?x():b=!0;break;default:x()}if(i.length>h&&(a=i.substr(h),!/^\s+/.test(a)))throw"Extra/unparsed characters found in date: "+a;if(-1===g?g=(new Date).getFullYear():100>g&&(g+=(new Date).getFullYear()-(new Date).getFullYear()%100+(c>=g?0:-100)),v>-1)for(m=1,_=v;;){if(o=this._getDaysInMonth(g,m-1),o>=_)break;m++,_-=o}if(r=this._daylightSavingAdjust(new Date(g,m-1,_)),r.getFullYear()!==g||r.getMonth()+1!==m||r.getDate()!==_)throw"Invalid date";return r},ATOM:"yy-mm-dd",COOKIE:"D, dd M yy",ISO_8601:"yy-mm-dd",RFC_822:"D, d M y",RFC_850:"DD, dd-M-y",RFC_1036:"D, d M y",RFC_1123:"D, d M yy",RFC_2822:"D, d M yy",RSS:"D, d M y",TICKS:"!",TIMESTAMP:"@",W3C:"yy-mm-dd",_ticksTo1970:1e7*60*60*24*(718685+Math.floor(492.5)-Math.floor(19.7)+Math.floor(4.925)),formatDate:function(t,e,i){if(!e)return"";var s,n=(i?i.dayNamesShort:null)||this._defaults.dayNamesShort,o=(i?i.dayNames:null)||this._defaults.dayNames,a=(i?i.monthNamesShort:null)||this._defaults.monthNamesShort,r=(i?i.monthNames:null)||this._defaults.monthNames,h=function(e){var i=t.length>s+1&&t.charAt(s+1)===e;return i&&s++,i},l=function(t,e,i){var s=""+e;if(h(t))for(;i>s.length;)s="0"+s;return s},c=function(t,e,i,s){return h(t)?s[e]:i[e]},u="",d=!1;if(e)for(s=0;t.length>s;s++)if(d)"'"!==t.charAt(s)||h("'")?u+=t.charAt(s):d=!1;else switch(t.charAt(s)){case"d":u+=l("d",e.getDate(),2);break;case"D":u+=c("D",e.getDay(),n,o);break;case"o":u+=l("o",Math.round((new Date(e.getFullYear(),e.getMonth(),e.getDate()).getTime()-new Date(e.getFullYear(),0,0).getTime())/864e5),3);break;case"m":u+=l("m",e.getMonth()+1,2);break;case"M":u+=c("M",e.getMonth(),a,r);break;case"y":u+=h("y")?e.getFullYear():(10>e.getFullYear()%100?"0":"")+e.getFullYear()%100;break;case"@":u+=e.getTime();break;case"!":u+=1e4*e.getTime()+this._ticksTo1970;break;case"'":h("'")?u+="'":d=!0;break;default:u+=t.charAt(s)}return u},_possibleChars:function(t){var e,i="",s=!1,n=function(i){var s=t.length>e+1&&t.charAt(e+1)===i;return s&&e++,s};for(e=0;t.length>e;e++)if(s)"'"!==t.charAt(e)||n("'")?i+=t.charAt(e):s=!1;else switch(t.charAt(e)){case"d":case"m":case"y":case"@":i+="0123456789";break;case"D":case"M":return null;case"'":n("'")?i+="'":s=!0;break;default:i+=t.charAt(e)}return i},_get:function(t,e){return void 0!==t.settings[e]?t.settings[e]:this._defaults[e]},_setDateFromField:function(t,e){if(t.input.val()!==t.lastVal){var i=this._get(t,"dateFormat"),s=t.lastVal=t.input?t.input.val():null,n=this._getDefaultDate(t),o=n,a=this._getFormatConfig(t);try{o=this.parseDate(i,s,a)||n}catch(r){s=e?"":s}t.selectedDay=o.getDate(),t.drawMonth=t.selectedMonth=o.getMonth(),t.drawYear=t.selectedYear=o.getFullYear(),t.currentDay=s?o.getDate():0,t.currentMonth=s?o.getMonth():0,t.currentYear=s?o.getFullYear():0,this._adjustInstDate(t)}},_getDefaultDate:function(t){return this._restrictMinMax(t,this._determineDate(t,this._get(t,"defaultDate"),new Date))},_determineDate:function(e,i,s){var n=function(t){var e=new Date;return e.setDate(e.getDate()+t),e},o=function(i){try{return 
t.datepicker.parseDate(t.datepicker._get(e,"dateFormat"),i,t.datepicker._getFormatConfig(e))}catch(s){}for(var n=(i.toLowerCase().match(/^c/)?t.datepicker._getDate(e):null)||new Date,o=n.getFullYear(),a=n.getMonth(),r=n.getDate(),h=/([+\-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g,l=h.exec(i);l;){switch(l[2]||"d"){case"d":case"D":r+=parseInt(l[1],10);break;case"w":case"W":r+=7*parseInt(l[1],10);break;case"m":case"M":a+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a));break;case"y":case"Y":o+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a))}l=h.exec(i)}return new Date(o,a,r)},a=null==i||""===i?s:"string"==typeof i?o(i):"number"==typeof i?isNaN(i)?s:n(i):new Date(i.getTime());return a=a&&"Invalid Date"==""+a?s:a,a&&(a.setHours(0),a.setMinutes(0),a.setSeconds(0),a.setMilliseconds(0)),this._daylightSavingAdjust(a)},_daylightSavingAdjust:function(t){return t?(t.setHours(t.getHours()>12?t.getHours()+2:0),t):null},_setDate:function(t,e,i){var s=!e,n=t.selectedMonth,o=t.selectedYear,a=this._restrictMinMax(t,this._determineDate(t,e,new Date));t.selectedDay=t.currentDay=a.getDate(),t.drawMonth=t.selectedMonth=t.currentMonth=a.getMonth(),t.drawYear=t.selectedYear=t.currentYear=a.getFullYear(),n===t.selectedMonth&&o===t.selectedYear||i||this._notifyChange(t),this._adjustInstDate(t),t.input&&t.input.val(s?"":this._formatDate(t))},_getDate:function(t){var e=!t.currentYear||t.input&&""===t.input.val()?null:this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return e},_attachHandlers:function(e){var i=this._get(e,"stepMonths"),s="#"+e.id.replace(/\\\\/g,"\\");e.dpDiv.find("[data-handler]").map(function(){var e={prev:function(){t.datepicker._adjustDate(s,-i,"M")},next:function(){t.datepicker._adjustDate(s,+i,"M")},hide:function(){t.datepicker._hideDatepicker()},today:function(){t.datepicker._gotoToday(s)},selectDay:function(){return t.datepicker._selectDay(s,+this.getAttribute("data-month"),+this.getAttribute("data-year"),this),!1},selectMonth:function(){return t.datepicker._selectMonthYear(s,this,"M"),!1},selectYear:function(){return t.datepicker._selectMonthYear(s,this,"Y"),!1}};t(this).on(this.getAttribute("data-event"),e[this.getAttribute("data-handler")])})},_generateHTML:function(t){var e,i,s,n,o,a,r,h,l,c,u,d,p,f,g,m,_,v,b,y,w,k,x,C,D,I,T,P,M,S,H,z,O,A,N,W,E,F,L,R=new Date,B=this._daylightSavingAdjust(new Date(R.getFullYear(),R.getMonth(),R.getDate())),Y=this._get(t,"isRTL"),j=this._get(t,"showButtonPanel"),q=this._get(t,"hideIfNoPrevNext"),K=this._get(t,"navigationAsDateFormat"),U=this._getNumberOfMonths(t),V=this._get(t,"showCurrentAtPos"),$=this._get(t,"stepMonths"),X=1!==U[0]||1!==U[1],G=this._daylightSavingAdjust(t.currentDay?new Date(t.currentYear,t.currentMonth,t.currentDay):new Date(9999,9,9)),Q=this._getMinMaxDate(t,"min"),J=this._getMinMaxDate(t,"max"),Z=t.drawMonth-V,te=t.drawYear;if(0>Z&&(Z+=12,te--),J)for(e=this._daylightSavingAdjust(new Date(J.getFullYear(),J.getMonth()-U[0]*U[1]+1,J.getDate())),e=Q&&Q>e?Q:e;this._daylightSavingAdjust(new Date(te,Z,1))>e;)Z--,0>Z&&(Z=11,te--);for(t.drawMonth=Z,t.drawYear=te,i=this._get(t,"prevText"),i=K?this.formatDate(i,this._daylightSavingAdjust(new Date(te,Z-$,1)),this._getFormatConfig(t)):i,s=this._canAdjustMonth(t,-1,te,Z)?""+i+"":q?"":""+i+"",n=this._get(t,"nextText"),n=K?this.formatDate(n,this._daylightSavingAdjust(new 
Date(te,Z+$,1)),this._getFormatConfig(t)):n,o=this._canAdjustMonth(t,1,te,Z)?""+n+"":q?"":""+n+"",a=this._get(t,"currentText"),r=this._get(t,"gotoCurrent")&&t.currentDay?G:B,a=K?this.formatDate(a,r,this._getFormatConfig(t)):a,h=t.inline?"":"",l=j?"
    "+(Y?h:"")+(this._isInRange(t,r)?"":"")+(Y?"":h)+"
    ":"",c=parseInt(this._get(t,"firstDay"),10),c=isNaN(c)?0:c,u=this._get(t,"showWeek"),d=this._get(t,"dayNames"),p=this._get(t,"dayNamesMin"),f=this._get(t,"monthNames"),g=this._get(t,"monthNamesShort"),m=this._get(t,"beforeShowDay"),_=this._get(t,"showOtherMonths"),v=this._get(t,"selectOtherMonths"),b=this._getDefaultDate(t),y="",k=0;U[0]>k;k++){for(x="",this.maxRows=4,C=0;U[1]>C;C++){if(D=this._daylightSavingAdjust(new Date(te,Z,t.selectedDay)),I=" ui-corner-all",T="",X){if(T+="
    "}for(T+="
    "+(/all|left/.test(I)&&0===k?Y?o:s:"")+(/all|right/.test(I)&&0===k?Y?s:o:"")+this._generateMonthYearHeader(t,Z,te,Q,J,k>0||C>0,f,g)+"
    "+"",P=u?"":"",w=0;7>w;w++)M=(w+c)%7,P+="";for(T+=P+"",S=this._getDaysInMonth(te,Z),te===t.selectedYear&&Z===t.selectedMonth&&(t.selectedDay=Math.min(t.selectedDay,S)),H=(this._getFirstDayOfMonth(te,Z)-c+7)%7,z=Math.ceil((H+S)/7),O=X?this.maxRows>z?this.maxRows:z:z,this.maxRows=O,A=this._daylightSavingAdjust(new Date(te,Z,1-H)),N=0;O>N;N++){for(T+="",W=u?"":"",w=0;7>w;w++)E=m?m.apply(t.input?t.input[0]:null,[A]):[!0,""],F=A.getMonth()!==Z,L=F&&!v||!E[0]||Q&&Q>A||J&&A>J,W+="",A.setDate(A.getDate()+1),A=this._daylightSavingAdjust(A);T+=W+""}Z++,Z>11&&(Z=0,te++),T+="
    "+this._get(t,"weekHeader")+"=5?" class='ui-datepicker-week-end'":"")+">"+""+p[M]+"
    "+this._get(t,"calculateWeek")(A)+""+(F&&!_?" ":L?""+A.getDate()+"":""+A.getDate()+"")+"
    "+(X?"
    "+(U[0]>0&&C===U[1]-1?"
    ":""):""),x+=T}y+=x}return y+=l,t._keyEvent=!1,y},_generateMonthYearHeader:function(t,e,i,s,n,o,a,r){var h,l,c,u,d,p,f,g,m=this._get(t,"changeMonth"),_=this._get(t,"changeYear"),v=this._get(t,"showMonthAfterYear"),b="
    ",y="";if(o||!m)y+=""+a[e]+"";else{for(h=s&&s.getFullYear()===i,l=n&&n.getFullYear()===i,y+=""}if(v||(b+=y+(!o&&m&&_?"":" ")),!t.yearshtml)if(t.yearshtml="",o||!_)b+=""+i+"";else{for(u=this._get(t,"yearRange").split(":"),d=(new Date).getFullYear(),p=function(t){var e=t.match(/c[+\-].*/)?i+parseInt(t.substring(1),10):t.match(/[+\-].*/)?d+parseInt(t,10):parseInt(t,10);return isNaN(e)?d:e},f=p(u[0]),g=Math.max(f,p(u[1]||"")),f=s?Math.max(f,s.getFullYear()):f,g=n?Math.min(g,n.getFullYear()):g,t.yearshtml+="",b+=t.yearshtml,t.yearshtml=null}return b+=this._get(t,"yearSuffix"),v&&(b+=(!o&&m&&_?"":" ")+y),b+="
    "},_adjustInstDate:function(t,e,i){var s=t.selectedYear+("Y"===i?e:0),n=t.selectedMonth+("M"===i?e:0),o=Math.min(t.selectedDay,this._getDaysInMonth(s,n))+("D"===i?e:0),a=this._restrictMinMax(t,this._daylightSavingAdjust(new Date(s,n,o)));t.selectedDay=a.getDate(),t.drawMonth=t.selectedMonth=a.getMonth(),t.drawYear=t.selectedYear=a.getFullYear(),("M"===i||"Y"===i)&&this._notifyChange(t)},_restrictMinMax:function(t,e){var i=this._getMinMaxDate(t,"min"),s=this._getMinMaxDate(t,"max"),n=i&&i>e?i:e;return s&&n>s?s:n},_notifyChange:function(t){var e=this._get(t,"onChangeMonthYear");e&&e.apply(t.input?t.input[0]:null,[t.selectedYear,t.selectedMonth+1,t])},_getNumberOfMonths:function(t){var e=this._get(t,"numberOfMonths");return null==e?[1,1]:"number"==typeof e?[1,e]:e},_getMinMaxDate:function(t,e){return this._determineDate(t,this._get(t,e+"Date"),null)},_getDaysInMonth:function(t,e){return 32-this._daylightSavingAdjust(new Date(t,e,32)).getDate()},_getFirstDayOfMonth:function(t,e){return new Date(t,e,1).getDay()},_canAdjustMonth:function(t,e,i,s){var n=this._getNumberOfMonths(t),o=this._daylightSavingAdjust(new Date(i,s+(0>e?e:n[0]*n[1]),1));return 0>e&&o.setDate(this._getDaysInMonth(o.getFullYear(),o.getMonth())),this._isInRange(t,o)},_isInRange:function(t,e){var i,s,n=this._getMinMaxDate(t,"min"),o=this._getMinMaxDate(t,"max"),a=null,r=null,h=this._get(t,"yearRange");return h&&(i=h.split(":"),s=(new Date).getFullYear(),a=parseInt(i[0],10),r=parseInt(i[1],10),i[0].match(/[+\-].*/)&&(a+=s),i[1].match(/[+\-].*/)&&(r+=s)),(!n||e.getTime()>=n.getTime())&&(!o||e.getTime()<=o.getTime())&&(!a||e.getFullYear()>=a)&&(!r||r>=e.getFullYear())},_getFormatConfig:function(t){var e=this._get(t,"shortYearCutoff");return e="string"!=typeof e?e:(new Date).getFullYear()%100+parseInt(e,10),{shortYearCutoff:e,dayNamesShort:this._get(t,"dayNamesShort"),dayNames:this._get(t,"dayNames"),monthNamesShort:this._get(t,"monthNamesShort"),monthNames:this._get(t,"monthNames")}},_formatDate:function(t,e,i,s){e||(t.currentDay=t.selectedDay,t.currentMonth=t.selectedMonth,t.currentYear=t.selectedYear);var n=e?"object"==typeof e?e:this._daylightSavingAdjust(new Date(s,i,e)):this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return this.formatDate(this._get(t,"dateFormat"),n,this._getFormatConfig(t))}}),t.fn.datepicker=function(e){if(!this.length)return this;t.datepicker.initialized||(t(document).on("mousedown",t.datepicker._checkExternalClick),t.datepicker.initialized=!0),0===t("#"+t.datepicker._mainDivId).length&&t("body").append(t.datepicker.dpDiv);var i=Array.prototype.slice.call(arguments,1);return"string"!=typeof e||"isDisabled"!==e&&"getDate"!==e&&"widget"!==e?"option"===e&&2===arguments.length&&"string"==typeof arguments[1]?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i)):this.each(function(){"string"==typeof e?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this].concat(i)):t.datepicker._attachDatepicker(this,e)}):t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i))},t.datepicker=new s,t.datepicker.initialized=!1,t.datepicker.uuid=(new Date).getTime(),t.datepicker.version="1.12.1",t.datepicker,t.ui.ie=!!/msie [\w.]+/.exec(navigator.userAgent.toLowerCase());var _=!1;t(document).on("mouseup",function(){_=!1}),t.widget("ui.mouse",{version:"1.12.1",options:{cancel:"input, textarea, button, select, option",distance:1,delay:0},_mouseInit:function(){var e=this;this.element.on("mousedown."+this.widgetName,function(t){return 
e._mouseDown(t)}).on("click."+this.widgetName,function(i){return!0===t.data(i.target,e.widgetName+".preventClickEvent")?(t.removeData(i.target,e.widgetName+".preventClickEvent"),i.stopImmediatePropagation(),!1):void 0}),this.started=!1},_mouseDestroy:function(){this.element.off("."+this.widgetName),this._mouseMoveDelegate&&this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate)},_mouseDown:function(e){if(!_){this._mouseMoved=!1,this._mouseStarted&&this._mouseUp(e),this._mouseDownEvent=e;var i=this,s=1===e.which,n="string"==typeof this.options.cancel&&e.target.nodeName?t(e.target).closest(this.options.cancel).length:!1;return s&&!n&&this._mouseCapture(e)?(this.mouseDelayMet=!this.options.delay,this.mouseDelayMet||(this._mouseDelayTimer=setTimeout(function(){i.mouseDelayMet=!0},this.options.delay)),this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(e)!==!1,!this._mouseStarted)?(e.preventDefault(),!0):(!0===t.data(e.target,this.widgetName+".preventClickEvent")&&t.removeData(e.target,this.widgetName+".preventClickEvent"),this._mouseMoveDelegate=function(t){return i._mouseMove(t)},this._mouseUpDelegate=function(t){return i._mouseUp(t)},this.document.on("mousemove."+this.widgetName,this._mouseMoveDelegate).on("mouseup."+this.widgetName,this._mouseUpDelegate),e.preventDefault(),_=!0,!0)):!0}},_mouseMove:function(e){if(this._mouseMoved){if(t.ui.ie&&(!document.documentMode||9>document.documentMode)&&!e.button)return this._mouseUp(e);if(!e.which)if(e.originalEvent.altKey||e.originalEvent.ctrlKey||e.originalEvent.metaKey||e.originalEvent.shiftKey)this.ignoreMissingWhich=!0;else if(!this.ignoreMissingWhich)return this._mouseUp(e)}return(e.which||e.button)&&(this._mouseMoved=!0),this._mouseStarted?(this._mouseDrag(e),e.preventDefault()):(this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,e)!==!1,this._mouseStarted?this._mouseDrag(e):this._mouseUp(e)),!this._mouseStarted)},_mouseUp:function(e){this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,e.target===this._mouseDownEvent.target&&t.data(e.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(e)),this._mouseDelayTimer&&(clearTimeout(this._mouseDelayTimer),delete this._mouseDelayTimer),this.ignoreMissingWhich=!1,_=!1,e.preventDefault()},_mouseDistanceMet:function(t){return Math.max(Math.abs(this._mouseDownEvent.pageX-t.pageX),Math.abs(this._mouseDownEvent.pageY-t.pageY))>=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),t.ui.plugin={add:function(e,i,s){var n,o=t.ui[e].prototype;for(n in s)o.plugins[n]=o.plugins[n]||[],o.plugins[n].push([i,s[n]])},call:function(t,e,i,s){var 
n,o=t.plugins[e];if(o&&(s||t.element[0].parentNode&&11!==t.element[0].parentNode.nodeType))for(n=0;o.length>n;n++)t.options[o[n][0]]&&o[n][1].apply(t.element,i)}},t.ui.safeBlur=function(e){e&&"body"!==e.nodeName.toLowerCase()&&t(e).trigger("blur")},t.widget("ui.draggable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"drag",options:{addClasses:!0,appendTo:"parent",axis:!1,connectToSortable:!1,containment:!1,cursor:"auto",cursorAt:!1,grid:!1,handle:!1,helper:"original",iframeFix:!1,opacity:!1,refreshPositions:!1,revert:!1,revertDuration:500,scope:"default",scroll:!0,scrollSensitivity:20,scrollSpeed:20,snap:!1,snapMode:"both",snapTolerance:20,stack:!1,zIndex:!1,drag:null,start:null,stop:null},_create:function(){"original"===this.options.helper&&this._setPositionRelative(),this.options.addClasses&&this._addClass("ui-draggable"),this._setHandleClassName(),this._mouseInit()},_setOption:function(t,e){this._super(t,e),"handle"===t&&(this._removeHandleClassName(),this._setHandleClassName())},_destroy:function(){return(this.helper||this.element).is(".ui-draggable-dragging")?(this.destroyOnClear=!0,void 0):(this._removeHandleClassName(),this._mouseDestroy(),void 0)},_mouseCapture:function(e){var i=this.options;return this.helper||i.disabled||t(e.target).closest(".ui-resizable-handle").length>0?!1:(this.handle=this._getHandle(e),this.handle?(this._blurActiveElement(e),this._blockFrames(i.iframeFix===!0?"iframe":i.iframeFix),!0):!1)},_blockFrames:function(e){this.iframeBlocks=this.document.find(e).map(function(){var e=t(this);return t("
<div>
    ").css("position","absolute").appendTo(e.parent()).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_blurActiveElement:function(e){var i=t.ui.safeActiveElement(this.document[0]),s=t(e.target);s.closest(i).length||t.ui.safeBlur(i)},_mouseStart:function(e){var i=this.options;return this.helper=this._createHelper(e),this._addClass(this.helper,"ui-draggable-dragging"),this._cacheHelperProportions(),t.ui.ddmanager&&(t.ui.ddmanager.current=this),this._cacheMargins(),this.cssPosition=this.helper.css("position"),this.scrollParent=this.helper.scrollParent(!0),this.offsetParent=this.helper.offsetParent(),this.hasFixedAncestor=this.helper.parents().filter(function(){return"fixed"===t(this).css("position")}).length>0,this.positionAbs=this.element.offset(),this._refreshOffsets(e),this.originalPosition=this.position=this._generatePosition(e,!1),this.originalPageX=e.pageX,this.originalPageY=e.pageY,i.cursorAt&&this._adjustOffsetFromHelper(i.cursorAt),this._setContainment(),this._trigger("start",e)===!1?(this._clear(),!1):(this._cacheHelperProportions(),t.ui.ddmanager&&!i.dropBehaviour&&t.ui.ddmanager.prepareOffsets(this,e),this._mouseDrag(e,!0),t.ui.ddmanager&&t.ui.ddmanager.dragStart(this,e),!0)},_refreshOffsets:function(t){this.offset={top:this.positionAbs.top-this.margins.top,left:this.positionAbs.left-this.margins.left,scroll:!1,parent:this._getParentOffset(),relative:this._getRelativeOffset()},this.offset.click={left:t.pageX-this.offset.left,top:t.pageY-this.offset.top}},_mouseDrag:function(e,i){if(this.hasFixedAncestor&&(this.offset.parent=this._getParentOffset()),this.position=this._generatePosition(e,!0),this.positionAbs=this._convertPositionTo("absolute"),!i){var s=this._uiHash();if(this._trigger("drag",e,s)===!1)return this._mouseUp(new t.Event("mouseup",e)),!1;this.position=s.position}return this.helper[0].style.left=this.position.left+"px",this.helper[0].style.top=this.position.top+"px",t.ui.ddmanager&&t.ui.ddmanager.drag(this,e),!1},_mouseStop:function(e){var i=this,s=!1;return t.ui.ddmanager&&!this.options.dropBehaviour&&(s=t.ui.ddmanager.drop(this,e)),this.dropped&&(s=this.dropped,this.dropped=!1),"invalid"===this.options.revert&&!s||"valid"===this.options.revert&&s||this.options.revert===!0||t.isFunction(this.options.revert)&&this.options.revert.call(this.element,s)?t(this.helper).animate(this.originalPosition,parseInt(this.options.revertDuration,10),function(){i._trigger("stop",e)!==!1&&i._clear()}):this._trigger("stop",e)!==!1&&this._clear(),!1},_mouseUp:function(e){return this._unblockFrames(),t.ui.ddmanager&&t.ui.ddmanager.dragStop(this,e),this.handleElement.is(e.target)&&this.element.trigger("focus"),t.ui.mouse.prototype._mouseUp.call(this,e)},cancel:function(){return this.helper.is(".ui-draggable-dragging")?this._mouseUp(new t.Event("mouseup",{target:this.element[0]})):this._clear(),this},_getHandle:function(e){return this.options.handle?!!t(e.target).closest(this.element.find(this.options.handle)).length:!0},_setHandleClassName:function(){this.handleElement=this.options.handle?this.element.find(this.options.handle):this.element,this._addClass(this.handleElement,"ui-draggable-handle")},_removeHandleClassName:function(){this._removeClass(this.handleElement,"ui-draggable-handle")},_createHelper:function(e){var 
i=this.options,s=t.isFunction(i.helper),n=s?t(i.helper.apply(this.element[0],[e])):"clone"===i.helper?this.element.clone().removeAttr("id"):this.element;return n.parents("body").length||n.appendTo("parent"===i.appendTo?this.element[0].parentNode:i.appendTo),s&&n[0]===this.element[0]&&this._setPositionRelative(),n[0]===this.element[0]||/(fixed|absolute)/.test(n.css("position"))||n.css("position","absolute"),n},_setPositionRelative:function(){/^(?:r|a|f)/.test(this.element.css("position"))||(this.element[0].style.position="relative")},_adjustOffsetFromHelper:function(e){"string"==typeof e&&(e=e.split(" ")),t.isArray(e)&&(e={left:+e[0],top:+e[1]||0}),"left"in e&&(this.offset.click.left=e.left+this.margins.left),"right"in e&&(this.offset.click.left=this.helperProportions.width-e.right+this.margins.left),"top"in e&&(this.offset.click.top=e.top+this.margins.top),"bottom"in e&&(this.offset.click.top=this.helperProportions.height-e.bottom+this.margins.top)},_isRootNode:function(t){return/(html|body)/i.test(t.tagName)||t===this.document[0]},_getParentOffset:function(){var e=this.offsetParent.offset(),i=this.document[0];return"absolute"===this.cssPosition&&this.scrollParent[0]!==i&&t.contains(this.scrollParent[0],this.offsetParent[0])&&(e.left+=this.scrollParent.scrollLeft(),e.top+=this.scrollParent.scrollTop()),this._isRootNode(this.offsetParent[0])&&(e={top:0,left:0}),{top:e.top+(parseInt(this.offsetParent.css("borderTopWidth"),10)||0),left:e.left+(parseInt(this.offsetParent.css("borderLeftWidth"),10)||0)}},_getRelativeOffset:function(){if("relative"!==this.cssPosition)return{top:0,left:0};var t=this.element.position(),e=this._isRootNode(this.scrollParent[0]);return{top:t.top-(parseInt(this.helper.css("top"),10)||0)+(e?0:this.scrollParent.scrollTop()),left:t.left-(parseInt(this.helper.css("left"),10)||0)+(e?0:this.scrollParent.scrollLeft())} -},_cacheMargins:function(){this.margins={left:parseInt(this.element.css("marginLeft"),10)||0,top:parseInt(this.element.css("marginTop"),10)||0,right:parseInt(this.element.css("marginRight"),10)||0,bottom:parseInt(this.element.css("marginBottom"),10)||0}},_cacheHelperProportions:function(){this.helperProportions={width:this.helper.outerWidth(),height:this.helper.outerHeight()}},_setContainment:function(){var e,i,s,n=this.options,o=this.document[0];return this.relativeContainer=null,n.containment?"window"===n.containment?(this.containment=[t(window).scrollLeft()-this.offset.relative.left-this.offset.parent.left,t(window).scrollTop()-this.offset.relative.top-this.offset.parent.top,t(window).scrollLeft()+t(window).width()-this.helperProportions.width-this.margins.left,t(window).scrollTop()+(t(window).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):"document"===n.containment?(this.containment=[0,0,t(o).width()-this.helperProportions.width-this.margins.left,(t(o).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):n.containment.constructor===Array?(this.containment=n.containment,void 
0):("parent"===n.containment&&(n.containment=this.helper[0].parentNode),i=t(n.containment),s=i[0],s&&(e=/(scroll|auto)/.test(i.css("overflow")),this.containment=[(parseInt(i.css("borderLeftWidth"),10)||0)+(parseInt(i.css("paddingLeft"),10)||0),(parseInt(i.css("borderTopWidth"),10)||0)+(parseInt(i.css("paddingTop"),10)||0),(e?Math.max(s.scrollWidth,s.offsetWidth):s.offsetWidth)-(parseInt(i.css("borderRightWidth"),10)||0)-(parseInt(i.css("paddingRight"),10)||0)-this.helperProportions.width-this.margins.left-this.margins.right,(e?Math.max(s.scrollHeight,s.offsetHeight):s.offsetHeight)-(parseInt(i.css("borderBottomWidth"),10)||0)-(parseInt(i.css("paddingBottom"),10)||0)-this.helperProportions.height-this.margins.top-this.margins.bottom],this.relativeContainer=i),void 0):(this.containment=null,void 0)},_convertPositionTo:function(t,e){e||(e=this.position);var i="absolute"===t?1:-1,s=this._isRootNode(this.scrollParent[0]);return{top:e.top+this.offset.relative.top*i+this.offset.parent.top*i-("fixed"===this.cssPosition?-this.offset.scroll.top:s?0:this.offset.scroll.top)*i,left:e.left+this.offset.relative.left*i+this.offset.parent.left*i-("fixed"===this.cssPosition?-this.offset.scroll.left:s?0:this.offset.scroll.left)*i}},_generatePosition:function(t,e){var i,s,n,o,a=this.options,r=this._isRootNode(this.scrollParent[0]),h=t.pageX,l=t.pageY;return r&&this.offset.scroll||(this.offset.scroll={top:this.scrollParent.scrollTop(),left:this.scrollParent.scrollLeft()}),e&&(this.containment&&(this.relativeContainer?(s=this.relativeContainer.offset(),i=[this.containment[0]+s.left,this.containment[1]+s.top,this.containment[2]+s.left,this.containment[3]+s.top]):i=this.containment,t.pageX-this.offset.click.lefti[2]&&(h=i[2]+this.offset.click.left),t.pageY-this.offset.click.top>i[3]&&(l=i[3]+this.offset.click.top)),a.grid&&(n=a.grid[1]?this.originalPageY+Math.round((l-this.originalPageY)/a.grid[1])*a.grid[1]:this.originalPageY,l=i?n-this.offset.click.top>=i[1]||n-this.offset.click.top>i[3]?n:n-this.offset.click.top>=i[1]?n-a.grid[1]:n+a.grid[1]:n,o=a.grid[0]?this.originalPageX+Math.round((h-this.originalPageX)/a.grid[0])*a.grid[0]:this.originalPageX,h=i?o-this.offset.click.left>=i[0]||o-this.offset.click.left>i[2]?o:o-this.offset.click.left>=i[0]?o-a.grid[0]:o+a.grid[0]:o),"y"===a.axis&&(h=this.originalPageX),"x"===a.axis&&(l=this.originalPageY)),{top:l-this.offset.click.top-this.offset.relative.top-this.offset.parent.top+("fixed"===this.cssPosition?-this.offset.scroll.top:r?0:this.offset.scroll.top),left:h-this.offset.click.left-this.offset.relative.left-this.offset.parent.left+("fixed"===this.cssPosition?-this.offset.scroll.left:r?0:this.offset.scroll.left)}},_clear:function(){this._removeClass(this.helper,"ui-draggable-dragging"),this.helper[0]===this.element[0]||this.cancelHelperRemoval||this.helper.remove(),this.helper=null,this.cancelHelperRemoval=!1,this.destroyOnClear&&this.destroy()},_trigger:function(e,i,s){return s=s||this._uiHash(),t.ui.plugin.call(this,e,[i,s,this],!0),/^(drag|start|stop)/.test(e)&&(this.positionAbs=this._convertPositionTo("absolute"),s.offset=this.positionAbs),t.Widget.prototype._trigger.call(this,e,i,s)},plugins:{},_uiHash:function(){return{helper:this.helper,position:this.position,originalPosition:this.originalPosition,offset:this.positionAbs}}}),t.ui.plugin.add("draggable","connectToSortable",{start:function(e,i,s){var n=t.extend({},i,{item:s.element});s.sortables=[],t(s.options.connectToSortable).each(function(){var 
i=t(this).sortable("instance");i&&!i.options.disabled&&(s.sortables.push(i),i.refreshPositions(),i._trigger("activate",e,n))})},stop:function(e,i,s){var n=t.extend({},i,{item:s.element});s.cancelHelperRemoval=!1,t.each(s.sortables,function(){var t=this;t.isOver?(t.isOver=0,s.cancelHelperRemoval=!0,t.cancelHelperRemoval=!1,t._storedCSS={position:t.placeholder.css("position"),top:t.placeholder.css("top"),left:t.placeholder.css("left")},t._mouseStop(e),t.options.helper=t.options._helper):(t.cancelHelperRemoval=!0,t._trigger("deactivate",e,n))})},drag:function(e,i,s){t.each(s.sortables,function(){var n=!1,o=this;o.positionAbs=s.positionAbs,o.helperProportions=s.helperProportions,o.offset.click=s.offset.click,o._intersectsWith(o.containerCache)&&(n=!0,t.each(s.sortables,function(){return this.positionAbs=s.positionAbs,this.helperProportions=s.helperProportions,this.offset.click=s.offset.click,this!==o&&this._intersectsWith(this.containerCache)&&t.contains(o.element[0],this.element[0])&&(n=!1),n})),n?(o.isOver||(o.isOver=1,s._parent=i.helper.parent(),o.currentItem=i.helper.appendTo(o.element).data("ui-sortable-item",!0),o.options._helper=o.options.helper,o.options.helper=function(){return i.helper[0]},e.target=o.currentItem[0],o._mouseCapture(e,!0),o._mouseStart(e,!0,!0),o.offset.click.top=s.offset.click.top,o.offset.click.left=s.offset.click.left,o.offset.parent.left-=s.offset.parent.left-o.offset.parent.left,o.offset.parent.top-=s.offset.parent.top-o.offset.parent.top,s._trigger("toSortable",e),s.dropped=o.element,t.each(s.sortables,function(){this.refreshPositions()}),s.currentItem=s.element,o.fromOutside=s),o.currentItem&&(o._mouseDrag(e),i.position=o.position)):o.isOver&&(o.isOver=0,o.cancelHelperRemoval=!0,o.options._revert=o.options.revert,o.options.revert=!1,o._trigger("out",e,o._uiHash(o)),o._mouseStop(e,!0),o.options.revert=o.options._revert,o.options.helper=o.options._helper,o.placeholder&&o.placeholder.remove(),i.helper.appendTo(s._parent),s._refreshOffsets(e),i.position=s._generatePosition(e,!0),s._trigger("fromSortable",e),s.dropped=!1,t.each(s.sortables,function(){this.refreshPositions()}))})}}),t.ui.plugin.add("draggable","cursor",{start:function(e,i,s){var n=t("body"),o=s.options;n.css("cursor")&&(o._cursor=n.css("cursor")),n.css("cursor",o.cursor)},stop:function(e,i,s){var n=s.options;n._cursor&&t("body").css("cursor",n._cursor)}}),t.ui.plugin.add("draggable","opacity",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("opacity")&&(o._opacity=n.css("opacity")),n.css("opacity",o.opacity)},stop:function(e,i,s){var n=s.options;n._opacity&&t(i.helper).css("opacity",n._opacity)}}),t.ui.plugin.add("draggable","scroll",{start:function(t,e,i){i.scrollParentNotHidden||(i.scrollParentNotHidden=i.helper.scrollParent(!1)),i.scrollParentNotHidden[0]!==i.document[0]&&"HTML"!==i.scrollParentNotHidden[0].tagName&&(i.overflowOffset=i.scrollParentNotHidden.offset())},drag:function(e,i,s){var 
n=s.options,o=!1,a=s.scrollParentNotHidden[0],r=s.document[0];a!==r&&"HTML"!==a.tagName?(n.axis&&"x"===n.axis||(s.overflowOffset.top+a.offsetHeight-e.pageY<n.scrollSensitivity?a.scrollTop=o=a.scrollTop+n.scrollSpeed:e.pageY-s.overflowOffset.top<n.scrollSensitivity&&(a.scrollTop=o=a.scrollTop-n.scrollSpeed)),n.axis&&"y"===n.axis||(s.overflowOffset.left+a.offsetWidth-e.pageX<n.scrollSensitivity?a.scrollLeft=o=a.scrollLeft+n.scrollSpeed:e.pageX-s.overflowOffset.left<n.scrollSensitivity&&(a.scrollLeft=o=a.scrollLeft-n.scrollSpeed))):(n.axis&&"x"===n.axis||(e.pageY-t(r).scrollTop()<n.scrollSensitivity?o=t(r).scrollTop(t(r).scrollTop()-n.scrollSpeed):t(window).height()-(e.pageY-t(r).scrollTop())<n.scrollSensitivity&&(o=t(r).scrollTop(t(r).scrollTop()+n.scrollSpeed))),n.axis&&"y"===n.axis||(e.pageX-t(r).scrollLeft()<n.scrollSensitivity?o=t(r).scrollLeft(t(r).scrollLeft()-n.scrollSpeed):t(window).width()-(e.pageX-t(r).scrollLeft())<n.scrollSensitivity&&(o=t(r).scrollLeft(t(r).scrollLeft()+n.scrollSpeed)))),o!==!1&&t.ui.ddmanager&&!n.dropBehaviour&&t.ui.ddmanager.prepareOffsets(s,e)}}),
t.ui.plugin.add("draggable","snap",{start:function(e,i,s){var n=s.options;s.snapElements=[],t(n.snap.constructor!==String?n.snap.items||":data(ui-draggable)":n.snap).each(function(){var e=t(this),i=e.offset();this!==s.element[0]&&s.snapElements.push({item:this,width:e.outerWidth(),height:e.outerHeight(),top:i.top,left:i.left})})},drag:function(e,i,s){var n,o,a,r,h,l,c,u,d,p,f=s.options,g=f.snapTolerance,m=i.offset.left,_=m+s.helperProportions.width,v=i.offset.top,b=v+s.helperProportions.height;for(d=s.snapElements.length-1;d>=0;d--)h=s.snapElements[d].left-s.margins.left,l=h+s.snapElements[d].width,c=s.snapElements[d].top-s.margins.top,u=c+s.snapElements[d].height,h-g>_||m>l+g||c-g>b||v>u+g||!t.contains(s.snapElements[d].item.ownerDocument,s.snapElements[d].item)?(s.snapElements[d].snapping&&s.options.snap.release&&s.options.snap.release.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=!1):("inner"!==f.snapMode&&(n=g>=Math.abs(c-b),o=g>=Math.abs(u-v),a=g>=Math.abs(h-_),r=g>=Math.abs(l-m),n&&(i.position.top=s._convertPositionTo("relative",{top:c-s.helperProportions.height,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h-s.helperProportions.width}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l}).left)),p=n||o||a||r,"outer"!==f.snapMode&&(n=g>=Math.abs(c-v),o=g>=Math.abs(u-b),a=g>=Math.abs(h-m),r=g>=Math.abs(l-_),n&&(i.position.top=s._convertPositionTo("relative",{top:c,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u-s.helperProportions.height,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l-s.helperProportions.width}).left)),!s.snapElements[d].snapping&&(n||o||a||r||p)&&s.options.snap.snap&&s.options.snap.snap.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=n||o||a||r||p)}}),
t.ui.plugin.add("draggable","stack",{start:function(e,i,s){var n,o=s.options,a=t.makeArray(t(o.stack)).sort(function(e,i){return(parseInt(t(e).css("zIndex"),10)||0)-(parseInt(t(i).css("zIndex"),10)||0)});a.length&&(n=parseInt(t(a[0]).css("zIndex"),10)||0,t(a).each(function(e){t(this).css("zIndex",n+e)}),this.css("zIndex",n+a.length))}}),t.ui.plugin.add("draggable","zIndex",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("zIndex")&&(o._zIndex=n.css("zIndex")),n.css("zIndex",o.zIndex)},stop:function(e,i,s){var n=s.options;n._zIndex&&t(i.helper).css("zIndex",n._zIndex)}}),t.ui.draggable,
t.widget("ui.resizable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,classes:{"ui-resizable-se":"ui-icon ui-icon-gripsmall-diagonal-se"},containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:90,resize:null,start:null,stop:null},_num:function(t){return parseFloat(t)||0},_isNumber:function(t){return!isNaN(parseFloat(t))},_hasScroll:function(e,i){if("hidden"===t(e).css("overflow"))return!1;var s=i&&"left"===i?"scrollLeft":"scrollTop",n=!1;return e[s]>0?!0:(e[s]=1,n=e[s]>0,e[s]=0,n)},_create:function(){var e,i=this.options,s=this;this._addClass("ui-resizable"),t.extend(this,{_aspectRatio:!!i.aspectRatio,aspectRatio:i.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:i.helper||i.ghost||i.animate?i.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/^(canvas|textarea|input|select|button|img)$/i)&&(this.element.wrap(t("<div class='ui-wrapper' style='overflow: hidden;'></div>
    ").css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,e={marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom"),marginLeft:this.originalElement.css("marginLeft")},this.element.css(e),this.originalElement.css("margin",0),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css(e),this._proportionallyResize()),this._setupHandles(),i.autoHide&&t(this.element).on("mouseenter",function(){i.disabled||(s._removeClass("ui-resizable-autohide"),s._handles.show())}).on("mouseleave",function(){i.disabled||s.resizing||(s._addClass("ui-resizable-autohide"),s._handles.hide())}),this._mouseInit()},_destroy:function(){this._mouseDestroy();var e,i=function(e){t(e).removeData("resizable").removeData("ui-resizable").off(".resizable").find(".ui-resizable-handle").remove()};return this.elementIsWrapper&&(i(this.element),e=this.element,this.originalElement.css({position:e.css("position"),width:e.outerWidth(),height:e.outerHeight(),top:e.css("top"),left:e.css("left")}).insertAfter(e),e.remove()),this.originalElement.css("resize",this.originalResizeStyle),i(this.originalElement),this},_setOption:function(t,e){switch(this._super(t,e),t){case"handles":this._removeHandles(),this._setupHandles();break;default:}},_setupHandles:function(){var e,i,s,n,o,a=this.options,r=this;if(this.handles=a.handles||(t(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=t(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),s=this.handles.split(","),this.handles={},i=0;s.length>i;i++)e=t.trim(s[i]),n="ui-resizable-"+e,o=t("
    "),this._addClass(o,"ui-resizable-handle "+n),o.css({zIndex:a.zIndex}),this.handles[e]=".ui-resizable-"+e,this.element.append(o);this._renderAxis=function(e){var i,s,n,o;e=e||this.element;for(i in this.handles)this.handles[i].constructor===String?this.handles[i]=this.element.children(this.handles[i]).first().show():(this.handles[i].jquery||this.handles[i].nodeType)&&(this.handles[i]=t(this.handles[i]),this._on(this.handles[i],{mousedown:r._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(s=t(this.handles[i],this.element),o=/sw|ne|nw|se|n|s/.test(i)?s.outerHeight():s.outerWidth(),n=["padding",/ne|nw|n/.test(i)?"Top":/se|sw|s/.test(i)?"Bottom":/^e$/.test(i)?"Right":"Left"].join(""),e.css(n,o),this._proportionallyResize()),this._handles=this._handles.add(this.handles[i])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.on("mouseover",function(){r.resizing||(this.className&&(o=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),r.axis=o&&o[1]?o[1]:"se")}),a.autoHide&&(this._handles.hide(),this._addClass("ui-resizable-autohide"))},_removeHandles:function(){this._handles.remove()},_mouseCapture:function(e){var i,s,n=!1;for(i in this.handles)s=t(this.handles[i])[0],(s===e.target||t.contains(s,e.target))&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(e){var i,s,n,o=this.options,a=this.element;return this.resizing=!0,this._renderProxy(),i=this._num(this.helper.css("left")),s=this._num(this.helper.css("top")),o.containment&&(i+=t(o.containment).scrollLeft()||0,s+=t(o.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:i,top:s},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{width:a.width(),height:a.height()},this.originalSize=this._helper?{width:a.outerWidth(),height:a.outerHeight()}:{width:a.width(),height:a.height()},this.sizeDiff={width:a.outerWidth()-a.width(),height:a.outerHeight()-a.height()},this.originalPosition={left:i,top:s},this.originalMousePosition={left:e.pageX,top:e.pageY},this.aspectRatio="number"==typeof o.aspectRatio?o.aspectRatio:this.originalSize.width/this.originalSize.height||1,n=t(".ui-resizable-"+this.axis).css("cursor"),t("body").css("cursor","auto"===n?this.axis+"-resize":n),this._addClass("ui-resizable-resizing"),this._propagate("start",e),!0},_mouseDrag:function(e){var i,s,n=this.originalMousePosition,o=this.axis,a=e.pageX-n.left||0,r=e.pageY-n.top||0,h=this._change[o];return this._updatePrevProperties(),h?(i=h.apply(this,[e,a,r]),this._updateVirtualBoundaries(e.shiftKey),(this._aspectRatio||e.shiftKey)&&(i=this._updateRatio(i,e)),i=this._respectSize(i,e),this._updateCache(i),this._propagate("resize",e),s=this._applyChanges(),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),t.isEmptyObject(s)||(this._updatePrevProperties(),this._trigger("resize",e,this.ui()),this._applyChanges()),!1):!1},_mouseStop:function(e){this.resizing=!1;var i,s,n,o,a,r,h,l=this.options,c=this;return 
this._helper&&(i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),n=s&&this._hasScroll(i[0],"left")?0:c.sizeDiff.height,o=s?0:c.sizeDiff.width,a={width:c.helper.width()-o,height:c.helper.height()-n},r=parseFloat(c.element.css("left"))+(c.position.left-c.originalPosition.left)||null,h=parseFloat(c.element.css("top"))+(c.position.top-c.originalPosition.top)||null,l.animate||this.element.css(t.extend(a,{top:h,left:r})),c.helper.height(c.size.height),c.helper.width(c.size.width),this._helper&&!l.animate&&this._proportionallyResize()),t("body").css("cursor","auto"),this._removeClass("ui-resizable-resizing"),this._propagate("stop",e),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var t={};return this.position.top!==this.prevPosition.top&&(t.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(t.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(t.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(t.height=this.size.height+"px"),this.helper.css(t),t},
_updateVirtualBoundaries:function(t){var e,i,s,n,o,a=this.options;o={minWidth:this._isNumber(a.minWidth)?a.minWidth:0,maxWidth:this._isNumber(a.maxWidth)?a.maxWidth:1/0,minHeight:this._isNumber(a.minHeight)?a.minHeight:0,maxHeight:this._isNumber(a.maxHeight)?a.maxHeight:1/0},(this._aspectRatio||t)&&(e=o.minHeight*this.aspectRatio,s=o.minWidth/this.aspectRatio,i=o.maxHeight*this.aspectRatio,n=o.maxWidth/this.aspectRatio,e>o.minWidth&&(o.minWidth=e),s>o.minHeight&&(o.minHeight=s),o.maxWidth>i&&(o.maxWidth=i),o.maxHeight>n&&(o.maxHeight=n)),this._vBoundaries=o},_updateCache:function(t){this.offset=this.helper.offset(),this._isNumber(t.left)&&(this.position.left=t.left),this._isNumber(t.top)&&(this.position.top=t.top),this._isNumber(t.height)&&(this.size.height=t.height),this._isNumber(t.width)&&(this.size.width=t.width)},_updateRatio:function(t){var e=this.position,i=this.size,s=this.axis;return this._isNumber(t.height)?t.width=t.height*this.aspectRatio:this._isNumber(t.width)&&(t.height=t.width/this.aspectRatio),"sw"===s&&(t.left=e.left+(i.width-t.width),t.top=null),"nw"===s&&(t.top=e.top+(i.height-t.height),t.left=e.left+(i.width-t.width)),t},
_respectSize:function(t){var e=this._vBoundaries,i=this.axis,s=this._isNumber(t.width)&&e.maxWidth&&e.maxWidth<t.width,n=this._isNumber(t.height)&&e.maxHeight&&e.maxHeight<t.height,o=this._isNumber(t.width)&&e.minWidth&&e.minWidth>t.width,a=this._isNumber(t.height)&&e.minHeight&&e.minHeight>t.height,r=this.originalPosition.left+this.originalSize.width,h=this.originalPosition.top+this.originalSize.height,l=/sw|nw|w/.test(i),c=/nw|ne|n/.test(i);return o&&(t.width=e.minWidth),a&&(t.height=e.minHeight),s&&(t.width=e.maxWidth),n&&(t.height=e.maxHeight),o&&l&&(t.left=r-e.minWidth),s&&l&&(t.left=r-e.maxWidth),a&&c&&(t.top=h-e.minHeight),n&&c&&(t.top=h-e.maxHeight),t.width||t.height||t.left||!t.top?t.width||t.height||t.top||!t.left||(t.left=null):t.top=null,t},_getPaddingPlusBorderDimensions:function(t){for(var e=0,i=[],s=[t.css("borderTopWidth"),t.css("borderRightWidth"),t.css("borderBottomWidth"),t.css("borderLeftWidth")],n=[t.css("paddingTop"),t.css("paddingRight"),t.css("paddingBottom"),t.css("paddingLeft")];4>e;e++)i[e]=parseFloat(s[e])||0,i[e]+=parseFloat(n[e])||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var
t,e=0,i=this.helper||this.element;this._proportionallyResizeElements.length>e;e++)t=this._proportionallyResizeElements[e],this.outerDimensions||(this.outerDimensions=this._getPaddingPlusBorderDimensions(t)),t.css({height:i.height()-this.outerDimensions.height||0,width:i.width()-this.outerDimensions.width||0})},_renderProxy:function(){var e=this.element,i=this.options;this.elementOffset=e.offset(),this._helper?(this.helper=this.helper||t("
<div style='overflow:hidden;'></div>
    "),this._addClass(this.helper,this._helper),this.helper.css({width:this.element.outerWidth(),height:this.element.outerHeight(),position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++i.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(t,e){return{width:this.originalSize.width+e}},w:function(t,e){var i=this.originalSize,s=this.originalPosition;return{left:s.left+e,width:i.width-e}},n:function(t,e,i){var s=this.originalSize,n=this.originalPosition;return{top:n.top+i,height:s.height-i}},s:function(t,e,i){return{height:this.originalSize.height+i}},se:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},sw:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[e,i,s]))},ne:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},nw:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[e,i,s]))}},_propagate:function(e,i){t.ui.plugin.call(this,e,[i,this.ui()]),"resize"!==e&&this._trigger(e,i,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),t.ui.plugin.add("resizable","animate",{stop:function(e){var i=t(this).resizable("instance"),s=i.options,n=i._proportionallyResizeElements,o=n.length&&/textarea/i.test(n[0].nodeName),a=o&&i._hasScroll(n[0],"left")?0:i.sizeDiff.height,r=o?0:i.sizeDiff.width,h={width:i.size.width-r,height:i.size.height-a},l=parseFloat(i.element.css("left"))+(i.position.left-i.originalPosition.left)||null,c=parseFloat(i.element.css("top"))+(i.position.top-i.originalPosition.top)||null;i.element.animate(t.extend(h,c&&l?{top:c,left:l}:{}),{duration:s.animateDuration,easing:s.animateEasing,step:function(){var s={width:parseFloat(i.element.css("width")),height:parseFloat(i.element.css("height")),top:parseFloat(i.element.css("top")),left:parseFloat(i.element.css("left"))};n&&n.length&&t(n[0]).css({width:s.width,height:s.height}),i._updateCache(s),i._propagate("resize",e)}})}}),t.ui.plugin.add("resizable","containment",{start:function(){var e,i,s,n,o,a,r,h=t(this).resizable("instance"),l=h.options,c=h.element,u=l.containment,d=u instanceof t?u.get(0):/parent/.test(u)?c.parent().get(0):u;d&&(h.containerElement=t(d),/document/.test(u)||u===document?(h.containerOffset={left:0,top:0},h.containerPosition={left:0,top:0},h.parentData={element:t(document),left:0,top:0,width:t(document).width(),height:t(document).height()||document.body.parentNode.scrollHeight}):(e=t(d),i=[],t(["Top","Right","Left","Bottom"]).each(function(t,s){i[t]=h._num(e.css("padding"+s))}),h.containerOffset=e.offset(),h.containerPosition=e.position(),h.containerSize={height:e.innerHeight()-i[3],width:e.innerWidth()-i[1]},s=h.containerOffset,n=h.containerSize.height,o=h.containerSize.width,a=h._hasScroll(d,"left")?d.scrollWidth:o,r=h._hasScroll(d)?d.scrollHeight:n,h.parentData={element:d,left:s.left,top:s.top,width:a,height:r}))},resize:function(e){var 
i,s,n,o,a=t(this).resizable("instance"),r=a.options,h=a.containerOffset,l=a.position,c=a._aspectRatio||e.shiftKey,u={top:0,left:0},d=a.containerElement,p=!0;d[0]!==document&&/static/.test(d.css("position"))&&(u=h),l.left<(a._helper?h.left:0)&&(a.size.width=a.size.width+(a._helper?a.position.left-h.left:a.position.left-u.left),c&&(a.size.height=a.size.width/a.aspectRatio,p=!1),a.position.left=r.helper?h.left:0),l.top<(a._helper?h.top:0)&&(a.size.height=a.size.height+(a._helper?a.position.top-h.top:a.position.top),c&&(a.size.width=a.size.height*a.aspectRatio,p=!1),a.position.top=a._helper?h.top:0),n=a.containerElement.get(0)===a.element.parent().get(0),o=/relative|absolute/.test(a.containerElement.css("position")),n&&o?(a.offset.left=a.parentData.left+a.position.left,a.offset.top=a.parentData.top+a.position.top):(a.offset.left=a.element.offset().left,a.offset.top=a.element.offset().top),i=Math.abs(a.sizeDiff.width+(a._helper?a.offset.left-u.left:a.offset.left-h.left)),s=Math.abs(a.sizeDiff.height+(a._helper?a.offset.top-u.top:a.offset.top-h.top)),i+a.size.width>=a.parentData.width&&(a.size.width=a.parentData.width-i,c&&(a.size.height=a.size.width/a.aspectRatio,p=!1)),s+a.size.height>=a.parentData.height&&(a.size.height=a.parentData.height-s,c&&(a.size.width=a.size.height*a.aspectRatio,p=!1)),p||(a.position.left=a.prevPosition.left,a.position.top=a.prevPosition.top,a.size.width=a.prevSize.width,a.size.height=a.prevSize.height)},stop:function(){var e=t(this).resizable("instance"),i=e.options,s=e.containerOffset,n=e.containerPosition,o=e.containerElement,a=t(e.helper),r=a.offset(),h=a.outerWidth()-e.sizeDiff.width,l=a.outerHeight()-e.sizeDiff.height;e._helper&&!i.animate&&/relative/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l}),e._helper&&!i.animate&&/static/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l})}}),t.ui.plugin.add("resizable","alsoResize",{start:function(){var e=t(this).resizable("instance"),i=e.options;t(i.alsoResize).each(function(){var e=t(this);e.data("ui-resizable-alsoresize",{width:parseFloat(e.width()),height:parseFloat(e.height()),left:parseFloat(e.css("left")),top:parseFloat(e.css("top"))})})},resize:function(e,i){var s=t(this).resizable("instance"),n=s.options,o=s.originalSize,a=s.originalPosition,r={height:s.size.height-o.height||0,width:s.size.width-o.width||0,top:s.position.top-a.top||0,left:s.position.left-a.left||0};t(n.alsoResize).each(function(){var e=t(this),s=t(this).data("ui-resizable-alsoresize"),n={},o=e.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];t.each(o,function(t,e){var i=(s[e]||0)+(r[e]||0);i&&i>=0&&(n[e]=i||null)}),e.css(n)})},stop:function(){t(this).removeData("ui-resizable-alsoresize")}}),t.ui.plugin.add("resizable","ghost",{start:function(){var e=t(this).resizable("instance"),i=e.size;e.ghost=e.originalElement.clone(),e.ghost.css({opacity:.25,display:"block",position:"relative",height:i.height,width:i.width,margin:0,left:0,top:0}),e._addClass(e.ghost,"ui-resizable-ghost"),t.uiBackCompat!==!1&&"string"==typeof e.options.ghost&&e.ghost.addClass(this.options.ghost),e.ghost.appendTo(e.helper)},resize:function(){var e=t(this).resizable("instance");e.ghost&&e.ghost.css({position:"relative",height:e.size.height,width:e.size.width})},stop:function(){var e=t(this).resizable("instance");e.ghost&&e.helper&&e.helper.get(0).removeChild(e.ghost.get(0))}}),t.ui.plugin.add("resizable","grid",{resize:function(){var 
e,i=t(this).resizable("instance"),s=i.options,n=i.size,o=i.originalSize,a=i.originalPosition,r=i.axis,h="number"==typeof s.grid?[s.grid,s.grid]:s.grid,l=h[0]||1,c=h[1]||1,u=Math.round((n.width-o.width)/l)*l,d=Math.round((n.height-o.height)/c)*c,p=o.width+u,f=o.height+d,g=s.maxWidth&&p>s.maxWidth,m=s.maxHeight&&f>s.maxHeight,_=s.minWidth&&s.minWidth>p,v=s.minHeight&&s.minHeight>f;s.grid=h,_&&(p+=l),v&&(f+=c),g&&(p-=l),m&&(f-=c),/^(se|s|e)$/.test(r)?(i.size.width=p,i.size.height=f):/^(ne)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.top=a.top-d):/^(sw)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.left=a.left-u):((0>=f-c||0>=p-l)&&(e=i._getPaddingPlusBorderDimensions(this)),f-c>0?(i.size.height=f,i.position.top=a.top-d):(f=c-e.height,i.size.height=f,i.position.top=a.top+o.height-f),p-l>0?(i.size.width=p,i.position.left=a.left-u):(p=l-e.width,i.size.width=p,i.position.left=a.left+o.width-p))}}),t.ui.resizable,t.widget("ui.dialog",{version:"1.12.1",options:{appendTo:"body",autoOpen:!0,buttons:[],classes:{"ui-dialog":"ui-corner-all","ui-dialog-titlebar":"ui-corner-all"},closeOnEscape:!0,closeText:"Close",draggable:!0,hide:null,height:"auto",maxHeight:null,maxWidth:null,minHeight:150,minWidth:150,modal:!1,position:{my:"center",at:"center",of:window,collision:"fit",using:function(e){var i=t(this).css(e).offset().top;0>i&&t(this).css("top",e.top-i)}},resizable:!0,show:null,title:null,width:300,beforeClose:null,close:null,drag:null,dragStart:null,dragStop:null,focus:null,open:null,resize:null,resizeStart:null,resizeStop:null},sizeRelatedOptions:{buttons:!0,height:!0,maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0,width:!0},resizableRelatedOptions:{maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0},_create:function(){this.originalCss={display:this.element[0].style.display,width:this.element[0].style.width,minHeight:this.element[0].style.minHeight,maxHeight:this.element[0].style.maxHeight,height:this.element[0].style.height},this.originalPosition={parent:this.element.parent(),index:this.element.parent().children().index(this.element)},this.originalTitle=this.element.attr("title"),null==this.options.title&&null!=this.originalTitle&&(this.options.title=this.originalTitle),this.options.disabled&&(this.options.disabled=!1),this._createWrapper(),this.element.show().removeAttr("title").appendTo(this.uiDialog),this._addClass("ui-dialog-content","ui-widget-content"),this._createTitlebar(),this._createButtonPane(),this.options.draggable&&t.fn.draggable&&this._makeDraggable(),this.options.resizable&&t.fn.resizable&&this._makeResizable(),this._isOpen=!1,this._trackFocus()},_init:function(){this.options.autoOpen&&this.open()},_appendTo:function(){var e=this.options.appendTo;return e&&(e.jquery||e.nodeType)?t(e):this.document.find(e||"body").eq(0)},_destroy:function(){var t,e=this.originalPosition;this._untrackInstance(),this._destroyOverlay(),this.element.removeUniqueId().css(this.originalCss).detach(),this.uiDialog.remove(),this.originalTitle&&this.element.attr("title",this.originalTitle),t=e.parent.children().eq(e.index),t.length&&t[0]!==this.element[0]?t.before(this.element):e.parent.append(this.element)},widget:function(){return this.uiDialog -},disable:t.noop,enable:t.noop,close:function(e){var 
i=this;this._isOpen&&this._trigger("beforeClose",e)!==!1&&(this._isOpen=!1,this._focusedElement=null,this._destroyOverlay(),this._untrackInstance(),this.opener.filter(":focusable").trigger("focus").length||t.ui.safeBlur(t.ui.safeActiveElement(this.document[0])),this._hide(this.uiDialog,this.options.hide,function(){i._trigger("close",e)}))},isOpen:function(){return this._isOpen},moveToTop:function(){this._moveToTop()},_moveToTop:function(e,i){var s=!1,n=this.uiDialog.siblings(".ui-front:visible").map(function(){return+t(this).css("z-index")}).get(),o=Math.max.apply(null,n);return o>=+this.uiDialog.css("z-index")&&(this.uiDialog.css("z-index",o+1),s=!0),s&&!i&&this._trigger("focus",e),s},open:function(){var e=this;return this._isOpen?(this._moveToTop()&&this._focusTabbable(),void 0):(this._isOpen=!0,this.opener=t(t.ui.safeActiveElement(this.document[0])),this._size(),this._position(),this._createOverlay(),this._moveToTop(null,!0),this.overlay&&this.overlay.css("z-index",this.uiDialog.css("z-index")-1),this._show(this.uiDialog,this.options.show,function(){e._focusTabbable(),e._trigger("focus")}),this._makeFocusTarget(),this._trigger("open"),void 0)},_focusTabbable:function(){var t=this._focusedElement;t||(t=this.element.find("[autofocus]")),t.length||(t=this.element.find(":tabbable")),t.length||(t=this.uiDialogButtonPane.find(":tabbable")),t.length||(t=this.uiDialogTitlebarClose.filter(":tabbable")),t.length||(t=this.uiDialog),t.eq(0).trigger("focus")},_keepFocus:function(e){function i(){var e=t.ui.safeActiveElement(this.document[0]),i=this.uiDialog[0]===e||t.contains(this.uiDialog[0],e);i||this._focusTabbable()}e.preventDefault(),i.call(this),this._delay(i)},_createWrapper:function(){this.uiDialog=t("
    ").hide().attr({tabIndex:-1,role:"dialog"}).appendTo(this._appendTo()),this._addClass(this.uiDialog,"ui-dialog","ui-widget ui-widget-content ui-front"),this._on(this.uiDialog,{keydown:function(e){if(this.options.closeOnEscape&&!e.isDefaultPrevented()&&e.keyCode&&e.keyCode===t.ui.keyCode.ESCAPE)return e.preventDefault(),this.close(e),void 0;if(e.keyCode===t.ui.keyCode.TAB&&!e.isDefaultPrevented()){var i=this.uiDialog.find(":tabbable"),s=i.filter(":first"),n=i.filter(":last");e.target!==n[0]&&e.target!==this.uiDialog[0]||e.shiftKey?e.target!==s[0]&&e.target!==this.uiDialog[0]||!e.shiftKey||(this._delay(function(){n.trigger("focus")}),e.preventDefault()):(this._delay(function(){s.trigger("focus")}),e.preventDefault())}},mousedown:function(t){this._moveToTop(t)&&this._focusTabbable()}}),this.element.find("[aria-describedby]").length||this.uiDialog.attr({"aria-describedby":this.element.uniqueId().attr("id")})},_createTitlebar:function(){var e;this.uiDialogTitlebar=t("
    "),this._addClass(this.uiDialogTitlebar,"ui-dialog-titlebar","ui-widget-header ui-helper-clearfix"),this._on(this.uiDialogTitlebar,{mousedown:function(e){t(e.target).closest(".ui-dialog-titlebar-close")||this.uiDialog.trigger("focus")}}),this.uiDialogTitlebarClose=t("").button({label:t("").text(this.options.closeText).html(),icon:"ui-icon-closethick",showLabel:!1}).appendTo(this.uiDialogTitlebar),this._addClass(this.uiDialogTitlebarClose,"ui-dialog-titlebar-close"),this._on(this.uiDialogTitlebarClose,{click:function(t){t.preventDefault(),this.close(t)}}),e=t("").uniqueId().prependTo(this.uiDialogTitlebar),this._addClass(e,"ui-dialog-title"),this._title(e),this.uiDialogTitlebar.prependTo(this.uiDialog),this.uiDialog.attr({"aria-labelledby":e.attr("id")})},_title:function(t){this.options.title?t.text(this.options.title):t.html(" ")},_createButtonPane:function(){this.uiDialogButtonPane=t("
    "),this._addClass(this.uiDialogButtonPane,"ui-dialog-buttonpane","ui-widget-content ui-helper-clearfix"),this.uiButtonSet=t("
    ").appendTo(this.uiDialogButtonPane),this._addClass(this.uiButtonSet,"ui-dialog-buttonset"),this._createButtons()},_createButtons:function(){var e=this,i=this.options.buttons;return this.uiDialogButtonPane.remove(),this.uiButtonSet.empty(),t.isEmptyObject(i)||t.isArray(i)&&!i.length?(this._removeClass(this.uiDialog,"ui-dialog-buttons"),void 0):(t.each(i,function(i,s){var n,o;s=t.isFunction(s)?{click:s,text:i}:s,s=t.extend({type:"button"},s),n=s.click,o={icon:s.icon,iconPosition:s.iconPosition,showLabel:s.showLabel,icons:s.icons,text:s.text},delete s.click,delete s.icon,delete s.iconPosition,delete s.showLabel,delete s.icons,"boolean"==typeof s.text&&delete s.text,t("",s).button(o).appendTo(e.uiButtonSet).on("click",function(){n.apply(e.element[0],arguments)})}),this._addClass(this.uiDialog,"ui-dialog-buttons"),this.uiDialogButtonPane.appendTo(this.uiDialog),void 0)},_makeDraggable:function(){function e(t){return{position:t.position,offset:t.offset}}var i=this,s=this.options;this.uiDialog.draggable({cancel:".ui-dialog-content, .ui-dialog-titlebar-close",handle:".ui-dialog-titlebar",containment:"document",start:function(s,n){i._addClass(t(this),"ui-dialog-dragging"),i._blockFrames(),i._trigger("dragStart",s,e(n))},drag:function(t,s){i._trigger("drag",t,e(s))},stop:function(n,o){var a=o.offset.left-i.document.scrollLeft(),r=o.offset.top-i.document.scrollTop();s.position={my:"left top",at:"left"+(a>=0?"+":"")+a+" "+"top"+(r>=0?"+":"")+r,of:i.window},i._removeClass(t(this),"ui-dialog-dragging"),i._unblockFrames(),i._trigger("dragStop",n,e(o))}})},_makeResizable:function(){function e(t){return{originalPosition:t.originalPosition,originalSize:t.originalSize,position:t.position,size:t.size}}var i=this,s=this.options,n=s.resizable,o=this.uiDialog.css("position"),a="string"==typeof n?n:"n,e,s,w,se,sw,ne,nw";this.uiDialog.resizable({cancel:".ui-dialog-content",containment:"document",alsoResize:this.element,maxWidth:s.maxWidth,maxHeight:s.maxHeight,minWidth:s.minWidth,minHeight:this._minHeight(),handles:a,start:function(s,n){i._addClass(t(this),"ui-dialog-resizing"),i._blockFrames(),i._trigger("resizeStart",s,e(n))},resize:function(t,s){i._trigger("resize",t,e(s))},stop:function(n,o){var a=i.uiDialog.offset(),r=a.left-i.document.scrollLeft(),h=a.top-i.document.scrollTop();s.height=i.uiDialog.height(),s.width=i.uiDialog.width(),s.position={my:"left top",at:"left"+(r>=0?"+":"")+r+" "+"top"+(h>=0?"+":"")+h,of:i.window},i._removeClass(t(this),"ui-dialog-resizing"),i._unblockFrames(),i._trigger("resizeStop",n,e(o))}}).css("position",o)},_trackFocus:function(){this._on(this.widget(),{focusin:function(e){this._makeFocusTarget(),this._focusedElement=t(e.target)}})},_makeFocusTarget:function(){this._untrackInstance(),this._trackingInstances().unshift(this)},_untrackInstance:function(){var e=this._trackingInstances(),i=t.inArray(this,e);-1!==i&&e.splice(i,1)},_trackingInstances:function(){var t=this.document.data("ui-dialog-instances");return t||(t=[],this.document.data("ui-dialog-instances",t)),t},_minHeight:function(){var t=this.options;return"auto"===t.height?t.minHeight:Math.min(t.minHeight,t.height)},_position:function(){var t=this.uiDialog.is(":visible");t||this.uiDialog.show(),this.uiDialog.position(this.options.position),t||this.uiDialog.hide()},_setOptions:function(e){var i=this,s=!1,n={};t.each(e,function(t,e){i._setOption(t,e),t in i.sizeRelatedOptions&&(s=!0),t in 
i.resizableRelatedOptions&&(n[t]=e)}),s&&(this._size(),this._position()),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option",n)},_setOption:function(e,i){var s,n,o=this.uiDialog;"disabled"!==e&&(this._super(e,i),"appendTo"===e&&this.uiDialog.appendTo(this._appendTo()),"buttons"===e&&this._createButtons(),"closeText"===e&&this.uiDialogTitlebarClose.button({label:t("").text(""+this.options.closeText).html()}),"draggable"===e&&(s=o.is(":data(ui-draggable)"),s&&!i&&o.draggable("destroy"),!s&&i&&this._makeDraggable()),"position"===e&&this._position(),"resizable"===e&&(n=o.is(":data(ui-resizable)"),n&&!i&&o.resizable("destroy"),n&&"string"==typeof i&&o.resizable("option","handles",i),n||i===!1||this._makeResizable()),"title"===e&&this._title(this.uiDialogTitlebar.find(".ui-dialog-title")))},_size:function(){var t,e,i,s=this.options;this.element.show().css({width:"auto",minHeight:0,maxHeight:"none",height:0}),s.minWidth>s.width&&(s.width=s.minWidth),t=this.uiDialog.css({height:"auto",width:s.width}).outerHeight(),e=Math.max(0,s.minHeight-t),i="number"==typeof s.maxHeight?Math.max(0,s.maxHeight-t):"none","auto"===s.height?this.element.css({minHeight:e,maxHeight:i,height:"auto"}):this.element.height(Math.max(0,s.height-t)),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option","minHeight",this._minHeight())},_blockFrames:function(){this.iframeBlocks=this.document.find("iframe").map(function(){var e=t(this);return t("
    ").css({position:"absolute",width:e.outerWidth(),height:e.outerHeight()}).appendTo(e.parent()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_allowInteraction:function(e){return t(e.target).closest(".ui-dialog").length?!0:!!t(e.target).closest(".ui-datepicker").length},_createOverlay:function(){if(this.options.modal){var e=!0;this._delay(function(){e=!1}),this.document.data("ui-dialog-overlays")||this._on(this.document,{focusin:function(t){e||this._allowInteraction(t)||(t.preventDefault(),this._trackingInstances()[0]._focusTabbable())}}),this.overlay=t("
    ").appendTo(this._appendTo()),this._addClass(this.overlay,null,"ui-widget-overlay ui-front"),this._on(this.overlay,{mousedown:"_keepFocus"}),this.document.data("ui-dialog-overlays",(this.document.data("ui-dialog-overlays")||0)+1)}},_destroyOverlay:function(){if(this.options.modal&&this.overlay){var t=this.document.data("ui-dialog-overlays")-1;t?this.document.data("ui-dialog-overlays",t):(this._off(this.document,"focusin"),this.document.removeData("ui-dialog-overlays")),this.overlay.remove(),this.overlay=null}}}),t.uiBackCompat!==!1&&t.widget("ui.dialog",t.ui.dialog,{options:{dialogClass:""},_createWrapper:function(){this._super(),this.uiDialog.addClass(this.options.dialogClass)},_setOption:function(t,e){"dialogClass"===t&&this.uiDialog.removeClass(this.options.dialogClass).addClass(e),this._superApply(arguments)}}),t.ui.dialog,t.widget("ui.droppable",{version:"1.12.1",widgetEventPrefix:"drop",options:{accept:"*",addClasses:!0,greedy:!1,scope:"default",tolerance:"intersect",activate:null,deactivate:null,drop:null,out:null,over:null},_create:function(){var e,i=this.options,s=i.accept;this.isover=!1,this.isout=!0,this.accept=t.isFunction(s)?s:function(t){return t.is(s)},this.proportions=function(){return arguments.length?(e=arguments[0],void 0):e?e:e={width:this.element[0].offsetWidth,height:this.element[0].offsetHeight}},this._addToManager(i.scope),i.addClasses&&this._addClass("ui-droppable")},_addToManager:function(e){t.ui.ddmanager.droppables[e]=t.ui.ddmanager.droppables[e]||[],t.ui.ddmanager.droppables[e].push(this)},_splice:function(t){for(var e=0;t.length>e;e++)t[e]===this&&t.splice(e,1)},_destroy:function(){var e=t.ui.ddmanager.droppables[this.options.scope];this._splice(e)},_setOption:function(e,i){if("accept"===e)this.accept=t.isFunction(i)?i:function(t){return t.is(i)};else if("scope"===e){var s=t.ui.ddmanager.droppables[this.options.scope];this._splice(s),this._addToManager(i)}this._super(e,i)},_activate:function(e){var i=t.ui.ddmanager.current;this._addActiveClass(),i&&this._trigger("activate",e,this.ui(i))},_deactivate:function(e){var i=t.ui.ddmanager.current;this._removeActiveClass(),i&&this._trigger("deactivate",e,this.ui(i))},_over:function(e){var i=t.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this._addHoverClass(),this._trigger("over",e,this.ui(i)))},_out:function(e){var i=t.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this._removeHoverClass(),this._trigger("out",e,this.ui(i)))},_drop:function(e,i){var s=i||t.ui.ddmanager.current,n=!1;return s&&(s.currentItem||s.element)[0]!==this.element[0]?(this.element.find(":data(ui-droppable)").not(".ui-draggable-dragging").each(function(){var i=t(this).droppable("instance");return i.options.greedy&&!i.options.disabled&&i.options.scope===s.options.scope&&i.accept.call(i.element[0],s.currentItem||s.element)&&v(s,t.extend(i,{offset:i.element.offset()}),i.options.tolerance,e)?(n=!0,!1):void 
0}),n?!1:this.accept.call(this.element[0],s.currentItem||s.element)?(this._removeActiveClass(),this._removeHoverClass(),this._trigger("drop",e,this.ui(s)),this.element):!1):!1},ui:function(t){return{draggable:t.currentItem||t.element,helper:t.helper,position:t.position,offset:t.positionAbs}},_addHoverClass:function(){this._addClass("ui-droppable-hover")},_removeHoverClass:function(){this._removeClass("ui-droppable-hover")},_addActiveClass:function(){this._addClass("ui-droppable-active")},_removeActiveClass:function(){this._removeClass("ui-droppable-active")}});var v=t.ui.intersect=function(){function t(t,e,i){return t>=e&&e+i>t}return function(e,i,s,n){if(!i.offset)return!1;var o=(e.positionAbs||e.position.absolute).left+e.margins.left,a=(e.positionAbs||e.position.absolute).top+e.margins.top,r=o+e.helperProportions.width,h=a+e.helperProportions.height,l=i.offset.left,c=i.offset.top,u=l+i.proportions().width,d=c+i.proportions().height;switch(s){case"fit":return o>=l&&u>=r&&a>=c&&d>=h;case"intersect":return o+e.helperProportions.width/2>l&&u>r-e.helperProportions.width/2&&a+e.helperProportions.height/2>c&&d>h-e.helperProportions.height/2;case"pointer":return t(n.pageY,c,i.proportions().height)&&t(n.pageX,l,i.proportions().width);case"touch":return(a>=c&&d>=a||h>=c&&d>=h||c>a&&h>d)&&(o>=l&&u>=o||r>=l&&u>=r||l>o&&r>u);default:return!1}}}();t.ui.ddmanager={current:null,droppables:{"default":[]},prepareOffsets:function(e,i){var s,n,o=t.ui.ddmanager.droppables[e.options.scope]||[],a=i?i.type:null,r=(e.currentItem||e.element).find(":data(ui-droppable)").addBack();t:for(s=0;o.length>s;s++)if(!(o[s].options.disabled||e&&!o[s].accept.call(o[s].element[0],e.currentItem||e.element))){for(n=0;r.length>n;n++)if(r[n]===o[s].element[0]){o[s].proportions().height=0;continue t}o[s].visible="none"!==o[s].element.css("display"),o[s].visible&&("mousedown"===a&&o[s]._activate.call(o[s],i),o[s].offset=o[s].element.offset(),o[s].proportions({width:o[s].element[0].offsetWidth,height:o[s].element[0].offsetHeight}))}},drop:function(e,i){var s=!1;return t.each((t.ui.ddmanager.droppables[e.options.scope]||[]).slice(),function(){this.options&&(!this.options.disabled&&this.visible&&v(e,this,this.options.tolerance,i)&&(s=this._drop.call(this,i)||s),!this.options.disabled&&this.visible&&this.accept.call(this.element[0],e.currentItem||e.element)&&(this.isout=!0,this.isover=!1,this._deactivate.call(this,i)))}),s},dragStart:function(e,i){e.element.parentsUntil("body").on("scroll.droppable",function(){e.options.refreshPositions||t.ui.ddmanager.prepareOffsets(e,i)})},drag:function(e,i){e.options.refreshPositions&&t.ui.ddmanager.prepareOffsets(e,i),t.each(t.ui.ddmanager.droppables[e.options.scope]||[],function(){if(!this.options.disabled&&!this.greedyChild&&this.visible){var s,n,o,a=v(e,this,this.options.tolerance,i),r=!a&&this.isover?"isout":a&&!this.isover?"isover":null;r&&(this.options.greedy&&(n=this.options.scope,o=this.element.parents(":data(ui-droppable)").filter(function(){return 
t(this).droppable("instance").options.scope===n}),o.length&&(s=t(o[0]).droppable("instance"),s.greedyChild="isover"===r)),s&&"isover"===r&&(s.isover=!1,s.isout=!0,s._out.call(s,i)),this[r]=!0,this["isout"===r?"isover":"isout"]=!1,this["isover"===r?"_over":"_out"].call(this,i),s&&"isout"===r&&(s.isout=!1,s.isover=!0,s._over.call(s,i)))}})},dragStop:function(e,i){e.element.parentsUntil("body").off("scroll.droppable"),e.options.refreshPositions||t.ui.ddmanager.prepareOffsets(e,i)}},t.uiBackCompat!==!1&&t.widget("ui.droppable",t.ui.droppable,{options:{hoverClass:!1,activeClass:!1},_addActiveClass:function(){this._super(),this.options.activeClass&&this.element.addClass(this.options.activeClass)},_removeActiveClass:function(){this._super(),this.options.activeClass&&this.element.removeClass(this.options.activeClass)},_addHoverClass:function(){this._super(),this.options.hoverClass&&this.element.addClass(this.options.hoverClass)},_removeHoverClass:function(){this._super(),this.options.hoverClass&&this.element.removeClass(this.options.hoverClass)}}),t.ui.droppable,t.widget("ui.progressbar",{version:"1.12.1",options:{classes:{"ui-progressbar":"ui-corner-all","ui-progressbar-value":"ui-corner-left","ui-progressbar-complete":"ui-corner-right"},max:100,value:0,change:null,complete:null},min:0,_create:function(){this.oldValue=this.options.value=this._constrainedValue(),this.element.attr({role:"progressbar","aria-valuemin":this.min}),this._addClass("ui-progressbar","ui-widget ui-widget-content"),this.valueDiv=t("
    ").appendTo(this.element),this._addClass(this.valueDiv,"ui-progressbar-value","ui-widget-header"),this._refreshValue()},_destroy:function(){this.element.removeAttr("role aria-valuemin aria-valuemax aria-valuenow"),this.valueDiv.remove()},value:function(t){return void 0===t?this.options.value:(this.options.value=this._constrainedValue(t),this._refreshValue(),void 0)},_constrainedValue:function(t){return void 0===t&&(t=this.options.value),this.indeterminate=t===!1,"number"!=typeof t&&(t=0),this.indeterminate?!1:Math.min(this.options.max,Math.max(this.min,t))},_setOptions:function(t){var e=t.value;delete t.value,this._super(t),this.options.value=this._constrainedValue(e),this._refreshValue()},_setOption:function(t,e){"max"===t&&(e=Math.max(this.min,e)),this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t)},_percentage:function(){return this.indeterminate?100:100*(this.options.value-this.min)/(this.options.max-this.min)},_refreshValue:function(){var e=this.options.value,i=this._percentage();this.valueDiv.toggle(this.indeterminate||e>this.min).width(i.toFixed(0)+"%"),this._toggleClass(this.valueDiv,"ui-progressbar-complete",null,e===this.options.max)._toggleClass("ui-progressbar-indeterminate",null,this.indeterminate),this.indeterminate?(this.element.removeAttr("aria-valuenow"),this.overlayDiv||(this.overlayDiv=t("
    ").appendTo(this.valueDiv),this._addClass(this.overlayDiv,"ui-progressbar-overlay"))):(this.element.attr({"aria-valuemax":this.options.max,"aria-valuenow":e}),this.overlayDiv&&(this.overlayDiv.remove(),this.overlayDiv=null)),this.oldValue!==e&&(this.oldValue=e,this._trigger("change")),e===this.options.max&&this._trigger("complete")}}),t.widget("ui.selectable",t.ui.mouse,{version:"1.12.1",options:{appendTo:"body",autoRefresh:!0,distance:0,filter:"*",tolerance:"touch",selected:null,selecting:null,start:null,stop:null,unselected:null,unselecting:null},_create:function(){var e=this;this._addClass("ui-selectable"),this.dragged=!1,this.refresh=function(){e.elementPos=t(e.element[0]).offset(),e.selectees=t(e.options.filter,e.element[0]),e._addClass(e.selectees,"ui-selectee"),e.selectees.each(function(){var i=t(this),s=i.offset(),n={left:s.left-e.elementPos.left,top:s.top-e.elementPos.top};t.data(this,"selectable-item",{element:this,$element:i,left:n.left,top:n.top,right:n.left+i.outerWidth(),bottom:n.top+i.outerHeight(),startselected:!1,selected:i.hasClass("ui-selected"),selecting:i.hasClass("ui-selecting"),unselecting:i.hasClass("ui-unselecting")})})},this.refresh(),this._mouseInit(),this.helper=t("
    "),this._addClass(this.helper,"ui-selectable-helper")},_destroy:function(){this.selectees.removeData("selectable-item"),this._mouseDestroy()},_mouseStart:function(e){var i=this,s=this.options;this.opos=[e.pageX,e.pageY],this.elementPos=t(this.element[0]).offset(),this.options.disabled||(this.selectees=t(s.filter,this.element[0]),this._trigger("start",e),t(s.appendTo).append(this.helper),this.helper.css({left:e.pageX,top:e.pageY,width:0,height:0}),s.autoRefresh&&this.refresh(),this.selectees.filter(".ui-selected").each(function(){var s=t.data(this,"selectable-item");s.startselected=!0,e.metaKey||e.ctrlKey||(i._removeClass(s.$element,"ui-selected"),s.selected=!1,i._addClass(s.$element,"ui-unselecting"),s.unselecting=!0,i._trigger("unselecting",e,{unselecting:s.element}))}),t(e.target).parents().addBack().each(function(){var s,n=t.data(this,"selectable-item");return n?(s=!e.metaKey&&!e.ctrlKey||!n.$element.hasClass("ui-selected"),i._removeClass(n.$element,s?"ui-unselecting":"ui-selected")._addClass(n.$element,s?"ui-selecting":"ui-unselecting"),n.unselecting=!s,n.selecting=s,n.selected=s,s?i._trigger("selecting",e,{selecting:n.element}):i._trigger("unselecting",e,{unselecting:n.element}),!1):void 0}))},_mouseDrag:function(e){if(this.dragged=!0,!this.options.disabled){var i,s=this,n=this.options,o=this.opos[0],a=this.opos[1],r=e.pageX,h=e.pageY;return o>r&&(i=r,r=o,o=i),a>h&&(i=h,h=a,a=i),this.helper.css({left:o,top:a,width:r-o,height:h-a}),this.selectees.each(function(){var i=t.data(this,"selectable-item"),l=!1,c={};i&&i.element!==s.element[0]&&(c.left=i.left+s.elementPos.left,c.right=i.right+s.elementPos.left,c.top=i.top+s.elementPos.top,c.bottom=i.bottom+s.elementPos.top,"touch"===n.tolerance?l=!(c.left>r||o>c.right||c.top>h||a>c.bottom):"fit"===n.tolerance&&(l=c.left>o&&r>c.right&&c.top>a&&h>c.bottom),l?(i.selected&&(s._removeClass(i.$element,"ui-selected"),i.selected=!1),i.unselecting&&(s._removeClass(i.$element,"ui-unselecting"),i.unselecting=!1),i.selecting||(s._addClass(i.$element,"ui-selecting"),i.selecting=!0,s._trigger("selecting",e,{selecting:i.element}))):(i.selecting&&((e.metaKey||e.ctrlKey)&&i.startselected?(s._removeClass(i.$element,"ui-selecting"),i.selecting=!1,s._addClass(i.$element,"ui-selected"),i.selected=!0):(s._removeClass(i.$element,"ui-selecting"),i.selecting=!1,i.startselected&&(s._addClass(i.$element,"ui-unselecting"),i.unselecting=!0),s._trigger("unselecting",e,{unselecting:i.element}))),i.selected&&(e.metaKey||e.ctrlKey||i.startselected||(s._removeClass(i.$element,"ui-selected"),i.selected=!1,s._addClass(i.$element,"ui-unselecting"),i.unselecting=!0,s._trigger("unselecting",e,{unselecting:i.element})))))}),!1}},_mouseStop:function(e){var i=this;return this.dragged=!1,t(".ui-unselecting",this.element[0]).each(function(){var s=t.data(this,"selectable-item");i._removeClass(s.$element,"ui-unselecting"),s.unselecting=!1,s.startselected=!1,i._trigger("unselected",e,{unselected:s.element})}),t(".ui-selecting",this.element[0]).each(function(){var 
s=t.data(this,"selectable-item");i._removeClass(s.$element,"ui-selecting")._addClass(s.$element,"ui-selected"),s.selecting=!1,s.selected=!0,s.startselected=!0,i._trigger("selected",e,{selected:s.element})}),this._trigger("stop",e),this.helper.remove(),!1}}),t.widget("ui.selectmenu",[t.ui.formResetMixin,{version:"1.12.1",defaultElement:"",widgetEventPrefix:"spin",options:{classes:{"ui-spinner":"ui-corner-all","ui-spinner-down":"ui-corner-br","ui-spinner-up":"ui-corner-tr"},culture:null,icons:{down:"ui-icon-triangle-1-s",up:"ui-icon-triangle-1-n"},incremental:!0,max:null,min:null,numberFormat:null,page:10,step:1,change:null,spin:null,start:null,stop:null},_create:function(){this._setOption("max",this.options.max),this._setOption("min",this.options.min),this._setOption("step",this.options.step),""!==this.value()&&this._value(this.element.val(),!0),this._draw(),this._on(this._events),this._refresh(),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_getCreateOptions:function(){var e=this._super(),i=this.element;return t.each(["min","max","step"],function(t,s){var n=i.attr(s);null!=n&&n.length&&(e[s]=n)}),e},_events:{keydown:function(t){this._start(t)&&this._keydown(t)&&t.preventDefault()},keyup:"_stop",focus:function(){this.previous=this.element.val()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(this._stop(),this._refresh(),this.previous!==this.element.val()&&this._trigger("change",t),void 0)},mousewheel:function(t,e){if(e){if(!this.spinning&&!this._start(t))return!1;this._spin((e>0?1:-1)*this.options.step,t),clearTimeout(this.mousewheelTimer),this.mousewheelTimer=this._delay(function(){this.spinning&&this._stop(t)},100),t.preventDefault()}},"mousedown .ui-spinner-button":function(e){function i(){var e=this.element[0]===t.ui.safeActiveElement(this.document[0]);e||(this.element.trigger("focus"),this.previous=s,this._delay(function(){this.previous=s}))}var s;s=this.element[0]===t.ui.safeActiveElement(this.document[0])?this.previous:this.element.val(),e.preventDefault(),i.call(this),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,i.call(this)}),this._start(e)!==!1&&this._repeat(null,t(e.currentTarget).hasClass("ui-spinner-up")?1:-1,e)},"mouseup .ui-spinner-button":"_stop","mouseenter .ui-spinner-button":function(e){return t(e.currentTarget).hasClass("ui-state-active")?this._start(e)===!1?!1:(this._repeat(null,t(e.currentTarget).hasClass("ui-spinner-up")?1:-1,e),void 0):void 0},"mouseleave .ui-spinner-button":"_stop"},_enhance:function(){this.uiSpinner=this.element.attr("autocomplete","off").wrap("").parent().append("")},_draw:function(){this._enhance(),this._addClass(this.uiSpinner,"ui-spinner","ui-widget ui-widget-content"),this._addClass("ui-spinner-input"),this.element.attr("role","spinbutton"),this.buttons=this.uiSpinner.children("a").attr("tabIndex",-1).attr("aria-hidden",!0).button({classes:{"ui-button":""}}),this._removeClass(this.buttons,"ui-corner-all"),this._addClass(this.buttons.first(),"ui-spinner-button ui-spinner-up"),this._addClass(this.buttons.last(),"ui-spinner-button ui-spinner-down"),this.buttons.first().button({icon:this.options.icons.up,showLabel:!1}),this.buttons.last().button({icon:this.options.icons.down,showLabel:!1}),this.buttons.height()>Math.ceil(.5*this.uiSpinner.height())&&this.uiSpinner.height()>0&&this.uiSpinner.height(this.uiSpinner.height())},_keydown:function(e){var i=this.options,s=t.ui.keyCode;switch(e.keyCode){case s.UP:return this._repeat(null,1,e),!0;case s.DOWN:return 
this._repeat(null,-1,e),!0;case s.PAGE_UP:return this._repeat(null,i.page,e),!0;case s.PAGE_DOWN:return this._repeat(null,-i.page,e),!0}return!1},_start:function(t){return this.spinning||this._trigger("start",t)!==!1?(this.counter||(this.counter=1),this.spinning=!0,!0):!1},_repeat:function(t,e,i){t=t||500,clearTimeout(this.timer),this.timer=this._delay(function(){this._repeat(40,e,i)},t),this._spin(e*this.options.step,i)},_spin:function(t,e){var i=this.value()||0;this.counter||(this.counter=1),i=this._adjustValue(i+t*this._increment(this.counter)),this.spinning&&this._trigger("spin",e,{value:i})===!1||(this._value(i),this.counter++)},_increment:function(e){var i=this.options.incremental;return i?t.isFunction(i)?i(e):Math.floor(e*e*e/5e4-e*e/500+17*e/200+1):1},_precision:function(){var t=this._precisionOf(this.options.step);return null!==this.options.min&&(t=Math.max(t,this._precisionOf(this.options.min))),t},_precisionOf:function(t){var e=""+t,i=e.indexOf(".");return-1===i?0:e.length-i-1},_adjustValue:function(t){var e,i,s=this.options;return e=null!==s.min?s.min:0,i=t-e,i=Math.round(i/s.step)*s.step,t=e+i,t=parseFloat(t.toFixed(this._precision())),null!==s.max&&t>s.max?s.max:null!==s.min&&s.min>t?s.min:t},_stop:function(t){this.spinning&&(clearTimeout(this.timer),clearTimeout(this.mousewheelTimer),this.counter=0,this.spinning=!1,this._trigger("stop",t))},_setOption:function(t,e){var i,s,n;return"culture"===t||"numberFormat"===t?(i=this._parse(this.element.val()),this.options[t]=e,this.element.val(this._format(i)),void 0):(("max"===t||"min"===t||"step"===t)&&"string"==typeof e&&(e=this._parse(e)),"icons"===t&&(s=this.buttons.first().find(".ui-icon"),this._removeClass(s,null,this.options.icons.up),this._addClass(s,null,e.up),n=this.buttons.last().find(".ui-icon"),this._removeClass(n,null,this.options.icons.down),this._addClass(n,null,e.down)),this._super(t,e),void 0)},_setOptionDisabled:function(t){this._super(t),this._toggleClass(this.uiSpinner,null,"ui-state-disabled",!!t),this.element.prop("disabled",!!t),this.buttons.button(t?"disable":"enable")},_setOptions:r(function(t){this._super(t)}),_parse:function(t){return"string"==typeof t&&""!==t&&(t=window.Globalize&&this.options.numberFormat?Globalize.parseFloat(t,10,this.options.culture):+t),""===t||isNaN(t)?null:t},_format:function(t){return""===t?"":window.Globalize&&this.options.numberFormat?Globalize.format(t,this.options.numberFormat,this.options.culture):t},_refresh:function(){this.element.attr({"aria-valuemin":this.options.min,"aria-valuemax":this.options.max,"aria-valuenow":this._parse(this.element.val())})},isValid:function(){var t=this.value();return null===t?!1:t===this._adjustValue(t)},_value:function(t,e){var i;""!==t&&(i=this._parse(t),null!==i&&(e||(i=this._adjustValue(i)),t=this._format(i))),this.element.val(t),this._refresh()},_destroy:function(){this.element.prop("disabled",!1).removeAttr("autocomplete role aria-valuemin aria-valuemax aria-valuenow"),this.uiSpinner.replaceWith(this.element)},stepUp:r(function(t){this._stepUp(t)}),_stepUp:function(t){this._start()&&(this._spin((t||1)*this.options.step),this._stop())},stepDown:r(function(t){this._stepDown(t)}),_stepDown:function(t){this._start()&&(this._spin((t||1)*-this.options.step),this._stop())},pageUp:r(function(t){this._stepUp((t||1)*this.options.page)}),pageDown:r(function(t){this._stepDown((t||1)*this.options.page)}),value:function(t){return arguments.length?(r(this._value).call(this,t),void 0):this._parse(this.element.val())},widget:function(){return 
this.uiSpinner}}),t.uiBackCompat!==!1&&t.widget("ui.spinner",t.ui.spinner,{_enhance:function(){this.uiSpinner=this.element.attr("autocomplete","off").wrap(this._uiSpinnerHtml()).parent().append(this._buttonHtml())},_uiSpinnerHtml:function(){return""},_buttonHtml:function(){return""}}),t.ui.spinner,t.widget("ui.tabs",{version:"1.12.1",delay:300,options:{active:null,classes:{"ui-tabs":"ui-corner-all","ui-tabs-nav":"ui-corner-all","ui-tabs-panel":"ui-corner-bottom","ui-tabs-tab":"ui-corner-top"},collapsible:!1,event:"click",heightStyle:"content",hide:null,show:null,activate:null,beforeActivate:null,beforeLoad:null,load:null},_isLocal:function(){var t=/#.*$/;return function(e){var i,s;i=e.href.replace(t,""),s=location.href.replace(t,"");try{i=decodeURIComponent(i)}catch(n){}try{s=decodeURIComponent(s)}catch(n){}return e.hash.length>1&&i===s}}(),_create:function(){var e=this,i=this.options;this.running=!1,this._addClass("ui-tabs","ui-widget ui-widget-content"),this._toggleClass("ui-tabs-collapsible",null,i.collapsible),this._processTabs(),i.active=this._initialActive(),t.isArray(i.disabled)&&(i.disabled=t.unique(i.disabled.concat(t.map(this.tabs.filter(".ui-state-disabled"),function(t){return e.tabs.index(t)}))).sort()),this.active=this.options.active!==!1&&this.anchors.length?this._findActive(i.active):t(),this._refresh(),this.active.length&&this.load(i.active)},_initialActive:function(){var e=this.options.active,i=this.options.collapsible,s=location.hash.substring(1);return null===e&&(s&&this.tabs.each(function(i,n){return t(n).attr("aria-controls")===s?(e=i,!1):void 0}),null===e&&(e=this.tabs.index(this.tabs.filter(".ui-tabs-active"))),(null===e||-1===e)&&(e=this.tabs.length?0:!1)),e!==!1&&(e=this.tabs.index(this.tabs.eq(e)),-1===e&&(e=i?!1:0)),!i&&e===!1&&this.anchors.length&&(e=0),e},_getCreateEventData:function(){return{tab:this.active,panel:this.active.length?this._getPanelForTab(this.active):t()}},_tabKeydown:function(e){var i=t(t.ui.safeActiveElement(this.document[0])).closest("li"),s=this.tabs.index(i),n=!0;if(!this._handlePageNav(e)){switch(e.keyCode){case t.ui.keyCode.RIGHT:case t.ui.keyCode.DOWN:s++;break;case t.ui.keyCode.UP:case t.ui.keyCode.LEFT:n=!1,s--;break;case t.ui.keyCode.END:s=this.anchors.length-1;break;case t.ui.keyCode.HOME:s=0;break;case t.ui.keyCode.SPACE:return e.preventDefault(),clearTimeout(this.activating),this._activate(s),void 0;case t.ui.keyCode.ENTER:return e.preventDefault(),clearTimeout(this.activating),this._activate(s===this.options.active?!1:s),void 0;default:return}e.preventDefault(),clearTimeout(this.activating),s=this._focusNextTab(s,n),e.ctrlKey||e.metaKey||(i.attr("aria-selected","false"),this.tabs.eq(s).attr("aria-selected","true"),this.activating=this._delay(function(){this.option("active",s)},this.delay))}},_panelKeydown:function(e){this._handlePageNav(e)||e.ctrlKey&&e.keyCode===t.ui.keyCode.UP&&(e.preventDefault(),this.active.trigger("focus"))},_handlePageNav:function(e){return e.altKey&&e.keyCode===t.ui.keyCode.PAGE_UP?(this._activate(this._focusNextTab(this.options.active-1,!1)),!0):e.altKey&&e.keyCode===t.ui.keyCode.PAGE_DOWN?(this._activate(this._focusNextTab(this.options.active+1,!0)),!0):void 0},_findNextTab:function(e,i){function s(){return e>n&&(e=0),0>e&&(e=n),e}for(var n=this.tabs.length-1;-1!==t.inArray(s(),this.options.disabled);)e=i?e+1:e-1;return e},_focusNextTab:function(t,e){return t=this._findNextTab(t,e),this.tabs.eq(t).trigger("focus"),t},_setOption:function(t,e){return"active"===t?(this._activate(e),void 
0):(this._super(t,e),"collapsible"===t&&(this._toggleClass("ui-tabs-collapsible",null,e),e||this.options.active!==!1||this._activate(0)),"event"===t&&this._setupEvents(e),"heightStyle"===t&&this._setupHeightStyle(e),void 0)},_sanitizeSelector:function(t){return t?t.replace(/[!"$%&'()*+,.\/:;<=>?@\[\]\^`{|}~]/g,"\\$&"):""},refresh:function(){var e=this.options,i=this.tablist.children(":has(a[href])");e.disabled=t.map(i.filter(".ui-state-disabled"),function(t){return i.index(t)}),this._processTabs(),e.active!==!1&&this.anchors.length?this.active.length&&!t.contains(this.tablist[0],this.active[0])?this.tabs.length===e.disabled.length?(e.active=!1,this.active=t()):this._activate(this._findNextTab(Math.max(0,e.active-1),!1)):e.active=this.tabs.index(this.active):(e.active=!1,this.active=t()),this._refresh()},_refresh:function(){this._setOptionDisabled(this.options.disabled),this._setupEvents(this.options.event),this._setupHeightStyle(this.options.heightStyle),this.tabs.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}),this.panels.not(this._getPanelForTab(this.active)).hide().attr({"aria-hidden":"true"}),this.active.length?(this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}),this._addClass(this.active,"ui-tabs-active","ui-state-active"),this._getPanelForTab(this.active).show().attr({"aria-hidden":"false"})):this.tabs.eq(0).attr("tabIndex",0)},_processTabs:function(){var e=this,i=this.tabs,s=this.anchors,n=this.panels;this.tablist=this._getList().attr("role","tablist"),this._addClass(this.tablist,"ui-tabs-nav","ui-helper-reset ui-helper-clearfix ui-widget-header"),this.tablist.on("mousedown"+this.eventNamespace,"> li",function(e){t(this).is(".ui-state-disabled")&&e.preventDefault()}).on("focus"+this.eventNamespace,".ui-tabs-anchor",function(){t(this).closest("li").is(".ui-state-disabled")&&this.blur()}),this.tabs=this.tablist.find("> li:has(a[href])").attr({role:"tab",tabIndex:-1}),this._addClass(this.tabs,"ui-tabs-tab","ui-state-default"),this.anchors=this.tabs.map(function(){return t("a",this)[0]}).attr({role:"presentation",tabIndex:-1}),this._addClass(this.anchors,"ui-tabs-anchor"),this.panels=t(),this.anchors.each(function(i,s){var n,o,a,r=t(s).uniqueId().attr("id"),h=t(s).closest("li"),l=h.attr("aria-controls");e._isLocal(s)?(n=s.hash,a=n.substring(1),o=e.element.find(e._sanitizeSelector(n))):(a=h.attr("aria-controls")||t({}).uniqueId()[0].id,n="#"+a,o=e.element.find(n),o.length||(o=e._createPanel(a),o.insertAfter(e.panels[i-1]||e.tablist)),o.attr("aria-live","polite")),o.length&&(e.panels=e.panels.add(o)),l&&h.data("ui-tabs-aria-controls",l),h.attr({"aria-controls":a,"aria-labelledby":r}),o.attr("aria-labelledby",r)}),this.panels.attr("role","tabpanel"),this._addClass(this.panels,"ui-tabs-panel","ui-widget-content"),i&&(this._off(i.not(this.tabs)),this._off(s.not(this.anchors)),this._off(n.not(this.panels)))},_getList:function(){return this.tablist||this.element.find("ol, ul").eq(0)},_createPanel:function(e){return t("
    ").attr("id",e).data("ui-tabs-destroy",!0)},_setOptionDisabled:function(e){var i,s,n;for(t.isArray(e)&&(e.length?e.length===this.anchors.length&&(e=!0):e=!1),n=0;s=this.tabs[n];n++)i=t(s),e===!0||-1!==t.inArray(n,e)?(i.attr("aria-disabled","true"),this._addClass(i,null,"ui-state-disabled")):(i.removeAttr("aria-disabled"),this._removeClass(i,null,"ui-state-disabled"));this.options.disabled=e,this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,e===!0)},_setupEvents:function(e){var i={};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.anchors.add(this.tabs).add(this.panels)),this._on(!0,this.anchors,{click:function(t){t.preventDefault()}}),this._on(this.anchors,i),this._on(this.tabs,{keydown:"_tabKeydown"}),this._on(this.panels,{keydown:"_panelKeydown"}),this._focusable(this.tabs),this._hoverable(this.tabs)},_setupHeightStyle:function(e){var i,s=this.element.parent();"fill"===e?(i=s.height(),i-=this.element.outerHeight()-this.element.height(),this.element.siblings(":visible").each(function(){var e=t(this),s=e.css("position");"absolute"!==s&&"fixed"!==s&&(i-=e.outerHeight(!0))}),this.element.children().not(this.panels).each(function(){i-=t(this).outerHeight(!0)}),this.panels.each(function(){t(this).height(Math.max(0,i-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===e&&(i=0,this.panels.each(function(){i=Math.max(i,t(this).height("").height())}).height(i))},_eventHandler:function(e){var i=this.options,s=this.active,n=t(e.currentTarget),o=n.closest("li"),a=o[0]===s[0],r=a&&i.collapsible,h=r?t():this._getPanelForTab(o),l=s.length?this._getPanelForTab(s):t(),c={oldTab:s,oldPanel:l,newTab:r?t():o,newPanel:h};e.preventDefault(),o.hasClass("ui-state-disabled")||o.hasClass("ui-tabs-loading")||this.running||a&&!i.collapsible||this._trigger("beforeActivate",e,c)===!1||(i.active=r?!1:this.tabs.index(o),this.active=a?t():o,this.xhr&&this.xhr.abort(),l.length||h.length||t.error("jQuery UI Tabs: Mismatching fragment identifier."),h.length&&this.load(this.tabs.index(o),e),this._toggle(e,c))},_toggle:function(e,i){function s(){o.running=!1,o._trigger("activate",e,i)}function n(){o._addClass(i.newTab.closest("li"),"ui-tabs-active","ui-state-active"),a.length&&o.options.show?o._show(a,o.options.show,s):(a.show(),s())}var o=this,a=i.newPanel,r=i.oldPanel;this.running=!0,r.length&&this.options.hide?this._hide(r,this.options.hide,function(){o._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),n()}):(this._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),r.hide(),n()),r.attr("aria-hidden","true"),i.oldTab.attr({"aria-selected":"false","aria-expanded":"false"}),a.length&&r.length?i.oldTab.attr("tabIndex",-1):a.length&&this.tabs.filter(function(){return 0===t(this).attr("tabIndex")}).attr("tabIndex",-1),a.attr("aria-hidden","false"),i.newTab.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_activate:function(e){var i,s=this._findActive(e);s[0]!==this.active[0]&&(s.length||(s=this.active),i=s.find(".ui-tabs-anchor")[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return e===!1?t():this.tabs.eq(e)},_getIndex:function(e){return"string"==typeof e&&(e=this.anchors.index(this.anchors.filter("[href$='"+t.ui.escapeSelector(e)+"']"))),e},_destroy:function(){this.xhr&&this.xhr.abort(),this.tablist.removeAttr("role").off(this.eventNamespace),this.anchors.removeAttr("role 
tabIndex").removeUniqueId(),this.tabs.add(this.panels).each(function(){t.data(this,"ui-tabs-destroy")?t(this).remove():t(this).removeAttr("role tabIndex aria-live aria-busy aria-selected aria-labelledby aria-hidden aria-expanded")}),this.tabs.each(function(){var e=t(this),i=e.data("ui-tabs-aria-controls");i?e.attr("aria-controls",i).removeData("ui-tabs-aria-controls"):e.removeAttr("aria-controls")}),this.panels.show(),"content"!==this.options.heightStyle&&this.panels.css("height","")},enable:function(e){var i=this.options.disabled;i!==!1&&(void 0===e?i=!1:(e=this._getIndex(e),i=t.isArray(i)?t.map(i,function(t){return t!==e?t:null}):t.map(this.tabs,function(t,i){return i!==e?i:null})),this._setOptionDisabled(i))},disable:function(e){var i=this.options.disabled;if(i!==!0){if(void 0===e)i=!0;else{if(e=this._getIndex(e),-1!==t.inArray(e,i))return;i=t.isArray(i)?t.merge([e],i).sort():[e]}this._setOptionDisabled(i)}},load:function(e,i){e=this._getIndex(e);var s=this,n=this.tabs.eq(e),o=n.find(".ui-tabs-anchor"),a=this._getPanelForTab(n),r={tab:n,panel:a},h=function(t,e){"abort"===e&&s.panels.stop(!1,!0),s._removeClass(n,"ui-tabs-loading"),a.removeAttr("aria-busy"),t===s.xhr&&delete s.xhr};this._isLocal(o[0])||(this.xhr=t.ajax(this._ajaxSettings(o,i,r)),this.xhr&&"canceled"!==this.xhr.statusText&&(this._addClass(n,"ui-tabs-loading"),a.attr("aria-busy","true"),this.xhr.done(function(t,e,n){setTimeout(function(){a.html(t),s._trigger("load",i,r),h(n,e)},1)}).fail(function(t,e){setTimeout(function(){h(t,e)},1)})))},_ajaxSettings:function(e,i,s){var n=this;return{url:e.attr("href").replace(/#.*$/,""),beforeSend:function(e,o){return n._trigger("beforeLoad",i,t.extend({jqXHR:e,ajaxSettings:o},s))}}},_getPanelForTab:function(e){var i=t(e).attr("aria-controls");return this.element.find(this._sanitizeSelector("#"+i))}}),t.uiBackCompat!==!1&&t.widget("ui.tabs",t.ui.tabs,{_processTabs:function(){this._superApply(arguments),this._addClass(this.tabs,"ui-tab")}}),t.ui.tabs,t.widget("ui.tooltip",{version:"1.12.1",options:{classes:{"ui-tooltip":"ui-corner-all ui-widget-shadow"},content:function(){var e=t(this).attr("title")||"";return t("").text(e).html()},hide:!0,items:"[title]:not([disabled])",position:{my:"left top+15",at:"left bottom",collision:"flipfit flip"},show:!0,track:!1,close:null,open:null},_addDescribedBy:function(e,i){var s=(e.attr("aria-describedby")||"").split(/\s+/);s.push(i),e.data("ui-tooltip-id",i).attr("aria-describedby",t.trim(s.join(" ")))},_removeDescribedBy:function(e){var i=e.data("ui-tooltip-id"),s=(e.attr("aria-describedby")||"").split(/\s+/),n=t.inArray(i,s);-1!==n&&s.splice(n,1),e.removeData("ui-tooltip-id"),s=t.trim(s.join(" ")),s?e.attr("aria-describedby",s):e.removeAttr("aria-describedby")},_create:function(){this._on({mouseover:"open",focusin:"open"}),this.tooltips={},this.parents={},this.liveRegion=t("
    ").attr({role:"log","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this.disabledTitles=t([])},_setOption:function(e,i){var s=this;this._super(e,i),"content"===e&&t.each(this.tooltips,function(t,e){s._updateContent(e.element)})},_setOptionDisabled:function(t){this[t?"_disable":"_enable"]()},_disable:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur");n.target=n.currentTarget=s.element[0],e.close(n,!0)}),this.disabledTitles=this.disabledTitles.add(this.element.find(this.options.items).addBack().filter(function(){var e=t(this);return e.is("[title]")?e.data("ui-tooltip-title",e.attr("title")).removeAttr("title"):void 0}))},_enable:function(){this.disabledTitles.each(function(){var e=t(this);e.data("ui-tooltip-title")&&e.attr("title",e.data("ui-tooltip-title"))}),this.disabledTitles=t([])},open:function(e){var i=this,s=t(e?e.target:this.element).closest(this.options.items);s.length&&!s.data("ui-tooltip-id")&&(s.attr("title")&&s.data("ui-tooltip-title",s.attr("title")),s.data("ui-tooltip-open",!0),e&&"mouseover"===e.type&&s.parents().each(function(){var e,s=t(this);s.data("ui-tooltip-open")&&(e=t.Event("blur"),e.target=e.currentTarget=this,i.close(e,!0)),s.attr("title")&&(s.uniqueId(),i.parents[this.id]={element:this,title:s.attr("title")},s.attr("title",""))}),this._registerCloseHandlers(e,s),this._updateContent(s,e))},_updateContent:function(t,e){var i,s=this.options.content,n=this,o=e?e.type:null;return"string"==typeof s||s.nodeType||s.jquery?this._open(e,t,s):(i=s.call(t[0],function(i){n._delay(function(){t.data("ui-tooltip-open")&&(e&&(e.type=o),this._open(e,t,i))})}),i&&this._open(e,t,i),void 0)},_open:function(e,i,s){function n(t){l.of=t,a.is(":hidden")||a.position(l)}var o,a,r,h,l=t.extend({},this.options.position);if(s){if(o=this._find(i))return o.tooltip.find(".ui-tooltip-content").html(s),void 0;i.is("[title]")&&(e&&"mouseover"===e.type?i.attr("title",""):i.removeAttr("title")),o=this._tooltip(i),a=o.tooltip,this._addDescribedBy(i,a.attr("id")),a.find(".ui-tooltip-content").html(s),this.liveRegion.children().hide(),h=t("
    ").html(a.find(".ui-tooltip-content").html()),h.removeAttr("name").find("[name]").removeAttr("name"),h.removeAttr("id").find("[id]").removeAttr("id"),h.appendTo(this.liveRegion),this.options.track&&e&&/^mouse/.test(e.type)?(this._on(this.document,{mousemove:n}),n(e)):a.position(t.extend({of:i},this.options.position)),a.hide(),this._show(a,this.options.show),this.options.track&&this.options.show&&this.options.show.delay&&(r=this.delayedShow=setInterval(function(){a.is(":visible")&&(n(l.of),clearInterval(r))},t.fx.interval)),this._trigger("open",e,{tooltip:a})}},_registerCloseHandlers:function(e,i){var s={keyup:function(e){if(e.keyCode===t.ui.keyCode.ESCAPE){var s=t.Event(e);s.currentTarget=i[0],this.close(s,!0)}}};i[0]!==this.element[0]&&(s.remove=function(){this._removeTooltip(this._find(i).tooltip)}),e&&"mouseover"!==e.type||(s.mouseleave="close"),e&&"focusin"!==e.type||(s.focusout="close"),this._on(!0,i,s)},close:function(e){var i,s=this,n=t(e?e.currentTarget:this.element),o=this._find(n);return o?(i=o.tooltip,o.closing||(clearInterval(this.delayedShow),n.data("ui-tooltip-title")&&!n.attr("title")&&n.attr("title",n.data("ui-tooltip-title")),this._removeDescribedBy(n),o.hiding=!0,i.stop(!0),this._hide(i,this.options.hide,function(){s._removeTooltip(t(this))}),n.removeData("ui-tooltip-open"),this._off(n,"mouseleave focusout keyup"),n[0]!==this.element[0]&&this._off(n,"remove"),this._off(this.document,"mousemove"),e&&"mouseleave"===e.type&&t.each(this.parents,function(e,i){t(i.element).attr("title",i.title),delete s.parents[e]}),o.closing=!0,this._trigger("close",e,{tooltip:i}),o.hiding||(o.closing=!1)),void 0):(n.removeData("ui-tooltip-open"),void 0)},_tooltip:function(e){var i=t("
    ").attr("role","tooltip"),s=t("
    ").appendTo(i),n=i.uniqueId().attr("id");return this._addClass(s,"ui-tooltip-content"),this._addClass(i,"ui-tooltip","ui-widget ui-widget-content"),i.appendTo(this._appendTo(e)),this.tooltips[n]={element:e,tooltip:i}},_find:function(t){var e=t.data("ui-tooltip-id");return e?this.tooltips[e]:null},_removeTooltip:function(t){t.remove(),delete this.tooltips[t.attr("id")]},_appendTo:function(t){var e=t.closest(".ui-front, dialog");return e.length||(e=this.document[0].body),e},_destroy:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur"),o=s.element;n.target=n.currentTarget=o[0],e.close(n,!0),t("#"+i).remove(),o.data("ui-tooltip-title")&&(o.attr("title")||o.attr("title",o.data("ui-tooltip-title")),o.removeData("ui-tooltip-title"))}),this.liveRegion.remove()}}),t.uiBackCompat!==!1&&t.widget("ui.tooltip",t.ui.tooltip,{options:{tooltipClass:null},_tooltip:function(){var t=this._superApply(arguments);return this.options.tooltipClass&&t.tooltip.addClass(this.options.tooltipClass),t}}),t.ui.tooltip}); diff --git a/docs/waifu_plugin/jquery.min.js b/docs/waifu_plugin/jquery.min.js deleted file mode 100644 index ab28a24729b320bffd3d2f60302af949db39ab85..0000000000000000000000000000000000000000 --- a/docs/waifu_plugin/jquery.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */ -!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var 
b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new 
RegExp("^(?:"+L+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="
    ",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 
0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+M+")"+a+"("+M+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return 
function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||"")||fb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++db;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||tb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:" 
"===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ib(function(a){return a.innerHTML="
    ","#"===a.firstChild.getAttribute("href")})||jb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML="",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||jb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return null==a.getAttribute("disabled")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof 
a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var 
b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h; -if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" 
")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.lengthh;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML="
    a",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h]","i"),hb=/^\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,jb=/<([\w:]+)/,kb=/\s*$/g,rb={option:[1,""],legend:[1,"
    ","
    "],area:[1,"",""],param:[1,"",""],thead:[1,"","
    "],tr:[2,"","
    "],col:[2,"","
    "],td:[3,"","
    "],_default:k.htmlSerialize?[0,"",""]:[1,"X
    ","
    "]},sb=db(y),tb=sb.appendChild(y.createElement("div"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,"script"),d.length>0&&zb(d,!i&&ub(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(jb.exec(f)||["",""])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,"<$1>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f="table"!==i||kb.test(f)?""!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,"input"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),"script"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in 
g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,""):void 0;if(!("string"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ib,"<$1>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,"script"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qb,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(Cb||m("