# ReadReview / config.ini
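
# Feature-store (retrieval) settings: paths to the local embedding and reranker
# models, the document repository to index (repo_dir), the index output directory
# (work_dir), and chunking parameters. reject_throttle is presumably the relevance
# threshold below which unrelated queries are rejected.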
[feature_store]
reject_throttle = 0
embedding_model_path = "/root/models/bce-embedding-base_v1"
reranker_model_path = "/root/models/bce-reranker-base_v1"
repo_dir = "repodir"
work_dir = "workdir"
n_clusters = [20, 50]
chunk_size = 1024
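
# Web-search fallback: x_api_key holds the search provider's API key (placeholder),
# domain_partial_order presumably ranks preferred result domains, and raw search
# results are saved under save_dir.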
[web_search]
x_api_key = "${YOUR-API-KEY}"
domain_partial_order = ["openai.com", "pytorch.org", "readthedocs.io", "nvidia.com", "stackoverflow.com", "juejin.cn", "zhuanlan.zhihu.com", "www.cnblogs.com"]
save_dir = "logs/web_search_result"
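
# LLM backend selection: enable_local / enable_remote toggle the local model and
# the remote API (here only the remote backend is enabled); client_url is the
# inference endpoint the pipeline calls, presumably served by [llm.server] below.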
[llm]
enable_local = 0
enable_remote = 1
client_url = "http://127.0.0.1:8888/inference"
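
# Inference-server settings: local model path, maximum text length, and bind port
# for the local backend; the remote_* keys configure the hosted API (deepseek-chat)
# used when enable_remote = 1. rpm presumably caps requests per minute to that API.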
[llm.server]
local_llm_path = "/root/models/Qwen1.5-7B-Chat"
local_llm_max_text_length = 32000
local_llm_bind_port = 8888
remote_type = "deepseek"
remote_api_key = "${YOUR-LLM-API-KEY}"
remote_base_url = ""
remote_llm_max_text_length = 32000
remote_llm_model = "deepseek-chat"
rpm = 500
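
# Worker settings: enable_sg_search toggles the [sg_search] code-search step;
# worker logs are written to save_path.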
[worker]
enable_sg_search = 0
save_path = "logs/work.txt"
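
# Hours during which the worker is active (here the full day); has_weekday
# presumably controls whether weekdays are included.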
[worker.time]
start = "00:00:00"
end = "23:59:59"
has_weekday = 1
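
# Source-code search: binary_src_path points to the `src` binary (presumably the
# Sourcegraph CLI) and src_access_token is its access-token placeholder. Each
# subsection below registers one GitHub repository with a short introduction.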
[sg_search]
binary_src_path = "/usr/local/bin/src"
src_access_token = "${YOUR-SRC-ACCESS-TOKEN}"

[sg_search.opencompass]
github_repo_id = "open-compass/opencompass"
introduction = "Used for evaluating large language models (LLMs). It provides a complete, open-source, reproducible evaluation framework that supports one-stop evaluation of large language and multimodal models. Built on distributed technology, it can efficiently evaluate even very large models. Evaluation covers five capability dimensions: knowledge, language, understanding, reasoning, and examinations. It integrates more than 70 evaluation datasets, providing over 400,000 model evaluation questions in total, and offers evaluations of three LLM-specific capabilities: long text, safety, and code."

[sg_search.lmdeploy]
github_repo_id = "internlm/lmdeploy"
introduction = "lmdeploy is a toolkit for compressing, deploying, and serving LLMs (Large Language Models). It is a server-side deployment tool for transformer-based LLMs that supports GPU server deployment with reliable speed, Tensor Parallelism, and multi-concurrency optimization. Its features are comprehensive, including model conversion and a cache feature for storing conversation history. It also provides WebUI, command-line, and gRPC client access."
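
# Front-end integration: type selects the chat front end ("none" disables it),
# webhook_url is a Lark (Feishu) bot webhook, and message_process_policy presumably
# controls when incoming messages are handled.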
[frontend]
type = "none"
webhook_url = "https://open.feishu.cn/open-apis/bot/v2/hook/xxxxxxxxxxxxxxx"
message_process_policy = "immediate"
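
# Lark (Feishu) group-bot credentials: app_id/app_secret identify the app;
# encrypt_key and verification_token are used for Lark event-callback verification
# (the values here look like placeholders).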
[frontend.lark_group]
app_id = "cli_a53a34dcb778500e"
app_secret = "2ajhg1ixSvlNm1bJkH4tJhPfTCsGGHT1"
encrypt_key = "abc"
verification_token = "def"
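
# Personal-WeChat bridge: bind_port is presumably the local port the WeChat
# message-callback service listens on.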
[frontend.wechat_personal]
bind_port = 9527