Tuchuanhuhuhu committed on
Commit
968cb26
1 Parent(s): 017dec1
Files changed (2)
  1. config_example.json +2 -1
  2. modules/llama_func.py +3 -4
config_example.json CHANGED
@@ -5,7 +5,8 @@
     // If you use a proxy, uncomment the two lines below and replace the proxy URL
     // "https_proxy": "http://127.0.0.1:1079",
     // "http_proxy": "http://127.0.0.1:1079",
-    "users": [],
+    "users": [], // user list: [[username1, password1], [username2, password2], ...]
+    "local_embedding": false, // whether to build the embedding index locally
     "advance_docs": {
         "pdf": {
             // whether to treat the PDF as two-column
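For context, the new `users` key holds `[username, password]` pairs and `local_embedding` toggles building the embedding index locally instead of via the OpenAI API. Below is a minimal sketch of how such a config might be consumed; the values and the `check_login` helper are illustrative, not part of the repository:

```python
import json

# Made-up values; the key names and the [[username, password], ...] shape
# follow the comments added in config_example.json above.
config = json.loads("""
{
    "users": [["alice", "secret1"], ["bob", "secret2"]],
    "local_embedding": true
}
""")

def check_login(username: str, password: str) -> bool:
    # Each entry in "users" is a [username, password] pair.
    return [username, password] in config["users"]

assert check_login("alice", "secret1")
assert not check_login("alice", "wrong")
print("local_embedding enabled:", config["local_embedding"])
```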
modules/llama_func.py CHANGED
@@ -111,13 +111,13 @@ def construct_index(
 
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key
+    else:
+        # Because of a poorly designed dependency, an API KEY must be present here
+        os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"
     chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
     embedding_limit = None if embedding_limit == 0 else embedding_limit
     separator = " " if separator == "" else separator
 
-    llm_predictor = LLMPredictor(
-        llm=ChatOpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
-    )
     prompt_helper = PromptHelper(
         max_input_size=max_input_size,
         num_output=num_outputs,
@@ -140,7 +140,6 @@ def construct_index(
     logging.info("构建索引中……")
     with retrieve_proxy():
         service_context = ServiceContext.from_defaults(
-            llm_predictor=llm_predictor,
             prompt_helper=prompt_helper,
             chunk_size_limit=chunk_size_limit,
             embed_model=embed_model,
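Taken together, the two hunks stop constructing an `LLMPredictor` around `ChatOpenAI` for index building and instead guarantee that `OPENAI_API_KEY` is always set, using a placeholder when no real key is available. A minimal sketch of that fallback in isolation, assuming a hypothetical `ensure_openai_key` helper (the function name is mine, not the repository's):

```python
import os
from typing import Optional

def ensure_openai_key(api_key: Optional[str]) -> None:
    """Set OPENAI_API_KEY, falling back to a placeholder.

    Per the comment added in the commit, a dependency requires the
    variable to be set even when embeddings are computed locally and
    no OpenAI request is ever made with the key.
    """
    if api_key:
        os.environ["OPENAI_API_KEY"] = api_key
    else:
        # Placeholder; never used for an actual API call when
        # local_embedding is enabled.
        os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"

ensure_openai_key(None)
assert os.environ["OPENAI_API_KEY"].startswith("sk-")
```

Dropping `llm_predictor` from `ServiceContext.from_defaults(...)` leaves the service context relying only on the `embed_model` that is passed in, which is what makes the new `local_embedding` config option workable.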