KevinHuSh
committed on
Commit
·
3abc590
1
Parent(s):
858916d
fix create dialog bug (#982)
Browse files

### What problem does this PR solve?
### Type of change
- [x] Bug Fix (non-breaking change which fixes an issue)
- api/apps/dialog_app.py +1 -0
- api/utils/file_utils.py +1 -1
- deepdoc/parser/html_parser.py +14 -2
- rag/app/book.py +1 -1
api/apps/dialog_app.py
CHANGED
@@ -38,6 +38,7 @@ def set_dialog():
|
|
38 |
if not rerank_id: req["rerank_id"] = ""
|
39 |
similarity_threshold = req.get("similarity_threshold", 0.1)
|
40 |
vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
|
|
|
41 |
llm_setting = req.get("llm_setting", {})
|
42 |
default_prompt = {
|
43 |
"system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
|
|
|
38 |
if not rerank_id: req["rerank_id"] = ""
|
39 |
similarity_threshold = req.get("similarity_threshold", 0.1)
|
40 |
vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
|
41 |
+
if vector_similarity_weight is None: vector_similarity_weight = 0.3
|
42 |
llm_setting = req.get("llm_setting", {})
|
43 |
default_prompt = {
|
44 |
"system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
|
api/utils/file_utils.py
CHANGED
@@ -156,7 +156,7 @@ def filename_type(filename):
|
|
156 |
return FileType.PDF.value
|
157 |
|
158 |
if re.match(
|
159 |
-
|
160 |
return FileType.DOC.value
|
161 |
|
162 |
if re.match(
|
|
|
156 |
return FileType.PDF.value
|
157 |
|
158 |
if re.match(
|
159 |
+
r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html)$", filename):
|
160 |
return FileType.DOC.value
|
161 |
|
162 |
if re.match(
|
deepdoc/parser/html_parser.py
CHANGED
@@ -1,4 +1,16 @@
|
|
1 |
# -*- coding: utf-8 -*-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
from rag.nlp import find_codec
|
3 |
import readability
|
4 |
import html_text
|
@@ -8,7 +20,7 @@ def get_encoding(file):
|
|
8 |
with open(file,'rb') as f:
|
9 |
tmp = chardet.detect(f.read())
|
10 |
return tmp['encoding']
|
11 |
-
|
12 |
class RAGFlowHtmlParser:
|
13 |
def __call__(self, fnm, binary=None):
|
14 |
txt = ""
|
@@ -18,7 +30,7 @@ class RAGFlowHtmlParser:
|
|
18 |
else:
|
19 |
with open(fnm, "r",encoding=get_encoding(fnm)) as f:
|
20 |
txt = f.read()
|
21 |
-
|
22 |
html_doc = readability.Document(txt)
|
23 |
title = html_doc.title()
|
24 |
content = html_text.extract_text(html_doc.summary(html_partial=True))
|
|
|
1 |
# -*- coding: utf-8 -*-
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
#
|
14 |
from rag.nlp import find_codec
|
15 |
import readability
|
16 |
import html_text
|
|
|
20 |
with open(file,'rb') as f:
|
21 |
tmp = chardet.detect(f.read())
|
22 |
return tmp['encoding']
|
23 |
+
|
24 |
class RAGFlowHtmlParser:
|
25 |
def __call__(self, fnm, binary=None):
|
26 |
txt = ""
|
|
|
30 |
else:
|
31 |
with open(fnm, "r",encoding=get_encoding(fnm)) as f:
|
32 |
txt = f.read()
|
33 |
+
|
34 |
html_doc = readability.Document(txt)
|
35 |
title = html_doc.title()
|
36 |
content = html_text.extract_text(html_doc.summary(html_partial=True))
|
rag/app/book.py
CHANGED
@@ -135,7 +135,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|
135 |
for ck in hierarchical_merge(bull, sections, 5)]
|
136 |
else:
|
137 |
sections = [s.split("@") for s, _ in sections]
|
138 |
-
sections = [(pr[0], "@" + pr[1])
|
139 |
chunks = naive_merge(
|
140 |
sections, kwargs.get(
|
141 |
"chunk_token_num", 256), kwargs.get(
|
|
|
135 |
for ck in hierarchical_merge(bull, sections, 5)]
|
136 |
else:
|
137 |
sections = [s.split("@") for s, _ in sections]
|
138 |
+
sections = [(pr[0], "@" + pr[1]) if len(pr) == 2 else (pr[0], '') for pr in sections ]
|
139 |
chunks = naive_merge(
|
140 |
sections, kwargs.get(
|
141 |
"chunk_token_num", 256), kwargs.get(
|