KevinHuSh committed
Commit: b7adc24 · Parent: c22c6b5

add help info (#142)

Changed files:
- web/src/components/similarity-slider/index.tsx (+8 -2)
- web/src/pages/add-knowledge/components/knowledge-setting/category-panel.tsx (+3 -3)
- web/src/pages/add-knowledge/components/knowledge-setting/configuration.tsx (+7 -7)
- web/src/pages/add-knowledge/components/knowledge-setting/utils.ts (+76 -37)
- web/src/pages/add-knowledge/components/knowledge-testing/testing-control/index.tsx (+3 -2)
- web/src/pages/chat/chat-configuration-modal/assistant-setting.tsx (+6 -4)
- web/src/pages/chat/chat-configuration-modal/model-setting.tsx (+8 -8)
- web/src/pages/chat/chat-configuration-modal/prompt-engine.tsx (+7 -4)
- web/src/pages/user-setting/setting-model/api-key-modal/index.tsx (+1 -1)
- web/src/pages/user-setting/setting-model/system-model-setting-modal/index.tsx (+12 -10)
- web/src/pages/user-setting/setting-profile/index.tsx (+0 -6)
web/src/components/similarity-slider/index.tsx
CHANGED
@@ -15,7 +15,10 @@ const SimilaritySlider = ({ isTooltipShown = false }: IProps) => {
       <Form.Item<FieldType>
         label="Similarity threshold"
         name={'similarity_threshold'}
-        tooltip={isTooltipShown &&
+        tooltip={isTooltipShown && `We use a hybrid similarity score to evaluate the distance between two lines of text.
+          It's weighted keyword similarity and vector cosine similarity.
+          If the similarity between the query and a chunk is less than this threshold, the chunk will be filtered out.`
+        }
         initialValue={0.2}
       >
         <Slider max={1} step={0.01} />
@@ -24,7 +27,10 @@ const SimilaritySlider = ({ isTooltipShown = false }: IProps) => {
         label="Vector similarity weight"
         name={'vector_similarity_weight'}
         initialValue={0.3}
-        tooltip={isTooltipShown &&
+        tooltip={isTooltipShown && `We use a hybrid similarity score to evaluate the distance between two lines of text.
+          It's weighted keyword similarity and vector cosine similarity.
+          The sum of both weights is 1.0.
+        `}
       >
         <Slider max={1} step={0.01} />
       </Form.Item>
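For orientation, the two tooltips describe a single scheme: a hybrid score that mixes keyword similarity with vector cosine similarity, and chunks scoring below the threshold are dropped. Below is a minimal TypeScript sketch of that idea; the `Chunk` shape, function names, and the assumption that per-chunk similarities are already computed are illustrative, not RAGFlow's actual retrieval code.

```ts
// Illustrative only: hybrid similarity as the tooltips describe it.
interface Chunk {
  id: string;
  keywordSimilarity: number; // weighted keyword similarity, 0..1
  vectorSimilarity: number;  // vector cosine similarity, 0..1
}

// The two weights sum to 1.0, so only the vector weight needs to be configured.
function hybridScore(chunk: Chunk, vectorWeight: number): number {
  return (
    (1 - vectorWeight) * chunk.keywordSimilarity +
    vectorWeight * chunk.vectorSimilarity
  );
}

// Chunks whose hybrid score falls below the threshold are filtered out.
function filterByThreshold(
  chunks: Chunk[],
  similarityThreshold = 0.2, // matches the form's initialValue
  vectorWeight = 0.3,        // matches the form's initialValue
): Chunk[] {
  return chunks.filter(
    (c) => hybridScore(c, vectorWeight) >= similarityThreshold,
  );
}
```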
web/src/pages/add-knowledge/components/knowledge-setting/category-panel.tsx
CHANGED
@@ -33,16 +33,16 @@ const CategoryPanel = ({ chunkMethod }: { chunkMethod: string }) => {
       {imageList.length > 0 ? (
         <>
           <Title level={5} className={styles.topTitle}>
-            {item.title}
+            "{item.title}" Chunking Method Description
           </Title>
           <p
             dangerouslySetInnerHTML={{
               __html: item.description,
             }}
           ></p>
-          <Title level={5}>{item.title}
+          <Title level={5}>"{item.title}" Examples</Title>
           <Text>
-
+            These visual guides are provided to make understanding easier
             for you.
           </Text>
           <Row gutter={[10, 10]} className={styles.imageRow}>
web/src/pages/add-knowledge/components/knowledge-setting/configuration.tsx
CHANGED
@@ -83,7 +83,7 @@ const ConfigurationForm = ({ form }: { form: FormInstance }) => {
       <Form.Item
         name="permission"
         label="Permissions"
-        tooltip="
+        tooltip="If the permission is 'Team', all the team members can manage the knowledgebase."
         rules={[{ required: true }]}
       >
         <Radio.Group>
@@ -93,22 +93,22 @@ const ConfigurationForm = ({ form }: { form: FormInstance }) => {
       </Form.Item>
       <Form.Item
         name="embd_id"
-        label="Embedding
+        label="Embedding model"
         rules={[{ required: true }]}
-        tooltip="
+        tooltip="The embedding model used to embed chunks. It's unchangeable once the knowledgebase has chunks. You need to delete all the chunks if you want to change it."
       >
         <Select
-          placeholder="Please select a
+          placeholder="Please select an embedding model"
           options={embeddingModelOptions}
         ></Select>
       </Form.Item>
       <Form.Item
         name="parser_id"
         label="Chunk method"
-        tooltip="
+        tooltip="The instructions are on the right."
         rules={[{ required: true }]}
       >
-        <Select placeholder="Please select a
+        <Select placeholder="Please select a chunk method">
           {parserList.map((x) => (
             <Option value={x.value} key={x.value}>
               {x.label}
@@ -122,7 +122,7 @@ const ConfigurationForm = ({ form }: { form: FormInstance }) => {

   if (parserId === 'naive') {
     return (
-      <Form.Item label="
+      <Form.Item label="Token number" tooltip="It approximately determines the token number of a chunk.">
         <Flex gap={20} align="center">
           <Flex flex={1}>
             <Form.Item
web/src/pages/add-knowledge/components/knowledge-setting/utils.ts
CHANGED
@@ -7,78 +7,117 @@ export const ImageMap = {
   book: getImageName('book', 4),
   laws: getImageName('law', 4),
   manual: getImageName('manual', 4),
+  picture: getImageName('picture', 2),
   naive: getImageName('naive', 2),
   paper: getImageName('paper', 2),
   presentation: getImageName('presentation', 2),
   qa: getImageName('qa', 2),
   resume: getImageName('resume', 2),
   table: getImageName('table', 2),
+  one: getImageName('one', 2),
 };

 export const TextMap = {
   book: {
     title: '',
-    description:
+    description: `<p>Supported file formats are <b>DOCX</b>, <b>PDF</b>, <b>TXT</b>.</p><p>
       Since a book is long and not all the parts are useful, if it's a PDF,
-      please setup the page ranges for every book in order eliminate negative effects and save computing time for analyzing
+      please set up the <i>page ranges</i> for every book in order to eliminate negative effects and save computing time for analyzing.</p>`,
   },
   laws: {
     title: '',
-    description:
+    description: `<p>Supported file formats are <b>DOCX</b>, <b>PDF</b>, <b>TXT</b>.</p><p>
+      Legal documents have a very rigorous writing format. We use text features to detect split points.
+      </p><p>
+      The chunk granularity is consistent with 'ARTICLE', and all the upper-level text will be included in the chunk.
+      </p>`,
   },
-  manual: { title: '', description:
+  manual: { title: '', description: `<p>Only <b>PDF</b> is supported.</p><p>
+    We assume the manual has a hierarchical section structure and use the lowest section titles as pivots to slice documents.
+    So the figures and tables in the same section will not be sliced apart, and the chunk size might be large.
+    </p>` },
   naive: {
     title: '',
-    description:
-      This method apply the naive ways to chunk files
+    description: `<p>Supported file formats are <b>DOCX, EXCEL, PPT, IMAGE, PDF, TXT</b>.</p>
+      <p>This method applies the naive way to chunk files: </p>
+      <p>
+      <li>Successive text will be sliced into pieces using a vision detection model.</li>
+      <li>Next, these successive pieces are merged into chunks whose token number is no more than 'Token number'.</li></p>`,
   },
   paper: {
     title: '',
-    description:
+    description: `<p>Only <b>PDF</b> file is supported.</p><p>
+      If our model works well, the paper will be sliced by its sections, like <i>abstract, 1.1, 1.2</i>, etc. </p><p>
+      The benefit of doing this is that the LLM can better summarize the content of relevant sections in the paper,
+      resulting in more comprehensive answers that help readers better understand the paper.
+      The downside is that it increases the context of the LLM conversation and adds computational cost,
+      so during the conversation, you can consider reducing the '<b>topN</b>' setting.</p>`,
   },
   presentation: {
     title: '',
-    description:
-      Every page will be treated as a chunk. And the thumbnail of every page will be stored
-      PPT
+    description: `<p>The supported file formats are <b>PDF</b>, <b>PPTX</b>.</p><p>
+      Every page will be treated as a chunk, and the thumbnail of every page will be stored.</p><p>
+      <i>All the PPT files you upload will be chunked with this method automatically; setting it up for every PPT file is not necessary.</i></p>`,
   },
   qa: {
     title: '',
-    description:
-      If the file is in excel format, there should be 2
+    description: `<p><b>EXCEL</b> and <b>CSV/TXT</b> files are supported.</p><p>
+      If the file is in Excel format, there should be 2 columns, question and answer, without headers.
       And question column is ahead of answer column.
-      And it's O.K if it has multiple sheets as long as the columns are rightly composed
+      And it's OK if it has multiple sheets as long as the columns are correctly composed.</p><p>

-      If it's in csv format, it should be UTF-8 encoded. Use TAB as delimiter to separate question and answer
+      If it's in CSV format, it should be UTF-8 encoded. Use TAB as the delimiter to separate question and answer.</p><p>

-      All the deformed lines will be ignored.
-      Every pair of Q&A will be treated as a chunk
+      <i>All the deformed lines will be ignored.
+      Every pair of Q&A will be treated as a chunk.</i></p>`,
   },
   resume: {
     title: '',
-    description:
+    description: `<p>The supported file formats are <b>DOCX</b>, <b>PDF</b>, <b>TXT</b>.
+      </p><p>
+      Résumés come in a variety of formats, just like a person's personality, but we often have to organize them into structured data that is easy to search.
+      </p><p>
+      Instead of chunking the résumé, we parse it into structured data. As an HR, you can dump all the résumés you have,
+      then list all the candidates that match the qualifications just by talking with <i>'RagFlow'</i>.
+      </p>
+      `,
   },
   table: {
     title: '',
-    description:
+    description: `<p><b>EXCEL</b> and <b>CSV/TXT</b> format files are supported.</p><p>
+      Here are some tips:
+      <ul>
+      <li>For a CSV or TXT file, the delimiter between columns is <em><b>TAB</b></em>.</li>
+      <li>The first line must be column headers.</li>
+      <li>Column headers must be meaningful terms in order to help our LLM understand.
+      It's good to enumerate some synonyms using a slash <i>'/'</i> to separate, and even better to
+      enumerate values using brackets like <i>'gender/sex(male, female)'</i>.<p>
+      Here are some examples for headers:<ol>
+      <li>supplier/vendor<b>'TAB'</b>color(yellow, red, brown)<b>'TAB'</b>gender/sex(male, female)<b>'TAB'</b>size(M,L,XL,XXL)</li>
+      <li>姓名/名字<b>'TAB'</b>电话/手机/微信<b>'TAB'</b>最高学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)</li>
+      </ol>
+      </p>
+      </li>
+      <li>Every row in the table will be treated as a chunk.</li>
+      </ul>`,
+  },
+  picture: {
+    title: '',
+    description: `
+      <p>Image files are supported. Video is coming soon.</p><p>
+      If the picture has text in it, OCR is applied to extract the text as its text description.
+      </p><p>
+      If the text extracted by OCR is not enough, a visual LLM is used to get the descriptions.
+      </p>`,
+  },
+  one: {
+    title: '',
+    description: `
+      <p>Supported file formats are <b>DOCX, EXCEL, PDF, TXT</b>.
+      </p><p>
+      Each document will be treated as one entire chunk; no splitting at all.
+      </p><p>
+      If you don't trust any chunk method and the selected LLM's context length covers the document length, you can try this method.
+      </p>`,
   },
 };
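These description strings are raw HTML fragments; category-panel.tsx above injects them with dangerouslySetInnerHTML. A stripped-down sketch of that consumption follows (the component name and surrounding markup are illustrative, not the actual panel code):

```tsx
import React from 'react';
import { TextMap } from './utils';

// Illustrative only: look up the help text for a chunk method and render the
// HTML description the same way category-panel.tsx does.
const ChunkMethodHelp = ({ chunkMethod }: { chunkMethod: keyof typeof TextMap }) => {
  const item = TextMap[chunkMethod];
  return (
    <section>
      <h5>"{chunkMethod}" Chunking Method Description</h5>
      <p dangerouslySetInnerHTML={{ __html: item.description }}></p>
    </section>
  );
};

export default ChunkMethodHelp;
```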
web/src/pages/add-knowledge/components/knowledge-testing/testing-control/index.tsx
CHANGED
@@ -53,9 +53,10 @@ const TestingControl = ({ form, handleTesting }: IProps) => {
     >
       <SimilaritySlider isTooltipShown></SimilaritySlider>
       <Form.Item<FieldType>
-        label="Top
+        label="Top K"
         name={'top_k'}
-        tooltip="
+        tooltip="Due to the computation cost, not all the retrieved chunks will have vector cosine similarity with the query computed.
+          The bigger 'Top K' is, the higher the recall rate, and the slower the retrieval speed."
       >
         <Slider marks={{ 0: 0, 2048: 2048 }} max={2048} />
       </Form.Item>
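The tooltip describes a cost/recall trade-off: vector cosine similarity against the query is only computed for the top-K retrieved candidates, so a larger 'Top K' raises recall at the price of slower retrieval. A rough TypeScript sketch of that staging follows; the candidate shape and function names are assumptions, not RAGFlow's backend code.

```ts
// Illustrative two-stage retrieval: cheap keyword scoring first, then the more
// expensive vector cosine similarity only for the top-K candidates.
interface Candidate {
  id: string;
  keywordScore: number;
  vector: number[];
}

function cosine(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB) || 1);
}

function rerankTopK(queryVector: number[], candidates: Candidate[], topK = 1024) {
  return candidates
    .slice()                                         // keep the original list intact
    .sort((a, b) => b.keywordScore - a.keywordScore) // cheap ranking first
    .slice(0, topK)                                  // bigger K: higher recall, slower
    .map((c) => ({ ...c, vectorSimilarity: cosine(queryVector, c.vector) }));
}
```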
web/src/pages/chat/chat-configuration-modal/assistant-setting.tsx
CHANGED
@@ -55,6 +55,7 @@ const AssistantSetting = ({ show }: ISegmentedContentProps) => {
         label="Language"
         initialValue={'Chinese'}
         tooltip="coming soon"
+        style={{ display: 'none' }}
       >
         <Select
           options={[
@@ -66,22 +67,23 @@ const AssistantSetting = ({ show }: ISegmentedContentProps) => {
       <Form.Item
         name={['prompt_config', 'empty_response']}
         label="Empty response"
-        tooltip="
+        tooltip="If nothing is retrieved from the knowledgebase for the user's question, this will be used as the answer.
+          If you want the LLM to come up with its own opinion when nothing is retrieved, leave this blank."
       >
         <Input placeholder="" />
       </Form.Item>
       <Form.Item
         name={['prompt_config', 'prologue']}
         label="Set an opener"
-        tooltip="
+        tooltip="How do you want to welcome your clients?"
         initialValue={"Hi! I'm your assistant, what can I do for you?"}
       >
         <Input.TextArea autoSize={{ minRows: 5 }} />
       </Form.Item>
       <Form.Item
-        label="
+        label="Knowledgebases"
         name="kb_ids"
-        tooltip="
+        tooltip="Select the associated knowledgebases."
         rules={[
           {
             required: true,
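The 'Empty response' tooltip describes a simple fallback rule: when retrieval returns nothing and an empty response is configured, answer with it; when it is left blank, let the LLM answer on its own. A small sketch of that rule, with hypothetical function and parameter names:

```ts
// Illustrative only: choose between the configured empty response and a free
// LLM answer when nothing was retrieved from the knowledgebase.
function pickAnswer(
  retrievedChunks: string[],
  emptyResponse: string,
  askLlm: () => string,
): string {
  if (retrievedChunks.length === 0 && emptyResponse.trim() !== '') {
    return emptyResponse;
  }
  return askLlm();
}
```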
web/src/pages/chat/chat-configuration-modal/model-setting.tsx
CHANGED
@@ -46,16 +46,16 @@ const ModelSetting = ({ show, form }: ISegmentedContentProps) => {
       <Form.Item
         label="Model"
         name="llm_id"
-        tooltip="
+        tooltip="Large language chat model"
         rules={[{ required: true, message: 'Please select!' }]}
       >
         <Select options={modelOptions} showSearch />
       </Form.Item>
       <Divider></Divider>
       <Form.Item
-        label="
+        label="Freedom"
         name="parameters"
-        tooltip="
+        tooltip="'Precise' means the LLM will be conservative and answer your question cautiously. 'Improvise' means you want the LLM to talk freely and at length. 'Balance' is between the two."
         initialValue={ModelVariableType.Precise}
         // rules={[{ required: true, message: 'Please input!' }]}
       >
@@ -64,7 +64,7 @@ const ModelSetting = ({ show, form }: ISegmentedContentProps) => {
           onChange={handleParametersChange}
         />
       </Form.Item>
-      <Form.Item label="Temperature" tooltip={'
+      <Form.Item label="Temperature" tooltip={'This parameter controls the randomness of predictions by the model. A lower temperature makes the model more confident in its responses, while a higher temperature makes it more creative and diverse.'}>
         <Flex gap={20} align="center">
           <Form.Item
             name={'temperatureEnabled'}
@@ -96,7 +96,7 @@ const ModelSetting = ({ show, form }: ISegmentedContentProps) => {
           </Form.Item>
         </Flex>
       </Form.Item>
-      <Form.Item label="Top P" tooltip={'
+      <Form.Item label="Top P" tooltip={'Also known as “nucleus sampling,” this parameter sets a threshold to select a smaller set of words to sample from. It focuses on the most likely words, cutting off the less probable ones.'}>
         <Flex gap={20} align="center">
           <Form.Item name={'topPEnabled'} valuePropName="checked" noStyle>
             <Switch size="small" />
@@ -124,7 +124,7 @@ const ModelSetting = ({ show, form }: ISegmentedContentProps) => {
           </Form.Item>
         </Flex>
       </Form.Item>
-      <Form.Item label="Presence Penalty" tooltip={'
+      <Form.Item label="Presence Penalty" tooltip={'This discourages the model from repeating the same information by penalizing words that have already appeared in the conversation.'}>
         <Flex gap={20} align="center">
           <Form.Item
             name={'presencePenaltyEnabled'}
@@ -160,7 +160,7 @@ const ModelSetting = ({ show, form }: ISegmentedContentProps) => {
           </Form.Item>
         </Flex>
       </Form.Item>
-      <Form.Item label="Frequency Penalty" tooltip={'
+      <Form.Item label="Frequency Penalty" tooltip={'Similar to the presence penalty, this reduces the model’s tendency to repeat the same words frequently.'}>
         <Flex gap={20} align="center">
           <Form.Item
             name={'frequencyPenaltyEnabled'}
@@ -196,7 +196,7 @@ const ModelSetting = ({ show, form }: ISegmentedContentProps) => {
           </Form.Item>
         </Flex>
       </Form.Item>
-      <Form.Item label="Max Tokens" tooltip={'
+      <Form.Item label="Max Tokens" tooltip={'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).'}>
         <Flex gap={20} align="center">
           <Form.Item name={'maxTokensEnabled'} valuePropName="checked" noStyle>
             <Switch size="small" />
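The five tooltips added here describe standard sampling parameters. For orientation, this is how such values typically map onto an OpenAI-style chat completion payload; the field names follow that common convention, and the preset numbers are placeholders rather than RAGFlow's actual 'Precise' values.

```ts
// Illustrative payload covering the five parameters the form exposes.
interface ChatCompletionParams {
  model: string;
  temperature: number;       // randomness: lower = more conservative answers
  top_p: number;             // nucleus sampling threshold
  presence_penalty: number;  // penalizes words that already appeared
  frequency_penalty: number; // penalizes frequently repeated words
  max_tokens: number;        // upper bound on the output length in tokens
}

const examplePreset: ChatCompletionParams = {
  model: 'your-chat-model-id', // placeholder, not a real model id
  temperature: 0.1,
  top_p: 0.3,
  presence_penalty: 0.4,
  frequency_penalty: 0.7,
  max_tokens: 512,
};
```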
web/src/pages/chat/chat-configuration-modal/prompt-engine.tsx
CHANGED
@@ -154,7 +154,7 @@ const PromptEngine = (
       <Form.Item
         label="System"
         rules={[{ required: true, message: 'Please input!' }]}
-        tooltip="
+        tooltip="Instructions you need the LLM to follow when it answers questions, like character design, answer length, answer language, etc."
         name={['prompt_config', 'system']}
         initialValue={`你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
       以下是知识库:
@@ -166,10 +166,10 @@ const PromptEngine = (
       <Divider></Divider>
       <SimilaritySlider isTooltipShown></SimilaritySlider>
       <Form.Item<FieldType>
-        label="Top
+        label="Top N"
         name={'top_n'}
         initialValue={8}
-        tooltip={'
+        tooltip={`Not all the chunks whose similarity score is above the 'similarity threshold' will be fed to the LLM. The LLM can only see these 'Top N' chunks.`}
       >
         <Slider max={30} />
       </Form.Item>
@@ -178,7 +178,10 @@ const PromptEngine = (
       <Col span={7} className={styles.variableAlign}>
         <label className={styles.variableLabel}>
           Variables
-          <Tooltip title="
+          <Tooltip title="If you use dialog APIs, the variables might help you chat with your clients with different strategies.
+            The variables are used to fill in the 'System' part of the prompt in order to give the LLM a hint.
+            'knowledge' is a special variable which will be filled in with the retrieved chunks.
+            All the variables in 'System' should be curly-bracketed.">
             <QuestionCircleOutlined className={styles.variableIcon} />
           </Tooltip>
         </label>
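The Variables tooltip says that curly-bracketed names in the 'System' text are substituted at request time, and that {knowledge} is filled with the retrieved chunks. A minimal sketch of that substitution, with a hypothetical helper name and sample data:

```ts
// Illustrative only: replace every {variable} in the system prompt template.
function fillPrompt(template: string, variables: Record<string, string>): string {
  return template.replace(/\{(\w+)\}/g, (match: string, name: string) =>
    name in variables ? variables[name] : match,
  );
}

// Usage: 'knowledge' is the special variable carrying the retrieved chunks.
const retrievedChunks = ['chunk one ...', 'chunk two ...']; // placeholder data
const systemPrompt = fillPrompt('以下是知识库:\n{knowledge}\n以上是知识库。', {
  knowledge: retrievedChunks.join('\n'),
});
```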
web/src/pages/user-setting/setting-model/api-key-modal/index.tsx
CHANGED
@@ -66,7 +66,7 @@ const ApiKeyModal = ({
       <Form.Item<FieldType>
         label="Api-Key"
         name="api_key"
-        tooltip="
+        tooltip="The API key can be obtained by registering with the corresponding LLM supplier."
         rules={[{ required: true, message: 'Please input api key!' }]}
       >
         <Input />
web/src/pages/user-setting/setting-model/system-model-setting-modal/index.tsx
CHANGED
@@ -43,25 +43,27 @@ const SystemModelSettingModal = ({
       confirmLoading={loading}
     >
       <Form form={form} onValuesChange={onFormLayoutChange} layout={'vertical'}>
-
-
-
-          tooltip="coming soon"
-        >
-          <Select options={allOptions[LlmModelType.Speech2text]} />
+
+        <Form.Item label="Chat model" name="llm_id" tooltip="The default chat LLM that all newly created knowledgebases will use.">
+          <Select options={allOptions[LlmModelType.Chat]} />
         </Form.Item>
-        <Form.Item label="Embedding model" name="embd_id" tooltip="
+        <Form.Item label="Embedding model" name="embd_id" tooltip="The default embedding model that all newly created knowledgebases will use.">
          <Select options={allOptions[LlmModelType.Embedding]} />
        </Form.Item>
        <Form.Item
          label="Img2txt model"
          name="img2txt_id"
-          tooltip="
+          tooltip="The default multi-modal model that all newly created knowledgebases will use. It can describe a picture or video."
        >
          <Select options={allOptions[LlmModelType.Image2text]} />
        </Form.Item>
-
-
+
+        <Form.Item
+          label="Sequence2txt model"
+          name="asr_id"
+          tooltip="The default ASR model that all newly created knowledgebases will use. It transcribes speech into the corresponding text."
+        >
+          <Select options={allOptions[LlmModelType.Speech2text]} />
        </Form.Item>
      </Form>
    </Modal>
web/src/pages/user-setting/setting-profile/index.tsx
CHANGED
@@ -110,9 +110,6 @@ const UserSettingProfile = () => {
       <div>
         <Space>
           Your photo
-          <Tooltip title="coming soon">
-            <QuestionCircleOutlined />
-          </Tooltip>
         </Space>
         <div>This will be displayed on your profile.</div>
       </div>
@@ -140,7 +137,6 @@ const UserSettingProfile = () => {
       <Form.Item<FieldType>
         label="Color schema"
         name="color_schema"
-        tooltip="coming soon"
         rules={[
           { required: true, message: 'Please select your color schema!' },
         ]}
@@ -154,7 +150,6 @@ const UserSettingProfile = () => {
       <Form.Item<FieldType>
         label="Language"
         name="language"
-        tooltip="coming soon"
         rules={[{ required: true, message: 'Please input your language!' }]}
       >
         <Select placeholder="select your language">
@@ -166,7 +161,6 @@ const UserSettingProfile = () => {
       <Form.Item<FieldType>
         label="Timezone"
         name="timezone"
-        tooltip="coming soon"
         rules={[{ required: true, message: 'Please input your timezone!' }]}
       >
         <Select placeholder="select your timezone" showSearch>