H committed · Commit eb6e194 · Parent: 5d39832
Fix graphrag : "role" user (#2273)
### What problem does this PR solve?
#2270
### Type of change
- [x] Bug Fix (non-breaking change which fixes an issue)
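
The graphrag extractors below all called `self._llm.chat(prompt, [], gen_conf)` with an empty history, so the flattened conversation contained only a system message. The fix seeds each call with a stub `{"role": "user", "content": "Output:"}` turn. A minimal sketch of why that matters, assuming the wrapper simply prepends the system prompt to the history (RAGFlow's real `chat` wrapper may differ):

```python
# Hedged sketch of the failure mode this PR works around; build_messages
# is an assumed stand-in for whatever RAGFlow's LLM wrapper does.
def build_messages(system_prompt: str, history: list[dict]) -> list[dict]:
    """Flatten the system prompt and prior turns into one message list."""
    return [{"role": "system", "content": system_prompt}] + history

# Before the fix: only a system message. Several chat APIs reject a
# conversation that contains no {"role": "user"} turn.
before = build_messages("Extract claims from the text...", [])

# After the fix: the stub "Output:" user turn makes the request valid.
after = build_messages("Extract claims from the text...",
                       [{"role": "user", "content": "Output:"}])

assert not any(m["role"] == "user" for m in before)
assert any(m["role"] == "user" for m in after)
```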
agent/component/jin10.py
CHANGED
```diff
@@ -100,8 +100,8 @@ class Jin10(ComponentBase, ABC):
             if self._param.symbols_datatype == "quotes":
                 for i in response['data']:
                     i['Selling Price'] = i['a']
-                    i['
-                    i['
+                    i['Buying Price'] = i['b']
+                    i['Commodity Code'] = i['c']
                     i['Stock Exchange'] = i['e']
                     i['Highest Price'] = i['h']
                     i['Yesterday’s Closing Price'] = i['hc']
```
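For context, the quotes branch is a straight field relabeling: Jin10 returns single-letter keys and the loop copies them to readable names. A minimal table-driven sketch of the same idea (the letter-to-label pairs come from the diff; `relabel_quote` is a hypothetical helper, not RAGFlow code):

```python
# Letter -> label pairs as they appear in the diff above.
FIELD_LABELS = {
    'a': 'Selling Price',
    'b': 'Buying Price',
    'c': 'Commodity Code',
    'e': 'Stock Exchange',
    'h': 'Highest Price',
    'hc': "Yesterday's Closing Price",
}

def relabel_quote(row: dict) -> dict:
    """Return a copy of one quotes record with readable keys added."""
    out = dict(row)
    for short, label in FIELD_LABELS.items():
        if short in row:
            out[label] = row[short]
    return out

assert relabel_quote({'a': 1.2, 'b': 1.1})['Buying Price'] == 1.1
```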
graphrag/claim_extractor.py
CHANGED
```diff
@@ -170,7 +170,7 @@ class ClaimExtractor:
         }
         text = perform_variable_replacements(self._extraction_prompt, variables=variables)
         gen_conf = {"temperature": 0.5}
-        results = self._llm.chat(text, [], gen_conf)
+        results = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         claims = results.strip().removesuffix(completion_delimiter)
         history = [{"role": "system", "content": text}, {"role": "assistant", "content": results}]

```
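Beyond the history fix, this hunk shows the post-processing step: the prompt asks the model to end its answer with a completion delimiter, which is stripped before parsing. A small sketch (the delimiter value is hypothetical; the real one comes from the prompt variables), noting that `str.removesuffix` needs Python 3.9+:

```python
# Hypothetical delimiter; the actual value is supplied via prompt variables.
completion_delimiter = "<|COMPLETE|>"

raw = "claim 1<record>claim 2<|COMPLETE|>"
claims = raw.strip().removesuffix(completion_delimiter)  # Python 3.9+
assert claims == "claim 1<record>claim 2"
```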
graphrag/community_reports_extractor.py
CHANGED
```diff
@@ -76,7 +76,7 @@ class CommunityReportsExtractor:
         text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
         gen_conf = {"temperature": 0.3}
         try:
-            response = self._llm.chat(text, [], gen_conf)
+            response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
             token_count += num_tokens_from_string(text + response)
             response = re.sub(r"^[^\{]*", "", response)
             response = re.sub(r"[^\}]*$", "", response)
@@ -125,4 +125,5 @@ class CommunityReportsExtractor:
         report_sections = "\n\n".join(
             f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
         )
-
+
+        return f"# {title}\n\n{summary}\n\n{report_sections}"
```
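The two `re.sub` calls in the first hunk isolate the JSON object from any chatty preamble or trailer the model adds around it. A quick illustration:

```python
import re

response = 'Sure, here is the report:\n{"title": "T", "findings": []}\nHope this helps!'
response = re.sub(r"^[^\{]*", "", response)  # drop everything before the first '{'
response = re.sub(r"[^\}]*$", "", response)  # drop everything after the last '}'
assert response == '{"title": "T", "findings": []}'
```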
graphrag/entity_resolution.py
CHANGED
```diff
@@ -125,7 +125,7 @@ class EntityResolution:
         }
         text = perform_variable_replacements(self._resolution_prompt, variables=variables)

-        response = self._llm.chat(text, [], gen_conf)
+        response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         result = self._process_results(len(candidate_resolution_i[1]), response,
                                        prompt_variables.get(self._record_delimiter_key,
                                                             DEFAULT_RECORD_DELIMITER),
```
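`_process_results` itself is not part of this diff; judging by its arguments, it splits the response on a record delimiter. A speculative sketch of that parsing step (both the helper and the `##` delimiter value are assumptions):

```python
DEFAULT_RECORD_DELIMITER = "##"  # assumed value, mirroring the constant's name

def split_records(response: str, delimiter: str = DEFAULT_RECORD_DELIMITER) -> list[str]:
    """Split an LLM response into one cleaned string per resolution record."""
    return [part.strip() for part in response.split(delimiter) if part.strip()]

assert split_records("(1, yes)##(2, no)") == ["(1, yes)", "(2, no)"]
```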
graphrag/graph_extractor.py
CHANGED
```diff
@@ -163,7 +163,7 @@ class GraphExtractor:
         token_count = 0
         text = perform_variable_replacements(self._extraction_prompt, variables=variables)
         gen_conf = {"temperature": 0.3}
-        response = self._llm.chat(text, [], gen_conf)
+        response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         token_count = num_tokens_from_string(text + response)

         results = response or ""
```
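`num_tokens_from_string` is used here to charge the prompt and the reply in one call. Its implementation is not part of this diff; a plausible sketch, assuming it wraps a tiktoken encoder:

```python
import tiktoken

def num_tokens_from_string(text: str, encoding_name: str = "cl100k_base") -> int:
    """Count tokens in a string; assumed tiktoken-based implementation."""
    return len(tiktoken.get_encoding(encoding_name).encode(text))

# Mirroring the hunk above: prompt and response are billed together.
token_count = num_tokens_from_string("prompt text" + "model reply")
```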
graphrag/mind_map_extractor.py
CHANGED
````diff
@@ -180,7 +180,7 @@ class MindMapExtractor:
         }
         text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
         gen_conf = {"temperature": 0.5}
-        response = self._llm.chat(text, [], gen_conf)
+        response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         response = re.sub(r"```[^\n]*", "", response)
         print(response)
         print("---------------------------------------------------\n", self._todict(markdown_to_json.dictify(response)))
````
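The second `re.sub` call deals with models that wrap the mind map in code fences: it deletes every fence marker (three backticks plus any language tag) so `markdown_to_json.dictify` sees plain markdown. For example:

````python
import re

# Models often wrap markdown output in ``` fences; strip the markers so
# the outline parses as plain markdown.
response = "```markdown\n# Root\n## Child\n```"
response = re.sub(r"```[^\n]*", "", response)
assert response.strip() == "# Root\n## Child"
````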