WJMisgoodboy committed on
Commit 703aaf2
1 Parent(s): 3334935
api/__pycache__/chatText2ImageCommit.cpython-39.pyc ADDED
Binary file (911 Bytes).
 
api/__pycache__/chatText2ImageQuery.cpython-39.pyc ADDED
Binary file (1.7 kB).
 
api/chatText2ImageCommit.py ADDED
@@ -0,0 +1,35 @@
+ import requests
+ import json
+ from tool.getSign import sign
+ 
+ 
+ def getImageCommit(caption):
+     # Submit a text-to-image task and return its taskId (None if the request fails).
+     # url = "https://llm-bot-cn.heytapmobi.com/chat/v1/completions"
+     # input_content = "黑色的 短脚 胖猫"
+ 
+     payload = json.dumps({
+         "model": "midjourney",
+         "caption": caption,
+         "action": "IMAGINE",
+     })
+ 
+     app_id = 'auto_58d4be458b124966b29c61fad4186a2c'
+     secret_key = 'ce2aca400cf64be0b582a66fa2362817'
+     Authorization = sign([], payload, app_id, secret_key)
+ 
+     url = "http://llm.oppo.local/image/v1/text2image/commit"
+ 
+     headers = {
+         'recordId': 'auto_58d4be458b124966b29c61fad4186a2c',
+         'Authorization': Authorization,
+         'Content-Type': 'application/json'
+     }
+ 
+     response = requests.request("POST", url, headers=headers, data=payload)
+     if response.status_code == 200:
+         res = json.loads(response.text)
+         taskId = res['data']["taskId"]
+     else:
+         print("请求错误")
+         taskId = None
+ 
+     return taskId
api/chatText2ImageQuery.py ADDED
@@ -0,0 +1,64 @@
+ import json
+ from tool.getSign import sign
+ from api.chatText2ImageCommit import getImageCommit
+ import requests
+ import time
+ from PIL import Image
+ 
+ app_id = 'auto_58d4be458b124966b29c61fad4186a2c'
+ secret_key = 'ce2aca400cf64be0b582a66fa2362817'
+ 
+ 
+ # Drawing assistant
+ class DrawAssistant:
+     def getImageByCaption(self, taskId):
+         time.sleep(10)
+         payload = json.dumps({
+             "model": "midjourney",
+             "taskId": taskId
+         })
+ 
+         Authorization = sign([], payload, app_id, secret_key)
+         url = "http://llm.oppo.local/image/v1/text2image/query"
+         headers = {
+             'recordId': 'auto_58d4be458b124966b29c61fad4186a2c',
+             'Authorization': Authorization,
+             'Content-Type': 'application/json'
+         }
+ 
+         response = requests.request("POST", url, headers=headers, data=payload)
+         if response.status_code == 200:
+             res = json.loads(response.text)
+             # progress = res['data']['status']
+             result = res['data']['result']
+         else:
+             print("请求错误")
+             return 0
+ 
+         # print(progress)
+         if result != {}:
+             image_url = result['contentUrl']
+ 
+             response = requests.get(image_url)
+             image_data = response.content
+ 
+             # Save the image locally
+             with open('image.png', 'wb') as f:
+                 f.write(image_data)
+ 
+             # Load the image for display
+             image = Image.open('image.png')
+             # image.show()
+             return image
+ 
+         return 0
+ 
+     def drawOnePic(self, caption):
+         taskId = getImageCommit(caption)
+         time.sleep(20)
+         # Poll the query endpoint until a result comes back
+         for i in range(10):
+             res = self.getImageByCaption(taskId)
+             if res:
+                 break
+         return res
api/chatV1.py ADDED
@@ -0,0 +1,43 @@
+ import requests
+ import json
+ from tool.getSign import sign
+ import tool.keyFile as key
+ 
+ 
+ # url = "https://llm-bot-cn.heytapmobi.com/chat/v1/completions"
+ input_content = [
+     { "role": "system", "content": "You are a chatbot" },
+     { "role": "user", "content": "我能用中文向你提问题吗?" },
+     { "role": "assistant", "content": "当然可以,我可以用中文回答你的问题。请问你有什么需要帮助的吗?" },
+     { "role": "user", "content": "我想更好的与你交流,必须把每次交流信息告诉你是吗?" },
+     {
+         "role": "assistant",
+         "content": "是的,您需要输入您想要与我交流的信息,我才能理解并回答您的问题。如果您有任何疑问或需要帮助,请随时告诉我。"
+     },
+     { "role": "user", "content": "我上句话说了什么?" }
+ ]
+ 
+ payload = json.dumps({
+     "model": "gpt-4-8k",
+     "messages": input_content,
+     "n": 1,
+     "topP": 0.1,
+     "stop": None,
+     "maxTokens": 500
+ })
+ 
+ 
+ Authorization = sign([], payload, key.app_id, key.secret_key)
+ 
+ 
+ url = "http://llm.oppo.local/chat/v1/completions"
+ 
+ headers = {
+     'recordId': 'auto_58d4be458b124966b29c61fad4186a2c',
+     'Authorization': Authorization,
+     'Content-Type': 'application/json'
+ }
+ 
+ response = requests.request("POST", url, headers=headers, data=payload)
+ 
+ print(response.text)
app.py ADDED
@@ -0,0 +1,115 @@
+ import gradio as gr
+ 
+ from sdk.Chat_with_memory import Conversation
+ from sdk.culculate import Cuculator
+ from sdk.chat_with_net import ChatWithNet
+ from api.chatText2ImageQuery import DrawAssistant
+ from sdk.textSummary import SummaryAssistant
+ from sdk.chatWithKnowledgeBase import KnowledgeBaseAssistant
+ import tempfile
+ 
+ chat = Conversation()
+ culculator = Cuculator()
+ chatWithNet = ChatWithNet()
+ drawAssistant = DrawAssistant()
+ summaryAssistant = SummaryAssistant()
+ knowledgeBaseAssistant = KnowledgeBaseAssistant()
+ 
+ 
+ with gr.Blocks() as myAIBox:
+     with gr.Tab("AI聊天助手"):
+         # Blocks-specific container: children are stacked vertically.
+         # Vertical layout is the default, so the Column is optional.
+         with gr.Column():
+             text_input = gr.Textbox(label="请输入你的问题")
+             chatbot = gr.Chatbot()
+             ask = gr.Button("一键答疑")
+             ask.click(chat.ask, text_input, chatbot)
+             clearHistory = gr.Button("清空当前搜索结果")
+             clearHistory.click(chat.clearHistory, None, chatbot, queue=False)
+ 
+ 
+     with gr.Tab("智能计算器"):
+         # Blocks-specific container: children are stacked vertically.
+         with gr.Column():
+             math_input = gr.Textbox(label="请输入你的问题")
+             CulChatbot = gr.Chatbot()
+             ask = gr.Button("生成答案")
+             ask.click(culculator.culculate, math_input, CulChatbot)
+             clearHistory = gr.Button("清空当前搜索结果")
+             clearHistory.click(culculator.clearHistory, None, CulChatbot, queue=False)
+ 
+     with gr.Tab("联网问答"):
+         # Blocks-specific container: children are stacked vertically.
+         with gr.Column():
+             text_input = gr.Textbox(label="请输入你的问题")
+             NetChatbot = gr.Chatbot()
+             ask = gr.Button("谷歌搜索问答")
+             ask.click(chatWithNet.searhAndChat, text_input, NetChatbot)
+             clearHistory = gr.Button("清空当前搜索结果")
+             clearHistory.click(chatWithNet.clearHistory, None, NetChatbot, queue=False)
+ 
+     with gr.Tab("绘图助手"):
+         # Blocks-specific container: children are laid out horizontally.
+         with gr.Row():
+             text_input = gr.Textbox(label="请输入你希望画的图片及其细节(生成图片较慢,请耐心等待)")
+             image_output = gr.Image()
+             ask = gr.Button("生成图片")
+             ask.click(drawAssistant.drawOnePic, text_input, image_output)
+             clearPic = gr.Button("清空当前搜索结果")
+             clearPic.click(None, None, image_output, queue=False)
+ 
+     with gr.Tab("文本总结器"):
+         # Blocks-specific container: a Column of Rows.
+         global tmpdir
+         with tempfile.TemporaryDirectory() as tmpdir:
+             with gr.Column():
+                 with gr.Row():
+                     summary_text_input = gr.Textbox(label="请输入你希望总结的文本")
+                     summary_output = gr.Textbox(label="总结文本")
+                     send_btn = gr.Button("对文本生成总结")
+                     send_btn.click(summaryAssistant.summary_text, summary_text_input, summary_output)
+ 
+                 with gr.Row():
+                     summary_url_input = gr.Textbox(label="请输入你希望总结的网页")
+                     summary_output2 = gr.Textbox(label="网页地址")
+                     send_btn2 = gr.Button("对网页生成总结")
+                     send_btn2.click(summaryAssistant.summary_url, summary_url_input, summary_output2)
+ 
+                 with gr.Row():
+                     summary_file_input = gr.components.File(label="请输入你希望总结的文本文件(现在仅支持txt,pdf,word格式)")
+                     summary_output3 = gr.Textbox(label="总结文本")
+                     send_btn3 = gr.Button("对文件生成总结")
+                     send_btn3.click(summaryAssistant.summary_file, summary_file_input, summary_output3)
+ 
+     with gr.Tab("个人知识库"):
+         # Blocks-specific container: a Column of Rows.
+         with gr.Column(scale=4):
+             with gr.Row():
+                 knowledge_text_input = gr.Textbox(label="请输入你的知识库的文本")
+                 q_input = gr.Textbox(label="请输入你的问题")
+                 res_output = gr.Textbox(label="结果输出")
+                 s_btn = gr.Button("一键问答")
+                 s_btn.click(knowledgeBaseAssistant.knowledge_base_text, [knowledge_text_input, q_input], res_output)
+ 
+             with gr.Row():
+                 knowledge_URL_input = gr.Textbox(label="请输入你的网页链接")
+                 q2_input = gr.Textbox(label="请输入你的问题")
+                 res2_output = gr.Textbox(label="结果输出")
+                 s2_btn = gr.Button("一键问答")
+                 s2_btn.click(knowledgeBaseAssistant.knowledge_base_url, [knowledge_URL_input, q2_input], res2_output)
+ 
+             with gr.Row():
+                 knowledge_file_input = gr.components.File(label="请输入你的知识库文本(现在仅支持txt,pdf,word格式)")
+                 q3_input = gr.Textbox(label="请输入你的问题")
+                 res3_output = gr.Textbox(label="结果输出")
+                 s3_btn = gr.Button("一键问答")
+                 s3_btn.click(knowledgeBaseAssistant.knowledge_base_file, [knowledge_file_input, q3_input], res3_output)
+ 
+ 
+ if __name__ == "__main__":
+     myAIBox.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ beautifulsoup4==4.12.2
+ gradio==3.44.4
+ langchain==0.0.301
+ langkit==0.1.6
+ Pillow==10.0.1
+ requests==2.31.0
sdk/CChat.py ADDED
@@ -0,0 +1,35 @@
+ import langkit.api
+ from langkit.api.chat import chat_completion
+ import tool.keyFile as key
+ 
+ langkit.api.app_id = key.app_id
+ langkit.api.secret_key = key.secret_key
+ 
+ 
+ # Multi-turn conversation
+ class Conversation:
+     def __init__(self, prompt, num_of_round):
+         self.prompt = prompt
+         self.num_of_round = num_of_round
+         self.messages = []
+         self.messages.append({"role": "system", "content": self.prompt})
+ 
+     def ask(self, question):
+         try:
+             self.messages.append({"role": "user", "content": question})
+ 
+             ret = chat_completion(model='gpt-4-8k', messages=self.messages)
+         except Exception as e:
+             print(e)
+             return e
+ 
+         message = ret.data["choices"][0]["message"]
+         self.messages.append(message)
+ 
+         # Keep only the last num_of_round rounds plus the system prompt.
+         if len(self.messages) > self.num_of_round*2 + 1:
+             del self.messages[1:3]  # Remove the earliest remaining round.
+         print(message["content"])
+         return message["content"]
+ 
+ 
+ # chat = Conversation("""你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求: 1. 你的回答必须是中文 2. 回答限制在100个字以内""", 5)
+ # chat.ask("宫保鸡丁怎么做")
+ # chat.ask("糖醋里脊怎么做")
+ # chat.ask("我的第一个问题是什么")
sdk/Chat_with_memory.py ADDED
@@ -0,0 +1,59 @@
+ from langchain import PromptTemplate
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationSummaryBufferMemory
+ from langkit.langchain_helpers import LLMAdaptor
+ import tool.keyFile as key
+ import langkit.api
+ 
+ langkit.api.app_id = key.app_id
+ langkit.api.secret_key = key.secret_key
+ 
+ 
+ # Multi-turn conversation with summarized memory
+ class Conversation:
+     def __init__(self):
+         self.SUMMARIZER_TEMPLATE = """请将以下内容逐步概括所提供的对话内容,并将新的概括添加到之前的概括中,形成新的概括。
+ Current summary:
+ {summary}
+ New lines of conversation:
+ {new_lines}
+ New summary:
+ """
+         self.CHAT_TEMPLATE = """
+ {history}
+ Human: {input}
+ AI:"""
+ 
+         self.SUMMARY_PROMPT = PromptTemplate(
+             input_variables=["summary", "new_lines"], template=self.SUMMARIZER_TEMPLATE
+         )
+ 
+         self.CHAT_PROMPT = PromptTemplate(input_variables=["history", "input"], template=self.CHAT_TEMPLATE)
+ 
+         self.memory = ConversationSummaryBufferMemory(llm=LLMAdaptor(model='gpt-4-8k'), prompt=self.SUMMARY_PROMPT, max_token_limit=4000, k=5)
+         self.conversation_histroy = []
+ 
+         self.conversation_with_summary = ConversationChain(
+             llm=LLMAdaptor(model='gpt-4-8k'),
+             prompt=self.CHAT_PROMPT,
+             memory=self.memory,
+             verbose=False
+         )
+ 
+     def ask(self, question):
+         ans = self.conversation_with_summary.predict(input=question)
+         self.conversation_histroy.append((question, ans))
+         return self.conversation_histroy
+ 
+     def clearHistory(self):
+         self.memory = ConversationSummaryBufferMemory(llm=LLMAdaptor(model='gpt-4-8k'), prompt=self.SUMMARY_PROMPT, max_token_limit=4000, k=5)
+         # Reattach the fresh memory so the existing chain forgets the old conversation.
+         self.conversation_with_summary.memory = self.memory
+         self.conversation_histroy = []
+         return []
+ 
+ 
+ # chat = Conversation()
+ #
+ # ans , history = chat.ask("宫保鸡丁怎么做")
+ 
+ # print(chat.ask("我的上一个问题是什么"))
sdk/LLMAdaptor.py ADDED
@@ -0,0 +1,48 @@
+ import logging
+ from typing import Optional, List, Mapping, Any
+ 
+ from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
+ from langchain.llms.base import LLM
+ from langkit.api import chat_completion, a_chat_completion
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ class LLMAdaptor(LLM):
+     model: str
+     temperature: float = 1.0
+     topP: int = 1
+     maxTokens: int = 4096
+     presencePenalty: float = 0
+     frequencyPenalty: float = 0
+     timeout_secs: int = 180
+ 
+     def _call(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any
+     ) -> str:
+         params = dict(self._identifying_params)
+         if stop:
+             params['stop'] = stop
+         resp = chat_completion(self.model, [{'role': 'user', 'content': prompt}], **params)
+         if resp.code != 0:
+             raise ValueError(f"LLM Adaptor resp error, resp: {resp}")
+         return resp.data['choices'][0]['message']['content']
+ 
+     @property
+     def _llm_type(self) -> str:
+         return "llm-adaptor"
+ 
+     @property
+     def _identifying_params(self) -> Mapping[str, Any]:
+         return {
+             "temperature": self.temperature,
+             "topP": self.topP,
+             "maxTokens": self.maxTokens,
+             "presencePenalty": self.presencePenalty,
+             "frequencyPenalty": self.frequencyPenalty,
+             'timeout_secs': self.timeout_secs
+         }
sdk/__pycache__/Chat_with_memory.cpython-39.pyc ADDED
Binary file (2.08 kB).
 
sdk/__pycache__/chatWithKnowledgeBase.cpython-39.pyc ADDED
Binary file (2.54 kB).
 
sdk/__pycache__/chat_with_net.cpython-39.pyc ADDED
Binary file (1.49 kB).
 
sdk/__pycache__/culculate.cpython-39.pyc ADDED
Binary file (1.51 kB).
 
sdk/__pycache__/textSummary.cpython-39.pyc ADDED
Binary file (2.39 kB).
 
sdk/chat.py ADDED
@@ -0,0 +1,16 @@
+ import langkit.api
+ from langkit.api.chat import chat_completion
+ import tool.keyFile as key
+ 
+ langkit.api.app_id = key.app_id
+ langkit.api.secret_key = key.secret_key
+ 
+ 
+ def chat(content):
+     ret = chat_completion(model='gpt-4-8k', messages=[
+         {'role': 'user', 'content': content}
+     ])
+     if ret.msg == 'ok':
+         return ret.data['choices'][0]['message']['content']
+     return "try again"
+ 
+ 
+ print(chat("你好"))
sdk/chatLLM.py ADDED
@@ -0,0 +1,23 @@
+ from langchain.chains import LLMChain
+ from langchain.prompts import PromptTemplate
+ from langchain.memory import ConversationBufferMemory
+ import langkit.api
+ import tool.keyFile as key
+ from langkit.langchain_helpers import LLMAdaptor
+ 
+ langkit.api.app_id = key.app_id
+ langkit.api.secret_key = key.secret_key
+ 
+ 
+ def test_llm_adaptor():
+     llm = LLMAdaptor(model='gpt-4-8k')
+     prompt = PromptTemplate(
+         input_variables=["product"],
+         template="What is a good name for a company that makes {product}?",
+     )
+     chain = LLMChain(llm=llm, prompt=prompt, memory=ConversationBufferMemory())
+     print(chain.run("colorful socks"))
+ 
+ 
+ memory = ConversationBufferMemory()
+ memory.chat_memory.add_user_message("hi!")
+ memory.chat_memory.add_ai_message("whats up?")
+ test_llm_adaptor()
sdk/chatTest.py ADDED
@@ -0,0 +1,41 @@
+ from langchain import PromptTemplate
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationSummaryBufferMemory
+ from langkit.langchain_helpers import LLMAdaptor
+ import tool.keyFile as key
+ import langkit.api
+ 
+ langkit.api.app_id = key.app_id
+ langkit.api.secret_key = key.secret_key
+ 
+ 
+ SUMMARIZER_TEMPLATE = """请将以下内容逐步概括所提供的对话内容,并将新的概括添加到之前的概括中,形成新的概括。
+ Current summary:
+ {summary}
+ New lines of conversation:
+ {new_lines}
+ New summary:
+ """
+ 
+ SUMMARY_PROMPT = PromptTemplate(
+     input_variables=["summary", "new_lines"], template=SUMMARIZER_TEMPLATE
+ )
+ 
+ memory = ConversationSummaryBufferMemory(llm=LLMAdaptor(model='gpt-4-8k'), prompt=SUMMARY_PROMPT, max_token_limit=4000, k=5)
+ 
+ CHAT_TEMPLATE = """
+ {history}
+ Human: {input}
+ AI:"""
+ CHAT_PROMPT = PromptTemplate(
+     input_variables=["history", "input"], template=CHAT_TEMPLATE
+ )
+ 
+ conversation_with_summary = ConversationChain(
+     llm=LLMAdaptor(model='gpt-4-8k'),
+     prompt=CHAT_PROMPT,
+     memory=memory,
+     verbose=True
+ )
+ answer = conversation_with_summary.predict(input="如何做一道好吃的宫保鸡丁?")
+ print(answer)
sdk/chatWithKnowledgeBase.py ADDED
@@ -0,0 +1,71 @@
+ import langkit.api
+ from langchain.chains import RetrievalQA
+ from langchain.vectorstores import Chroma
+ from langkit.langchain_helpers import LLMAdaptor, EmbeddingsAdaptor
+ from langchain.document_loaders import UnstructuredFileLoader, PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.document_loaders import WebBaseLoader
+ 
+ import tool.keyFile as key
+ langkit.api.app_id = key.app_id
+ langkit.api.secret_key = key.secret_key
+ 
+ 
+ # Knowledge-base question answering
+ class KnowledgeBaseAssistant:
+     def __init__(self):
+         self.llm = LLMAdaptor(model='gpt-4-8k')
+         self.embeddings = EmbeddingsAdaptor()
+ 
+     def knowledge_base_text(self, text, question):
+         text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=50)
+         docs = text_splitter.create_documents([text])
+ 
+         db = Chroma.from_documents(docs, self.embeddings)
+         retriever = db.as_retriever(search_kwargs={"k": 2})
+         qa = RetrievalQA.from_chain_type(llm=self.llm, chain_type="stuff", retriever=retriever)
+ 
+         try:
+             res = qa.run(question)
+         except Exception:
+             res = "请重新尝试"
+         return res
+ 
+     def knowledge_base_url(self, url, question):
+         loader = WebBaseLoader(url)
+         documents = loader.load()
+         text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=50)
+         docs = text_splitter.split_documents(documents)
+ 
+         db = Chroma.from_documents(docs, self.embeddings)
+         retriever = db.as_retriever(search_kwargs={"k": 2})
+         qa = RetrievalQA.from_chain_type(llm=self.llm, chain_type="stuff", retriever=retriever)
+ 
+         try:
+             res = qa.run(question)
+         except Exception:
+             res = "请重新尝试"
+         return res
+ 
+     def knowledge_base_file(self, file_obj, question):
+         if file_obj.name.endswith(".pdf"):
+             loader = PyPDFLoader(file_obj.name)
+         else:
+             loader = UnstructuredFileLoader(file_obj.name)
+         document = loader.load()
+         # Initialize the text splitter
+         text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=500,
+             chunk_overlap=100
+         )
+         docs = text_splitter.split_documents(document)
+         db = Chroma.from_documents(docs, self.embeddings)
+         retriever = db.as_retriever(search_kwargs={"k": 2})
+         qa = RetrievalQA.from_chain_type(llm=self.llm, chain_type="stuff", retriever=retriever)
+ 
+         try:
+             res = qa.run(question)
+         except Exception:
+             res = "请重新尝试"
+         return res
sdk/chat_with_net.py ADDED
@@ -0,0 +1,51 @@
+ import langkit.api
+ import tool.keyFile as key
+ from langchain import LLMMathChain
+ from langkit.langchain_helpers import LLMAdaptor
+ from langchain.agents import load_tools
+ from langchain.agents import initialize_agent
+ import os
+ 
+ 
+ class ChatWithNet:
+     def __init__(self):
+         langkit.api.app_id = key.app_id
+         langkit.api.secret_key = key.secret_key
+         os.environ['SERPAPI_API_KEY'] = key.serApi_key
+         self.model = 'gpt-4-8k'
+         self.res = []
+ 
+     # Answer a question with a web-search (SerpAPI) agent
+     def searhAndChat(self, expression):
+         llm = LLMAdaptor(model=self.model)
+         tools = load_tools(["serpapi"], llm=llm)
+         agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
+         try:
+             result = agent.run(expression)
+         except Exception as e:
+             print(e)
+             result = "抱歉,我还不知道怎么回答这个问题"
+ 
+         self.res.append((expression, result))
+ 
+         return self.res
+ 
+     def clearHistory(self):
+         self.res = []
+         return []
+ 
+ 
+ # coculator = Cuculator()
+ # res = coculator.culculate("1+1")
+ # print(res)
+ 
+ # tools = load_tools(["serpapi", "llm-math"], llm=llm)
+ # agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
+ # # Print the result
+ # agent.run("Who is Taylor's boyfriend? What is his current age raised to the 3 power?")
+ 
+ # llm_math = LLMMathChain(llm=llm, verbose=True)
+ # result = llm_math.run("add(12 20 3 231 4)")
+ # print(result)
sdk/culculate.py ADDED
@@ -0,0 +1,59 @@
+ import langkit.api
+ import tool.keyFile as key
+ from langchain import LLMMathChain
+ from langkit.langchain_helpers import LLMAdaptor
+ from langchain.agents import load_tools
+ from langchain.agents import initialize_agent
+ import os
+ 
+ 
+ class Cuculator:
+     def __init__(self):
+         langkit.api.app_id = key.app_id
+         langkit.api.secret_key = key.secret_key
+         os.environ['SERPAPI_API_KEY'] = key.serApi_key
+         self.model = 'gpt-4-8k'
+         self.res = []
+ 
+     # superCuculator: try a pure math chain first, then fall back to a search+math agent
+     def culculate(self, expression):
+         llm = LLMAdaptor(model=self.model)
+         llm_math = LLMMathChain(llm=llm, verbose=True)
+         ask_flag = True
+         try:
+             result = llm_math.run(expression)
+         except Exception as e:
+             print(e)
+             ask_flag = False
+ 
+         if not ask_flag:
+             tools = load_tools(["serpapi", "llm-math"], llm=llm)
+             agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
+             result = agent.run(expression)
+ 
+         self.res.append((expression, result))
+ 
+         return self.res
+ 
+     def clearHistory(self):
+         self.res = []
+         return []
+ 
+ 
+ # coculator = Cuculator()
+ # res = coculator.culculate("1+1")
+ # print(res)
+ 
+ # tools = load_tools(["serpapi", "llm-math"], llm=llm)
+ # agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
+ # # Print the result
+ # agent.run("Who is Taylor's boyfriend? What is his current age raised to the 3 power?")
+ 
+ # llm_math = LLMMathChain(llm=llm, verbose=True)
+ # result = llm_math.run("add(12 20 3 231 4)")
+ # print(result)
sdk/textSummary.py ADDED
@@ -0,0 +1,75 @@
+ import langkit.api
+ from langkit.langchain_helpers import LLMAdaptor
+ from langchain.document_loaders import UnstructuredFileLoader, PyPDFLoader
+ from langchain.chains.summarize import load_summarize_chain
+ from langchain.prompts import PromptTemplate
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.document_loaders import WebBaseLoader
+ 
+ import tool.keyFile as key
+ langkit.api.app_id = key.app_id
+ langkit.api.secret_key = key.secret_key
+ 
+ 
+ # Long-text summarization
+ class SummaryAssistant:
+     def __init__(self):
+         self.llm = LLMAdaptor(model='gpt-4-8k')
+ 
+     def summary_text(self, text):
+         text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=50)
+         docs = text_splitter.create_documents([text])
+         prompt_template = """写出以下内容的简洁摘要:
+ {text}
+ 简洁摘要:"""
+         PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
+         chain = load_summarize_chain(self.llm, chain_type="refine", verbose=True, question_prompt=PROMPT)
+         try:
+             res = chain.run(docs)
+         except Exception:
+             res = "生成摘要失败,请重新尝试"
+         return res
+ 
+     def summary_url(self, url):
+         loader = WebBaseLoader(url)
+         documents = loader.load()
+         prompt_template = """写出以下内容的简洁摘要:
+ {text}
+ 简洁摘要:"""
+         PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
+         chain = load_summarize_chain(self.llm, chain_type="refine", verbose=True, question_prompt=PROMPT)
+         try:
+             res = chain.run(documents)
+         except Exception:
+             res = "生成摘要失败,请重新尝试"
+         return res
+ 
+     def summary_file(self, file_obj):
+         if file_obj.name.endswith(".pdf"):
+             loader = PyPDFLoader(file_obj.name)
+         else:
+             loader = UnstructuredFileLoader(file_obj.name)
+         document = loader.load()
+         # Initialize the text splitter
+         text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=500,
+             chunk_overlap=100
+         )
+ 
+         # Split the text
+         split_documents = text_splitter.split_documents(document)
+         prompt_template = """写出以下内容的简洁摘要:
+ {text}
+ 简洁摘要:"""
+         PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
+         chain = load_summarize_chain(self.llm, chain_type="refine", verbose=False, question_prompt=PROMPT)
+         try:
+             res = chain.run(split_documents)
+         except Exception:
+             res = "生成摘要失败,请重新尝试"
+         return res
+ 
+ 
+ # if __name__ == "__main__":
+ #     summaryAssistant = SummaryAssistant()
+ #     print(summaryAssistant.summary_text("你好"))
+ #     print(summaryAssistant.summary_url("https://mp.weixin.qq.com/s/RhzHa1oMd0WHk0JamdfVRA"))
+ #     print(summaryAssistant.summary_file("test.txt"))
tool/__pycache__/getSign.cpython-39.pyc ADDED
Binary file (844 Bytes).
 
tool/__pycache__/keyFile.cpython-39.pyc ADDED
Binary file (329 Bytes).
 
tool/getSign.py ADDED
@@ -0,0 +1,24 @@
+ import hmac
+ import hashlib
+ from collections import OrderedDict
+ import time
+ 
+ 
+ def sign(params, body, app_id, secret_key):
+ 
+     # 1. Build the auth string prefix in the form bot-auth-v1/{appId}/{timestamp}/,
+     #    where timestamp is in milliseconds and is used to check whether the request has expired.
+     auth_string_prefix = f"bot-auth-v1/{app_id}/{int(time.time() * 1000)}/"
+     sb = [auth_string_prefix]
+ 
+     # 2. Build the URL parameter string, with parameter names sorted in ascending lexicographic order.
+     if params:
+         ordered_params = OrderedDict(sorted(params.items()))
+         sb.extend(["{}={}&".format(k, v) for k, v in ordered_params.items()])
+ 
+     # 3. Concatenate the string to be signed.
+     sign_str = "".join(sb) + body
+ 
+     # 4. Sign it with HMAC-SHA256.
+     signature = hmac.new(secret_key.encode('utf-8'), sign_str.encode('utf-8'), hashlib.sha256).hexdigest()
+ 
+     # 5. Concatenate the final authorization string.
+     return auth_string_prefix + signature
tool/keyFile.py ADDED
@@ -0,0 +1,3 @@
+ app_id = 'auto_58d4be458b124966b29c61fad4186a2c'
+ secret_key = 'ce2aca400cf64be0b582a66fa2362817'
+ serApi_key = 'fc4668c995941ca731a8a03a178d9fdefbc261d48c76f785db07454bd2c20254'