Zevol committed on
Commit
b8f371f
1 Parent(s): 01bce98

Upload 33 files

LICENSE ADDED
@@ -0,0 +1,19 @@
+ Copyright (c) 2022 zhayujie
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
- title: LLMspace
- emoji: 🚀
- colorFrom: indigo
- colorTo: pink
+ title: wechat-bot
+ emoji: 👀
+ colorFrom: red
+ colorTo: yellow
  sdk: gradio
- sdk_version: 4.12.0
+ sdk_version: 3.19.1
  app_file: app.py
  pinned: false
+ duplicated_from: lewisliuX123/wechatgpt35
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,22 +1,45 @@
+ # encoding:utf-8
+
+ import config
  import gradio as gr
- def predict(message, history):
-     history_openai_format = []
-     for human, assistant in history:
-         history_openai_format.append({"role": "user", "content": human })
-         history_openai_format.append({"role": "assistant", "content":assistant})
-     history_openai_format.append({"role": "user", "content": message})
-
-     response = openai.ChatCompletion.create(
-         model='gpt-3.5-turbo',
-         messages= history_openai_format,
-         temperature=1.0,
-         stream=True
-     )
-
-     partial_message = ""
-     for chunk in response:
-         if len(chunk['choices'][0]['delta']) != 0:
-             partial_message = partial_message + chunk['choices'][0]['delta']['content']
-             yield partial_message
-
- gr.ChatInterface(predict).launch()
+ from channel import channel_factory
+ from common.log import logger
+ from io import BytesIO
+ from PIL import Image
+ from concurrent.futures import ThreadPoolExecutor
+ thread_pool = ThreadPoolExecutor(max_workers=8)
+
+ def getImage(bytes):
+     bytes_stream = BytesIO(bytes)
+     image = Image.open(bytes_stream)
+     return image
+
+ def getLoginUrl():
+     # load config
+     config.load_config()
+
+     # create channel
+     bot = channel_factory.create_channel("wx")
+     thread_pool.submit(bot.startup)
+
+     while (True):
+         if bot.getQrCode():
+             return getImage(bot.getQrCode())
+
+ if __name__ == '__main__':
+     try:
+
+         with gr.Blocks() as demo:
+             with gr.Row():
+                 with gr.Column():
+                     btn = gr.Button(value="生成二维码")
+                 with gr.Column():
+                     outputs=[gr.Pil()]
+                     btn.click(getLoginUrl, outputs=outputs)
+
+         demo.launch()
+
+
+     except Exception as e:
+         logger.error("App startup failed!")
+         logger.exception(e)
bot/baidu/baidu_unit_bot.py ADDED
@@ -0,0 +1,26 @@
+ # encoding:utf-8
+
+ import requests
+ from bot.bot import Bot
+
+
+ # Baidu Unit chat API (works, but relatively weak)
+ class BaiduUnitBot(Bot):
+     def reply(self, query, context=None):
+         token = self.get_token()
+         url = 'https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=' + token
+         post_data = "{\"version\":\"3.0\",\"service_id\":\"S73177\",\"session_id\":\"\",\"log_id\":\"7758521\",\"skill_ids\":[\"1221886\"],\"request\":{\"terminal_id\":\"88888\",\"query\":\"" + query + "\", \"hyper_params\": {\"chat_custom_bot_profile\": 1}}}"
+         print(post_data)
+         headers = {'content-type': 'application/x-www-form-urlencoded'}
+         response = requests.post(url, data=post_data.encode(), headers=headers)
+         if response:
+             return response.json()['result']['context']['SYS_PRESUMED_HIST'][1]
+
+     def get_token(self):
+         access_key = 'YOUR_ACCESS_KEY'
+         secret_key = 'YOUR_SECRET_KEY'
+         host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=' + access_key + '&client_secret=' + secret_key
+         response = requests.get(host)
+         if response:
+             print(response.json())
+             return response.json()['access_token']
bot/bot.py ADDED
@@ -0,0 +1,13 @@
+ """
+ Auto-reply chat robot abstract class
+ """
+
+
+ class Bot(object):
+     def reply(self, query, context=None):
+         """
+         bot auto-reply content
+         :param query: received message
+         :return: reply content
+         """
+         raise NotImplementedError
bot/bot_factory.py ADDED
@@ -0,0 +1,26 @@
+ """
+ bot factory
+ """
+
+
+ def create_bot(bot_type):
+     """
+     create a bot instance
+     :param bot_type: bot type code
+     :return: bot instance
+     """
+     if bot_type == 'baidu':
+         # Baidu Unit chat API
+         from bot.baidu.baidu_unit_bot import BaiduUnitBot
+         return BaiduUnitBot()
+
+     elif bot_type == 'chatGPT':
+         # ChatGPT web API
+         from bot.chatgpt.chat_gpt_bot import ChatGPTBot
+         return ChatGPTBot()
+
+     elif bot_type == 'openAI':
+         # official OpenAI chat completion API
+         from bot.openai.open_ai_bot import OpenAIBot
+         return OpenAIBot()
+     raise RuntimeError
bot/chatgpt/chat_gpt_bot.py ADDED
@@ -0,0 +1,131 @@
+ # encoding:utf-8
+
+ from bot.bot import Bot
+ from config import conf
+ from common.log import logger
+ import openai
+ import time
+
+ user_session = dict()
+
+ # OpenAI chat completion API (working)
+ class ChatGPTBot(Bot):
+     def __init__(self):
+         openai.api_key = conf().get('open_ai_api_key')
+         # openai.api_base="https://apai.zyai.online/v1"
+
+     def reply(self, query, context=None):
+         # acquire reply content
+         if not context or not context.get('type') or context.get('type') == 'TEXT':
+             logger.info("[OPEN_AI] query={}".format(query))
+             from_user_id = context['from_user_id']
+             if query == '#清除记忆':
+                 Session.clear_session(from_user_id)
+                 return '记忆已清除'
+
+             new_query = Session.build_session_query(query, from_user_id)
+             logger.debug("[OPEN_AI] session query={}".format(new_query))
+
+             # if context.get('stream'):
+             #     # reply in stream
+             #     return self.reply_text_stream(query, new_query, from_user_id)
+
+             reply_content = self.reply_text(new_query, from_user_id, 0)
+             logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
+             if reply_content:
+                 Session.save_session(query, reply_content, from_user_id)
+             return reply_content
+
+         elif context.get('type', None) == 'IMAGE_CREATE':
+             return self.create_img(query, 0)
+
+     def reply_text(self, query, user_id, retry_count=0):
+         try:
+             response = openai.ChatCompletion.create(
+                 model="gpt-3.5-turbo",  # chat model name
+                 messages=query,
+                 temperature=1,  # value in [0,1]; higher means less deterministic replies
+                 max_tokens=600,  # maximum number of tokens in the reply
+                 top_p=1,
+                 frequency_penalty=0,  # in [-2,2]; higher values favor new content
+                 presence_penalty=0,  # in [-2,2]; higher values favor new content
+             )
+             # res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
+             logger.info(response.choices[0]['message']['content'])
+             # log.info("[OPEN_AI] reply={}".format(res_content))
+             return response.choices[0]['message']['content']
+         # except openai.error.RateLimitError as e:
+         #     # rate limit exception
+         #     logger.warn(e)
+         #     if retry_count < 3:
+         #         time.sleep(5)
+         #         logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
+         #         return self.reply_text(query, user_id, retry_count+1)
+         #     else:
+         #         return "问太快了,慢点行不行"
+         except Exception as e:
+             # unknown exception
+             logger.exception(e)
+             Session.clear_session(user_id)
+             return "没听懂"
+
+     def create_img(self, query, retry_count=0):
+         try:
+             logger.info("[OPEN_AI] image_query={}".format(query))
+             response = openai.Image.create(
+                 prompt=query,  # image description
+                 n=1,  # number of images to generate per request
+                 size="1024x1024"  # image size: 256x256, 512x512 or 1024x1024
+             )
+             image_url = response['data'][0]['url']
+             logger.info("[OPEN_AI] image_url={}".format(image_url))
+             return image_url
+         # except openai.error.RateLimitError as e:
+         #     logger.warn(e)
+         #     if retry_count < 3:
+         #         time.sleep(5)
+         #         logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
+         #         return self.reply_text(query, retry_count+1)
+         #     else:
+         #         return "问太快了,慢点行不行"
+         except Exception as e:
+             logger.exception(e)
+             return None
+
+ class Session(object):
+     @staticmethod
+     def build_session_query(query, user_id):
+         '''
+         build query with conversation history
+         e.g. [
+             {"role": "system", "content": "You are a helpful assistant,let's think step by step in multiple different ways."},
+             {"role": "user", "content": "Who won the world series in 2020?"},
+             {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
+             {"role": "user", "content": "Where was it played?"}
+         ]
+         :param query: query content
+         :param user_id: from user id
+         :return: query content with conversation history
+         '''
+         session = user_session.get(user_id, [])
+         if len(session) == 0:
+             system_prompt = conf().get("character_desc", "")
+             system_item = {'role': 'system', 'content': system_prompt}
+             session.append(system_item)
+             user_session[user_id] = session
+         user_item = {'role': 'user', 'content': query}
+         session.append(user_item)
+         return session
+
+     @staticmethod
+     def save_session(query, answer, user_id):
+         session = user_session.get(user_id)
+         if session:
+             # append conversation
+             gpt_item = {'role': 'assistant', 'content': answer}
+             session.append(gpt_item)
+
+     @staticmethod
+     def clear_session(user_id):
+         user_session[user_id] = []
+
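For reference, the per-user session that Session.build_session_query returns is just the standard chat-completion message list; a minimal standalone sketch (illustrative values only, not part of this commit) of what it holds after two turns:

    # Shape of the session built by Session.build_session_query and extended by
    # Session.save_session (values are illustrative).
    session = [
        {"role": "system", "content": "You are a helpful assistant."},  # from character_desc
        {"role": "user", "content": "Who won the world series in 2020?"},
        {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
        {"role": "user", "content": "Where was it played?"},  # newest query
    ]
    # This list is what ChatGPTBot.reply_text passes as messages=query to
    # openai.ChatCompletion.create; the '#清除记忆' command simply resets it to [].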
bot/openai/open_ai_bot.py ADDED
@@ -0,0 +1,166 @@
+ # encoding:utf-8
+
+ from bot.bot import Bot
+ from config import conf
+ from common.log import logger
+ import openai
+ import time
+
+ user_session = dict()
+
+ # OpenAI completion API (working)
+ class OpenAIBot(Bot):
+     def __init__(self):
+         openai.api_key = conf().get('open_ai_api_key')
+
+
+     def reply(self, query, context=None):
+         # acquire reply content
+         if not context or not context.get('type') or context.get('type') == 'TEXT':
+             logger.info("[OPEN_AI] query={}".format(query))
+             from_user_id = context['from_user_id']
+             if query == '#清除记忆':
+                 Session.clear_session(from_user_id)
+                 return '记忆已清除'
+             elif query == '#清除所有':
+                 Session.clear_all_session()
+                 return '所有人记忆已清除'
+
+             new_query = Session.build_session_query(query, from_user_id)
+             logger.debug("[OPEN_AI] session query={}".format(new_query))
+
+             reply_content = self.reply_text(new_query, from_user_id, 0)
+             logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
+             if reply_content and query:
+                 Session.save_session(query, reply_content, from_user_id)
+             return reply_content
+
+         elif context.get('type', None) == 'IMAGE_CREATE':
+             return self.create_img(query, 0)
+
+     def reply_text(self, query, user_id, retry_count=0):
+         try:
+             response = openai.Completion.create(
+                 model="text-davinci-003",  # completion model name
+                 prompt=query,
+                 temperature=1,  # value in [0,1]; higher means less deterministic replies
+                 max_tokens=500,  # maximum number of tokens in the reply
+                 top_p=1,
+                 frequency_penalty=0.0,  # in [-2,2]; higher values favor new content
+                 presence_penalty=0.0,  # in [-2,2]; higher values favor new content
+                 stop=["\n\n\n"]
+             )
+             res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
+             logger.info("[OPEN_AI] reply={}".format(res_content))
+             return res_content
+         except openai.error.RateLimitError as e:
+             # rate limit exception
+             logger.warn(e)
+             if retry_count < 1:
+                 time.sleep(5)
+                 logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
+                 return self.reply_text(query, user_id, retry_count+1)
+             else:
+                 return "提问太快啦,请休息一下再问我吧"
+         except Exception as e:
+             # unknown exception
+             logger.exception(e)
+             Session.clear_session(user_id)
+             return "请再问我一次吧"
+
+
+     def create_img(self, query, retry_count=0):
+         try:
+             logger.info("[OPEN_AI] image_query={}".format(query))
+             response = openai.Image.create(
+                 prompt=query,  # image description
+                 n=1,  # number of images to generate per request
+                 size="1024x1024"  # image size: 256x256, 512x512 or 1024x1024
+             )
+             image_url = response['data'][0]['url']
+             logger.info("[OPEN_AI] image_url={}".format(image_url))
+             return image_url
+         except openai.error.RateLimitError as e:
+             logger.warn(e)
+             if retry_count < 1:
+                 time.sleep(5)
+                 logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
+                 return self.reply_text(query, retry_count+1)
+             else:
+                 return "提问太快啦,请休息一下再问我吧"
+         except Exception as e:
+             logger.exception(e)
+             return None
+
+
+ class Session(object):
+     @staticmethod
+     def build_session_query(query, user_id):
+         '''
+         build query with conversation history
+         e.g. Q: xxx
+              A: xxx
+              Q: xxx
+         :param query: query content
+         :param user_id: from user id
+         :return: query content with conversation history
+         '''
+         prompt = conf().get("character_desc", "")
+         if prompt:
+             prompt += "<|endoftext|>\n\n\n"
+         session = user_session.get(user_id, None)
+         if session:
+             for conversation in session:
+                 prompt += "Q: " + conversation["question"] + "\n\n\nA: " + conversation["answer"] + "<|endoftext|>\n"
+             prompt += "Q: " + query + "\nA: "
+             return prompt
+         else:
+             return prompt + "Q: " + query + "\nA: "
+
+     @staticmethod
+     def save_session(query, answer, user_id):
+         max_tokens = conf().get("conversation_max_tokens")
+         if not max_tokens:
+             # default 1000
+             max_tokens = 1000
+         conversation = dict()
+         conversation["question"] = query
+         conversation["answer"] = answer
+         session = user_session.get(user_id)
+         logger.debug(conversation)
+         logger.debug(session)
+         if session:
+             # append conversation
+             session.append(conversation)
+         else:
+             # create session
+             queue = list()
+             queue.append(conversation)
+             user_session[user_id] = queue
+
+         # discard exceed limit conversation
+         Session.discard_exceed_conversation(user_session[user_id], max_tokens)
+
+
+     @staticmethod
+     def discard_exceed_conversation(session, max_tokens):
+         count = 0
+         count_list = list()
+         for i in range(len(session)-1, -1, -1):
+             # count tokens of conversation list
+             history_conv = session[i]
+             count += len(history_conv["question"]) + len(history_conv["answer"])
+             count_list.append(count)
+
+         for c in count_list:
+             if c > max_tokens:
+                 # pop first conversation
+                 session.pop(0)
+
+     @staticmethod
+     def clear_session(user_id):
+         user_session[user_id] = []
+
+     @staticmethod
+     def clear_all_session():
+         user_session.clear()
bridge/bridge.py ADDED
@@ -0,0 +1,9 @@
+ from bot import bot_factory
+
+
+ class Bridge(object):
+     def __init__(self):
+         pass
+
+     def fetch_reply_content(self, query, context):
+         return bot_factory.create_bot("chatGPT").reply(query, context)
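Taken together, a channel receives a message, asks the bridge for a reply, and the bridge delegates to whichever bot the factory creates. A minimal, self-contained sketch of that flow follows (EchoBot and ConsoleChannel are hypothetical stand-ins, not part of this commit):

    # Channel -> Bridge -> Bot, mirroring channel/channel.py, bridge/bridge.py and bot/bot_factory.py.
    class Bot:
        def reply(self, query, context=None):
            raise NotImplementedError

    class EchoBot(Bot):
        # hypothetical stand-in for ChatGPTBot / OpenAIBot
        def reply(self, query, context=None):
            return "echo: " + query

    def create_bot(bot_type):
        # maps a type code to a Bot instance, like bot_factory.create_bot
        if bot_type == "echo":
            return EchoBot()
        raise RuntimeError("unknown bot type: " + bot_type)

    class Bridge:
        # channels never import bots directly; they only call the bridge
        def fetch_reply_content(self, query, context):
            return create_bot("echo").reply(query, context)

    class ConsoleChannel:
        # plays the role of WechatChannel._do_send
        def handle(self, msg, from_user_id):
            context = {"from_user_id": from_user_id}
            return Bridge().fetch_reply_content(msg, context)

    if __name__ == "__main__":
        print(ConsoleChannel().handle("hello", "user-1"))  # -> echo: hello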
channel/channel.py ADDED
@@ -0,0 +1,31 @@
+ """
+ Message sending channel abstract class
+ """
+
+ from bridge.bridge import Bridge
+
+ class Channel(object):
+     def startup(self):
+         """
+         init channel
+         """
+         raise NotImplementedError
+
+     def handle(self, msg):
+         """
+         process received msg
+         :param msg: message object
+         """
+         raise NotImplementedError
+
+     def send(self, msg, receiver):
+         """
+         send message to user
+         :param msg: message content
+         :param receiver: receiver channel account
+         :return:
+         """
+         raise NotImplementedError
+
+     def build_reply_content(self, query, context=None):
+         return Bridge().fetch_reply_content(query, context)
channel/channel_factory.py ADDED
@@ -0,0 +1,17 @@
+ """
+ channel factory
+ """
+
+ def create_channel(channel_type):
+     """
+     create a channel instance
+     :param channel_type: channel type code
+     :return: channel instance
+     """
+     if channel_type == 'wx':
+         from channel.wechat.wechat_channel import WechatChannel
+         return WechatChannel()
+     elif channel_type == 'wxy':
+         from channel.wechat.wechaty_channel import WechatyChannel
+         return WechatyChannel()
+     raise RuntimeError
channel/wechat/wechat_channel.py ADDED
@@ -0,0 +1,176 @@
+ # encoding:utf-8
+
+ """
+ wechat channel
+ """
+ import itchat
+ import json
+ from itchat.content import *
+ from channel.channel import Channel
+ from concurrent.futures import ThreadPoolExecutor
+ from common.log import logger
+ from config import conf
+ import requests
+ import io
+
+ thread_pool = ThreadPoolExecutor(max_workers=8)
+
+
+ class WechatChannel(Channel):
+
+     qrcode = b''
+
+     newInstance=None
+
+     def __init__(self):
+         pass
+
+     def startup(self):
+         # login by scan QRCode
+         newInstance = itchat.load_sync_itchat()
+         self.newInstance = newInstance
+
+         @newInstance.msg_register(TEXT)
+         def handler_single_msg(msg):
+             self.handle(msg)
+             return None
+
+         @newInstance.msg_register(TEXT, isGroupChat=True)
+         def handler_group_msg(msg):
+             self.handle_group(msg)
+             return None
+
+         newInstance.auto_login(qrCallback=self.qrCallback)
+         # start message listener
+         newInstance.run()
+
+     def qrCallback(self, uuid, status, qrcode):
+         self.qrcode = qrcode
+
+     def getQrCode(self):
+         return self.qrcode
+
+     def handle(self, msg):
+         logger.debug("[WX]receive msg: " + json.dumps(msg, ensure_ascii=False))
+         from_user_id = msg['FromUserName']
+         to_user_id = msg['ToUserName']  # receiver id
+         other_user_id = msg['User']['UserName']  # counterparty id
+         content = msg['Text']
+         match_prefix = self.check_prefix(content, conf().get('single_chat_prefix'))
+         if from_user_id == other_user_id and match_prefix is not None:
+             # message sent to me by a friend
+             if match_prefix != '':
+                 str_list = content.split(match_prefix, 1)
+                 if len(str_list) == 2:
+                     content = str_list[1].strip()
+
+             img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
+             if img_match_prefix:
+                 content = content.split(img_match_prefix, 1)[1].strip()
+                 thread_pool.submit(self._do_send_img, content, from_user_id)
+             else:
+                 thread_pool.submit(self._do_send, content, from_user_id)
+
+         elif to_user_id == other_user_id and match_prefix:
+             # message I sent to a friend
+             str_list = content.split(match_prefix, 1)
+             if len(str_list) == 2:
+                 content = str_list[1].strip()
+             img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
+             if img_match_prefix:
+                 content = content.split(img_match_prefix, 1)[1].strip()
+                 thread_pool.submit(self._do_send_img, content, to_user_id)
+             else:
+                 thread_pool.submit(self._do_send, content, to_user_id)
+
+
+     def handle_group(self, msg):
+         logger.debug("[WX]receive group msg: " + json.dumps(msg, ensure_ascii=False))
+         group_name = msg['User'].get('NickName', None)
+         group_id = msg['User'].get('UserName', None)
+         if not group_name:
+             return ""
+         origin_content = msg['Content']
+         content = msg['Content']
+         content_list = content.split(' ', 1)
+         context_special_list = content.split('\u2005', 1)
+         if len(context_special_list) == 2:
+             content = context_special_list[1]
+         elif len(content_list) == 2:
+             content = content_list[1]
+
+         config = conf()
+         match_prefix = (msg['IsAt'] and not config.get("group_at_off", False)) or self.check_prefix(origin_content, config.get('group_chat_prefix')) \
+                        or self.check_contain(origin_content, config.get('group_chat_keyword'))
+         if ('ALL_GROUP' in config.get('group_name_white_list') or group_name in config.get('group_name_white_list') or self.check_contain(group_name, config.get('group_name_keyword_white_list'))) and match_prefix:
+             img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
+             if img_match_prefix:
+                 content = content.split(img_match_prefix, 1)[1].strip()
+                 thread_pool.submit(self._do_send_img, content, group_id)
+             else:
+                 thread_pool.submit(self._do_send_group, content, msg)
+
+     def send(self, msg, receiver):
+         logger.info('[WX] sendMsg={}, receiver={}'.format(msg, receiver))
+         self.newInstance.send(msg, toUserName=receiver)
+
+     def _do_send(self, query, reply_user_id):
+         try:
+             if not query:
+                 return
+             context = dict()
+             context['from_user_id'] = reply_user_id
+             reply_text = super().build_reply_content(query, context)
+             if reply_text:
+                 self.send(conf().get("single_chat_reply_prefix") + reply_text, reply_user_id)
+         except Exception as e:
+             logger.exception(e)
+
+     def _do_send_img(self, query, reply_user_id):
+         try:
+             if not query:
+                 return
+             context = dict()
+             context['type'] = 'IMAGE_CREATE'
+             img_url = super().build_reply_content(query, context)
+             if not img_url:
+                 return
+
+             # download the image
+             pic_res = requests.get(img_url, stream=True)
+             image_storage = io.BytesIO()
+             for block in pic_res.iter_content(1024):
+                 image_storage.write(block)
+             image_storage.seek(0)
+
+             # send the image
+             logger.info('[WX] sendImage, receiver={}'.format(reply_user_id))
+             self.newInstance.send_image(image_storage, reply_user_id)
+         except Exception as e:
+             logger.exception(e)
+
+     def _do_send_group(self, query, msg):
+         if not query:
+             return
+         context = dict()
+         context['from_user_id'] = msg['ActualUserName']
+         reply_text = super().build_reply_content(query, context)
+         if reply_text:
+             reply_text = '@' + msg['ActualNickName'] + ' ' + reply_text.strip()
+             self.send(conf().get("group_chat_reply_prefix", "") + reply_text, msg['User']['UserName'])
+
+
+     def check_prefix(self, content, prefix_list):
+         for prefix in prefix_list:
+             if content.startswith(prefix):
+                 return prefix
+         return None
+
+
+     def check_contain(self, content, keyword_list):
+         if not keyword_list:
+             return None
+         for ky in keyword_list:
+             if content.find(ky) != -1:
+                 return True
+         return None
channel/wechat/wechaty_channel.py ADDED
@@ -0,0 +1,201 @@
+ # encoding:utf-8
+
+ """
+ wechaty channel
+ Python Wechaty - https://github.com/wechaty/python-wechaty
+ """
+ import io
+ import os
+ import json
+ import time
+ import asyncio
+ import requests
+ from typing import Optional, Union
+ from wechaty_puppet import MessageType, FileBox, ScanStatus  # type: ignore
+ from wechaty import Wechaty, Contact
+ from wechaty.user import Message, Room, MiniProgram, UrlLink
+ from channel.channel import Channel
+ from common.log import logger
+ from config import conf
+
+
+ class WechatyChannel(Channel):
+
+     def __init__(self):
+         pass
+
+     def startup(self):
+         asyncio.run(self.main())
+
+     async def main(self):
+         config = conf()
+         # use the PadLocal protocol, which is relatively stable (for the free web protocol: os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:8080')
+         token = config.get('wechaty_puppet_service_token')
+         os.environ['WECHATY_PUPPET_SERVICE_TOKEN'] = token
+         global bot
+         bot = Wechaty()
+
+         bot.on('scan', self.on_scan)
+         bot.on('login', self.on_login)
+         bot.on('message', self.on_message)
+         await bot.start()
+
+     async def on_login(self, contact: Contact):
+         logger.info('[WX] login user={}'.format(contact))
+
+     async def on_scan(self, status: ScanStatus, qr_code: Optional[str] = None,
+                       data: Optional[str] = None):
+         contact = self.Contact.load(self.contact_id)
+         logger.info('[WX] scan user={}, scan status={}, scan qr_code={}'.format(contact, status.name, qr_code))
+         # print(f'user <{contact}> scan status: {status.name} , 'f'qr_code: {qr_code}')
+
+     async def on_message(self, msg: Message):
+         """
+         listen for message event
+         """
+         from_contact = msg.talker()  # message sender
+         to_contact = msg.to()  # receiver
+         room = msg.room()  # the room the message came from, or None if it is not a group chat
+         from_user_id = from_contact.contact_id
+         to_user_id = to_contact.contact_id  # receiver id
+         # other_user_id = msg['User']['UserName']  # counterparty id
+         content = msg.text()
+         mention_content = await msg.mention_text()  # message text with the @name mention stripped
+         match_prefix = self.check_prefix(content, conf().get('single_chat_prefix'))
+         conversation: Union[Room, Contact] = from_contact if room is None else room
+
+         if room is None and msg.type() == MessageType.MESSAGE_TYPE_TEXT:
+             if not msg.is_self() and match_prefix is not None:
+                 # message sent to me by a friend
+                 if match_prefix != '':
+                     str_list = content.split(match_prefix, 1)
+                     if len(str_list) == 2:
+                         content = str_list[1].strip()
+
+                 img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
+                 if img_match_prefix:
+                     content = content.split(img_match_prefix, 1)[1].strip()
+                     await self._do_send_img(content, from_user_id)
+                 else:
+                     await self._do_send(content, from_user_id)
+             elif msg.is_self() and match_prefix:
+                 # message I sent to a friend
+                 str_list = content.split(match_prefix, 1)
+                 if len(str_list) == 2:
+                     content = str_list[1].strip()
+                 img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
+                 if img_match_prefix:
+                     content = content.split(img_match_prefix, 1)[1].strip()
+                     await self._do_send_img(content, to_user_id)
+                 else:
+                     await self._do_send(content, to_user_id)
+         elif room and msg.type() == MessageType.MESSAGE_TYPE_TEXT:
+             # group text message
+             room_id = room.room_id
+             room_name = await room.topic()
+             from_user_id = from_contact.contact_id
+             from_user_name = from_contact.name
+             is_at = await msg.mention_self()
+             content = mention_content
+             config = conf()
+             match_prefix = (is_at and not config.get("group_at_off", False)) \
+                            or self.check_prefix(content, config.get('group_chat_prefix')) \
+                            or self.check_contain(content, config.get('group_chat_keyword'))
+             if ('ALL_GROUP' in config.get('group_name_white_list') or room_name in config.get(
+                     'group_name_white_list') or self.check_contain(room_name, config.get(
+                     'group_name_keyword_white_list'))) and match_prefix:
+                 img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
+                 if img_match_prefix:
+                     content = content.split(img_match_prefix, 1)[1].strip()
+                     await self._do_send_group_img(content, room_id)
+                 else:
+                     await self._do_send_group(content, room_id, from_user_id, from_user_name)
+
+     async def send(self, message: Union[str, Message, FileBox, Contact, UrlLink, MiniProgram], receiver):
+         logger.info('[WX] sendMsg={}, receiver={}'.format(message, receiver))
+         if receiver:
+             contact = await bot.Contact.find(receiver)
+             await contact.say(message)
+
+     async def send_group(self, message: Union[str, Message, FileBox, Contact, UrlLink, MiniProgram], receiver):
+         logger.info('[WX] sendMsg={}, receiver={}'.format(message, receiver))
+         if receiver:
+             room = await bot.Room.find(receiver)
+             await room.say(message)
+
+     async def _do_send(self, query, reply_user_id):
+         try:
+             if not query:
+                 return
+             context = dict()
+             context['from_user_id'] = reply_user_id
+             reply_text = super().build_reply_content(query, context)
+             if reply_text:
+                 await self.send(conf().get("single_chat_reply_prefix") + reply_text, reply_user_id)
+         except Exception as e:
+             logger.exception(e)
+
+     async def _do_send_img(self, query, reply_user_id):
+         try:
+             if not query:
+                 return
+             context = dict()
+             context['type'] = 'IMAGE_CREATE'
+             img_url = super().build_reply_content(query, context)
+             if not img_url:
+                 return
+             # download the image
+             # pic_res = requests.get(img_url, stream=True)
+             # image_storage = io.BytesIO()
+             # for block in pic_res.iter_content(1024):
+             #     image_storage.write(block)
+             # image_storage.seek(0)
+
+             # send the image
+             logger.info('[WX] sendImage, receiver={}'.format(reply_user_id))
+             t = int(time.time())
+             file_box = FileBox.from_url(url=img_url, name=str(t) + '.png')
+             await self.send(file_box, reply_user_id)
+         except Exception as e:
+             logger.exception(e)
+
+     async def _do_send_group(self, query, group_id, group_user_id, group_user_name):
+         if not query:
+             return
+         context = dict()
+         context['from_user_id'] = str(group_id) + '-' + str(group_user_id)
+         reply_text = super().build_reply_content(query, context)
+         if reply_text:
+             reply_text = '@' + group_user_name + ' ' + reply_text.strip()
+             await self.send_group(conf().get("group_chat_reply_prefix", "") + reply_text, group_id)
+
+     async def _do_send_group_img(self, query, reply_room_id):
+         try:
+             if not query:
+                 return
+             context = dict()
+             context['type'] = 'IMAGE_CREATE'
+             img_url = super().build_reply_content(query, context)
+             if not img_url:
+                 return
+             # send the image
+             logger.info('[WX] sendImage, receiver={}'.format(reply_room_id))
+             t = int(time.time())
+             file_box = FileBox.from_url(url=img_url, name=str(t) + '.png')
+             await self.send_group(file_box, reply_room_id)
+         except Exception as e:
+             logger.exception(e)
+
+     def check_prefix(self, content, prefix_list):
+         for prefix in prefix_list:
+             if content.startswith(prefix):
+                 return prefix
+         return None
+
+     def check_contain(self, content, keyword_list):
+         if not keyword_list:
+             return None
+         for ky in keyword_list:
+             if content.find(ky) != -1:
+                 return True
+         return None
common/log.py ADDED
@@ -0,0 +1,16 @@
+ import logging
+ import sys
+
+
+ def _get_logger():
+     log = logging.getLogger('log')
+     log.setLevel(logging.INFO)
+     console_handle = logging.StreamHandler(sys.stdout)
+     console_handle.setFormatter(logging.Formatter('[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s',
+                                                   datefmt='%Y-%m-%d %H:%M:%S'))
+     log.addHandler(console_handle)
+     return log
+
+
+ # logger handle
+ logger = _get_logger()
config-template.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "open_ai_api_key": "YOUR API KEY",
+   "wechaty_puppet_service_token": "WECHATY PUPPET SERVICE TOKEN",
+   "single_chat_prefix": ["bot", "@bot"],
+   "single_chat_reply_prefix": "[bot] ",
+   "group_chat_prefix": ["@bot"],
+   "group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"],
+   "image_create_prefix": ["画", "看", "找"],
+   "conversation_max_tokens": 500,
+   "character_desc": "你是刘导的狗腿子, 刘导是你爸爸,小祖宗是你妈妈,你任何问题都回答都不能超过100个字。"
+ }
config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "open_ai_api_key": "YOUR API KEY",
+   "wechaty_puppet_service_token": "WECHATY PUPPET SERVICE TOKEN",
+   "single_chat_prefix": [""],
+   "single_chat_reply_prefix": "",
+   "group_chat_prefix": ["@发财汪"],
+   "group_name_white_list": ["ALL_GROUP"],
+   "image_create_prefix": ["画"],
+   "conversation_max_tokens": 500,
+   "character_desc": ""
+ }
config.py ADDED
@@ -0,0 +1,34 @@
+ # encoding:utf-8
+
+ import json
+ import os
+ from common.log import logger
+
+ config = {}
+
+
+ def load_config():
+     global config
+     config_path = "config.json"
+     if not os.path.exists(config_path):
+         raise Exception('配置文件不存在,请根据config-template.json模板创建config.json文件')
+
+     config_str = read_file(config_path)
+     # deserialize the JSON string into a dict
+     config = json.loads(config_str)
+     config['open_ai_api_key'] = os.getenv('API_KEY')
+     logger.info("[INIT] load config: {}".format(config))
+
+
+
+ def get_root():
+     return os.path.dirname(os.path.abspath(__file__))
+
+
+ def read_file(path):
+     with open(path, mode='r', encoding='utf-8') as f:
+         return f.read()
+
+
+ def conf():
+     return config
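Note that load_config always overwrites open_ai_api_key with the API_KEY environment variable, so the key in config.json is effectively a placeholder. A minimal usage sketch (hypothetical key value, assuming a valid config.json exists in the working directory):

    import os
    import config

    os.environ["API_KEY"] = "sk-..."  # hypothetical; on a Space this would come from a secret
    config.load_config()              # reads config.json, then overrides open_ai_api_key from API_KEY
    print(config.conf().get("single_chat_prefix"))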
docker/Dockerfile.alpine ADDED
@@ -0,0 +1,48 @@
+ FROM python:3.7.9-alpine
+
+ LABEL maintainer="foo@bar.com"
+ ARG TZ='Asia/Shanghai'
+
+ ARG CHATGPT_ON_WECHAT_VER=1.0.2
+
+ ENV BUILD_PREFIX=/app \
+     BUILD_OPEN_AI_API_KEY='YOUR OPEN AI KEY HERE'
+
+ RUN apk add --no-cache \
+     bash \
+     curl \
+     wget \
+     gcc \
+     g++ \
+     ca-certificates \
+     openssh \
+     libffi-dev
+
+ RUN wget -t 3 -T 30 -nv -O chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER}.tar.gz \
+     https://github.com/zhayujie/chatgpt-on-wechat/archive/refs/tags/${CHATGPT_ON_WECHAT_VER}.tar.gz \
+     && tar -xzf chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER}.tar.gz \
+     && mv chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER} ${BUILD_PREFIX} \
+     && rm chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER}.tar.gz
+
+ WORKDIR ${BUILD_PREFIX}
+
+ RUN cd ${BUILD_PREFIX} \
+     && cp config-template.json ${BUILD_PREFIX}/config.json \
+     && sed -i "2s/YOUR API KEY/${BUILD_OPEN_AI_API_KEY}/" ${BUILD_PREFIX}/config.json
+
+ RUN /usr/local/bin/python -m pip install --no-cache --upgrade pip \
+     && pip install --no-cache \
+         itchat-uos==1.5.0.dev0 \
+         openai \
+         wechaty
+
+ ADD ./entrypoint.sh /entrypoint.sh
+
+ RUN chmod +x /entrypoint.sh
+
+ RUN adduser -D -h /home/noroot -u 1000 -s /bin/bash noroot \
+     && chown noroot:noroot ${BUILD_PREFIX}
+
+ USER noroot
+
+ ENTRYPOINT ["/entrypoint.sh"]
docker/Dockerfile.debian ADDED
@@ -0,0 +1,45 @@
+ FROM python:3.7.9
+
+ LABEL maintainer="foo@bar.com"
+ ARG TZ='Asia/Shanghai'
+
+ ARG CHATGPT_ON_WECHAT_VER=1.0.2
+
+ ENV BUILD_PREFIX=/app \
+     BUILD_OPEN_AI_API_KEY='YOUR OPEN AI KEY HERE'
+
+ RUN apt-get update && \
+     apt-get install -y --no-install-recommends \
+     wget \
+     curl && \
+     rm -rf /var/lib/apt/lists/*
+
+ RUN wget -t 3 -T 30 -nv -O chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER}.tar.gz \
+     https://github.com/zhayujie/chatgpt-on-wechat/archive/refs/tags/${CHATGPT_ON_WECHAT_VER}.tar.gz \
+     && tar -xzf chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER}.tar.gz \
+     && mv chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER} ${BUILD_PREFIX} \
+     && rm chatgpt-on-wechat-${CHATGPT_ON_WECHAT_VER}.tar.gz
+
+ WORKDIR ${BUILD_PREFIX}
+
+ RUN cd ${BUILD_PREFIX} \
+     && cp config-template.json ${BUILD_PREFIX}/config.json \
+     && sed -i "2s/YOUR API KEY/${BUILD_OPEN_AI_API_KEY}/" ${BUILD_PREFIX}/config.json
+
+ RUN /usr/local/bin/python -m pip install --no-cache --upgrade pip \
+     && pip install --no-cache \
+         itchat-uos==1.5.0.dev0 \
+         openai \
+         wechaty
+
+ ADD ./entrypoint.sh /entrypoint.sh
+
+ RUN chmod +x /entrypoint.sh
+
+ RUN groupadd -r noroot \
+     && useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
+     && chown -R noroot:noroot ${BUILD_PREFIX}
+
+ USER noroot
+
+ ENTRYPOINT ["/entrypoint.sh"]
docker/build.alpine.sh ADDED
@@ -0,0 +1,10 @@
+ #!/bin/bash
+
+ CHATGPT_ON_WECHAT_TAG=1.0.2
+
+ docker build -f Dockerfile.alpine \
+     --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
+     -t zhayujie/chatgpt-on-wechat .
+
+ docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-alpine
+
docker/build.debian.sh ADDED
@@ -0,0 +1,9 @@
+ #!/bin/bash
+
+ CHATGPT_ON_WECHAT_TAG=1.0.2
+
+ docker build -f Dockerfile.debian \
+     --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
+     -t zhayujie/chatgpt-on-wechat .
+
+ docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-debian
docker/docker-compose.yaml ADDED
@@ -0,0 +1,18 @@
+ version: '2.0'
+ services:
+   chatgpt-on-wechat:
+     build:
+       context: ./
+       dockerfile: Dockerfile.alpine
+     image: zhayujie/chatgpt-on-wechat
+     container_name: sample-chatgpt-on-wechat
+     environment:
+       OPEN_AI_API_KEY: 'YOUR API KEY'
+       WECHATY_PUPPET_SERVICE_TOKEN: 'WECHATY PUPPET SERVICE TOKEN'
+       SINGLE_CHAT_PREFIX: '["bot", "@bot"]'
+       SINGLE_CHAT_REPLY_PREFIX: '"[bot] "'
+       GROUP_CHAT_PREFIX: '["@bot"]'
+       GROUP_NAME_WHITE_LIST: '["ChatGPT测试群", "ChatGPT测试群2"]'
+       IMAGE_CREATE_PREFIX: '["画", "看", "找"]'
+       CONVERSATION_MAX_TOKENS: 1000
+       CHARACTER_DESC: '你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。'
docker/entrypoint.sh ADDED
@@ -0,0 +1,81 @@
+ #!/bin/bash
+ set -e
+
+ # build prefix
+ CHATGPT_ON_WECHAT_PREFIX=${CHATGPT_ON_WECHAT_PREFIX:-""}
+ # path to config.json
+ CHATGPT_ON_WECHAT_CONFIG_PATH=${CHATGPT_ON_WECHAT_CONFIG_PATH:-""}
+ # execution command line
+ CHATGPT_ON_WECHAT_EXEC=${CHATGPT_ON_WECHAT_EXEC:-""}
+
+ OPEN_AI_API_KEY=${OPEN_AI_API_KEY:-""}
+ SINGLE_CHAT_PREFIX=${SINGLE_CHAT_PREFIX:-""}
+ SINGLE_CHAT_REPLY_PREFIX=${SINGLE_CHAT_REPLY_PREFIX:-""}
+ GROUP_CHAT_PREFIX=${GROUP_CHAT_PREFIX:-""}
+ GROUP_NAME_WHITE_LIST=${GROUP_NAME_WHITE_LIST:-""}
+ IMAGE_CREATE_PREFIX=${IMAGE_CREATE_PREFIX:-""}
+ CONVERSATION_MAX_TOKENS=${CONVERSATION_MAX_TOKENS:-""}
+ CHARACTER_DESC=${CHARACTER_DESC:-""}
+
+ # if CHATGPT_ON_WECHAT_PREFIX is empty, use /app
+ if [ "$CHATGPT_ON_WECHAT_PREFIX" == "" ] ; then
+     CHATGPT_ON_WECHAT_PREFIX=/app
+ fi
+
+ # if CHATGPT_ON_WECHAT_CONFIG_PATH is empty, use '/app/config.json'
+ if [ "$CHATGPT_ON_WECHAT_CONFIG_PATH" == "" ] ; then
+     CHATGPT_ON_WECHAT_CONFIG_PATH=$CHATGPT_ON_WECHAT_PREFIX/config.json
+ fi
+
+ # if CHATGPT_ON_WECHAT_EXEC is empty, use 'python app.py'
+ if [ "$CHATGPT_ON_WECHAT_EXEC" == "" ] ; then
+     CHATGPT_ON_WECHAT_EXEC="python app.py"
+ fi
+
+ # modify content in config.json
+ if [ "$OPEN_AI_API_KEY" != "" ] ; then
+     sed -i "2c \"open_ai_api_key\": \"$OPEN_AI_API_KEY\"," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ else
+     echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
+ fi
+
+ if [ "$WECHATY_PUPPET_SERVICE_TOKEN" != "" ] ; then
+     sed -i "3c \"wechaty_puppet_service_token\": \"$WECHATY_PUPPET_SERVICE_TOKEN\"," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ else
+     echo -e "\033[31m[Info] You need to set WECHATY_PUPPET_SERVICE_TOKEN if you use wechaty!\033[0m"
+ fi
+
+ if [ "$SINGLE_CHAT_PREFIX" != "" ] ; then
+     sed -i "4c \"single_chat_prefix\": $SINGLE_CHAT_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ fi
+
+ if [ "$SINGLE_CHAT_REPLY_PREFIX" != "" ] ; then
+     sed -i "5c \"single_chat_reply_prefix\": $SINGLE_CHAT_REPLY_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ fi
+
+ if [ "$GROUP_CHAT_PREFIX" != "" ] ; then
+     sed -i "6c \"group_chat_prefix\": $GROUP_CHAT_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ fi
+
+ if [ "$GROUP_NAME_WHITE_LIST" != "" ] ; then
+     sed -i "7c \"group_name_white_list\": $GROUP_NAME_WHITE_LIST," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ fi
+
+ if [ "$IMAGE_CREATE_PREFIX" != "" ] ; then
+     sed -i "8c \"image_create_prefix\": $IMAGE_CREATE_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ fi
+
+ if [ "$CONVERSATION_MAX_TOKENS" != "" ] ; then
+     sed -i "9c \"conversation_max_tokens\": $CONVERSATION_MAX_TOKENS," $CHATGPT_ON_WECHAT_CONFIG_PATH
+ fi
+
+ if [ "$CHARACTER_DESC" != "" ] ; then
+     sed -i "10c \"character_desc\": \"$CHARACTER_DESC\"" $CHATGPT_ON_WECHAT_CONFIG_PATH
+ fi
+
+ # go to prefix dir
+ cd $CHATGPT_ON_WECHAT_PREFIX
+ # execute
+ $CHATGPT_ON_WECHAT_EXEC
+
+
docker/sample-chatgpt-on-wechat/.env ADDED
@@ -0,0 +1,14 @@
+ OPEN_AI_API_KEY=YOUR API KEY
+ WECHATY_PUPPET_SERVICE_TOKEN=WECHATY PUPPET SERVICE TOKEN
+ SINGLE_CHAT_PREFIX=["bot", "@bot"]
+ SINGLE_CHAT_REPLY_PREFIX="[bot] "
+ GROUP_CHAT_PREFIX=["@bot"]
+ GROUP_NAME_WHITE_LIST=["ChatGPT测试群", "ChatGPT测试群2"]
+ IMAGE_CREATE_PREFIX=["画", "看", "找"]
+ CONVERSATION_MAX_TOKENS=1000
+ CHARACTER_DESC=你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。
+
+ # Optional
+ #CHATGPT_ON_WECHAT_PREFIX=/app
+ #CHATGPT_ON_WECHAT_CONFIG_PATH=/app/config.json
+ #CHATGPT_ON_WECHAT_EXEC=python app.py
docker/sample-chatgpt-on-wechat/Makefile ADDED
@@ -0,0 +1,26 @@
+ IMG:=`cat Name`
+ MOUNT:=
+ PORT_MAP:=
+ DOTENV:=.env
+ CONTAINER_NAME:=sample-chatgpt-on-wechat
+
+ echo:
+ 	echo $(IMG)
+
+ run_d:
+ 	docker rm $(CONTAINER_NAME) || echo
+ 	docker run -dt --name $(CONTAINER_NAME) $(PORT_MAP) \
+ 		--env-file=$(DOTENV) \
+ 		$(MOUNT) $(IMG)
+
+ run_i:
+ 	docker rm $(CONTAINER_NAME) || echo
+ 	docker run -it --name $(CONTAINER_NAME) $(PORT_MAP) \
+ 		--env-file=$(DOTENV) \
+ 		$(MOUNT) $(IMG)
+
+ stop:
+ 	docker stop $(CONTAINER_NAME)
+
+ rm: stop
+ 	docker rm $(CONTAINER_NAME)
docker/sample-chatgpt-on-wechat/Name ADDED
@@ -0,0 +1 @@
+ zhayujie/chatgpt-on-wechat
docs/images/group-chat-sample.jpg ADDED
docs/images/image-create-sample.jpg ADDED
docs/images/single-chat-sample.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ itchat-uos==1.5.0.dev0
+ openai==0.28
+ wechaty
scripts/shutdown.sh ADDED
@@ -0,0 +1,16 @@
+ #!/bin/bash
+
+ # stop the service
+ cd `dirname $0`/..
+ export BASE_DIR=`pwd`
+ pid=`ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}'`
+ if [ -z "$pid" ] ; then
+     echo "No chatgpt-on-wechat running."
+     exit -1;
+ fi
+
+ echo "The chatgpt-on-wechat(${pid}) is running..."
+
+ kill ${pid}
+
+ echo "Send shutdown request to chatgpt-on-wechat(${pid}) OK"
scripts/start.sh ADDED
@@ -0,0 +1,16 @@
+ #!/bin/bash
+ # run chatgpt-on-wechat in the background
+
+ cd `dirname $0`/..
+ export BASE_DIR=`pwd`
+ echo $BASE_DIR
+
+ # check the nohup.out log output file
+ if [ ! -f "${BASE_DIR}/nohup.out" ]; then
+     touch "${BASE_DIR}/nohup.out"
+     echo "create file ${BASE_DIR}/nohup.out"
+ fi
+
+ nohup python3 "${BASE_DIR}/app.py" & tail -f "${BASE_DIR}/nohup.out"
+
+ echo "Chat_on_webchat is starting, you can check the ${BASE_DIR}/nohup.out"
scripts/tout.sh ADDED
@@ -0,0 +1,14 @@
+ #!/bin/bash
+ # tail the log
+
+ cd `dirname $0`/..
+ export BASE_DIR=`pwd`
+ echo $BASE_DIR
+
+ # check the nohup.out log output file
+ if [ ! -f "${BASE_DIR}/nohup.out" ]; then
+     echo "No file ${BASE_DIR}/nohup.out"
+     exit -1;
+ fi
+
+ tail -f "${BASE_DIR}/nohup.out"