Spaces · status: Runtime error

rexthecoder committed · commit 7c3e9f4 · 1 parent: ac8786f
chore: fix

Files changed:
- main.py +0 -29
- src/agent/base.py +2 -6
- src/agent/tools/conversation.py +43 -0
- src/agent/tools/text_summary.py +9 -6
- src/api.py +10 -36
main.py
CHANGED
@@ -7,11 +7,7 @@ sys.path.insert(0, "src")
 os.environ['TRANSFORMERS_CACHE'] = '/tmp'
 
 from telegram.ext import (
-    CommandHandler,
-    CallbackContext,
     Application,
-    Updater,
-    ContextTypes,
 )
 from api import GirlfriendGPT
 from functools import partial
@@ -27,23 +23,12 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-# def show_results(response_messages):
-#     print(colored("\nResults: ", "blue", attrs=["bold"]))
-#     for message in response_messages:
-#         if message.mime_type:
-#             print(message.url, end="\n\n")
-#         else:
-#             print(message.text, end="\n\n")
 
 
 
 
 
 
-# async def echo(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
-#     """Echo the user message."""
-#     await update.message.reply_text("Enter your text")
-#     return GET_TEXT
 
 
 class LoggingDisabled:
@@ -59,11 +44,7 @@ class LoggingDisabled:
 def main():
     app = Application.builder().token(
         '6207542226:AAEoNVXw766dq8fYqVvBQW9Hve2Rovq0ERI',).build()
-    # application = Application.builder().token('6207542226:AAGXBVVxNNUKLUz17-5_sGJnhFDaVWUXTc8').build()
-    # application.add_handler(CommandHandler('start', hello))
 
-    # # Run the bot until the user presses Ctrl-C
-    # application.run_polling()
     run_agent(
         agent=GirlfriendGPT(
             token="hello",
@@ -75,16 +56,6 @@ def main():
 
 def run_agent(agent: GirlfriendGPT, as_api: bool = False) -> None:
     agent.handlers()
-    # For Debugging
-    # summary_handler = agent.conversation_summary_handler()
-    # agent.application.add_handler(summary_handler)
-    # agent.application.add_handler(CommandHandler('start', hello))
-    # # agent.application.add_handler(CommandHandler('summary', agent.conversation_summary))
-    # # agent.application.add_handler(MessageHandler(
-    # #     filters.TEXT & ~filters.COMMAND, agent.create_response))
-    # agent.application.run_polling()
-    # # agent.application.add_handler(MessageHandler(
-    # #     filters.TEXT & ~filters.COMMAND, agent.create_response))
 
 
 if __name__ == "__main__":
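main.py still builds the Application with a bot token hard-coded in the source. A minimal sketch of the usual alternative, assuming an environment variable named TELEGRAM_BOT_TOKEN (the variable name is not part of this commit):

import os

from telegram.ext import Application

# TELEGRAM_BOT_TOKEN is an assumed name; this repo does not define it anywhere.
token = os.environ["TELEGRAM_BOT_TOKEN"]
app = Application.builder().token(token).build()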
src/agent/base.py
CHANGED
@@ -10,11 +10,12 @@ from telegram.ext import (
 )
 
 from agent.tools.text_summary import ConversationSummary
+from agent.tools.conversation import Conversation
 
 SELECT_COMMAND, GET_TEXT = range(2)
 
 
-class LangChainAgentBot(ConversationSummary):
+class LangChainAgentBot(ConversationSummary, Conversation):
     def is_verbose_logging_enabled(self):
         return True
 
@@ -39,8 +40,3 @@ class LangChainAgentBot(ConversationSummary):
         """Cancel the conversation."""
         await update.message.reply_text("Oops, glad to help you.")
         return ConversationHandler.END
-
-
-    async def create_response(self, update: Update, context: CallbackContext) -> None:
-        await update.message.chat.send_action(action=telegram.constants.ChatAction.TYPING)
-        await update.message.reply_text(update.message.text)
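LangChainAgentBot now composes two tool mixins rather than one. A condensed view of the resulting class relationships, with signatures simplified from the files in this commit (bodies elided):

# Sketch only: condensed from this commit's files, not verbatim code.
class ConversationSummary:                        # src/agent/tools/text_summary.py
    async def summarize(self, input: str, words: int) -> str: ...
    def conversation_summary_handler(self): ...   # returns a ConversationHandler

class Conversation:                               # src/agent/tools/conversation.py
    async def talk(self, input: str) -> str: ...
    async def process_conversation(self, update, context) -> int: ...

class LangChainAgentBot(ConversationSummary, Conversation):   # src/agent/base.py
    ...  # GirlfriendGPT (src/api.py) subclasses this and registers the inherited handlers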
src/agent/tools/conversation.py
ADDED
@@ -0,0 +1,43 @@
+import logging
+from telegram import Update
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+from telegram.ext import (
+    Updater,
+    CommandHandler,
+    ConversationHandler,
+    CallbackContext,
+    CallbackQueryHandler,
+    MessageHandler,
+    filters
+)
+
+NAME = "Conversation"
+
+DESCRIPTION = """
+Useful for building up conversation.
+Input: A normal chat text
+Output: A text
+"""
+
+GET_CON = range(1)
+
+class Conversation():
+    tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+    model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
+
+    async def talk(self, input: str):
+        logging.info(f"{input}")
+        new_user_input_ids = self.tokenizer.encode(input(f"{input}") + self.tokenizer.eos_token, return_tensors='pt')
+        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
+        chat_history_ids =self.model.generate(bot_input_ids, max_length=1000, pad_token_id=self.tokenizer.eos_token_id)
+        return "{}".format(self.tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
+
+
+    async def process_conversation(self, update: Update, context: CallbackContext) -> int:
+        message = update.message.text
+        text = await self.talk(message)
+        await update.message.reply_text(f'{text}')
+
+
+
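As committed, Conversation.talk() cannot run: input(f"{input}") calls the str parameter as if it were a function (TypeError), and chat_history_ids is read before it is assigned (UnboundLocalError). A minimal corrected sketch of the same DialoGPT loop, with the rolling history kept on the instance (an assumed design choice, not something this commit specifies):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class Conversation:
    """DialoGPT chat helper; keeping history on the instance is an assumption."""

    tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
    model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
    chat_history_ids = None  # rolling conversation history (assumed storage)

    async def talk(self, text: str) -> str:
        # Encode the new user turn followed by the EOS token DialoGPT expects.
        new_user_input_ids = self.tokenizer.encode(
            text + self.tokenizer.eos_token, return_tensors="pt"
        )
        # Append to prior history instead of referencing an undefined variable.
        if self.chat_history_ids is not None:
            bot_input_ids = torch.cat([self.chat_history_ids, new_user_input_ids], dim=-1)
        else:
            bot_input_ids = new_user_input_ids
        self.chat_history_ids = self.model.generate(
            bot_input_ids, max_length=1000, pad_token_id=self.tokenizer.eos_token_id
        )
        # Decode only the tokens generated after the prompt.
        return self.tokenizer.decode(
            self.chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True
        )

Since model.generate() is synchronous, calling it inside an async handler blocks the event loop; wrapping the call in asyncio.to_thread() is one option, omitted here for brevity.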
src/agent/tools/text_summary.py
CHANGED
@@ -3,11 +3,9 @@ import logging
 import telegram
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 from telegram.ext import (
-    Updater,
     CommandHandler,
     ConversationHandler,
     CallbackContext,
-    CallbackQueryHandler,
     MessageHandler,
     filters
 )
@@ -33,19 +31,24 @@ class ConversationSummary():
 
     model = AutoModelForSeq2SeqLM.from_pretrained(
         "mrm8488/flan-t5-small-finetuned-samsum")
-
+
+    # Method to summarize our comversation
     async def summarize(self, input: str, words: int):
         logging.info(f"{input} {words}")
         input_ids = self.tokenizer(input, return_tensors="pt").input_ids
         outputs = self.model.generate(input_ids, max_length=words)
         decoded_output = self.tokenizer.decode(
-            outputs[0], skip_special_tokens=True
+            outputs[0], skip_special_tokens=True,
+        )
         return f"{decoded_output}"
 
     def conversation_summary_handler(self) -> ConversationHandler:
         handler = ConversationHandler(
-            entry_points=[
-
+            entry_points=[
+                CommandHandler(
+                    "summary", self.conversation_summary,
+                ),
+            ],
             states={
                 GET_TEXT: [MessageHandler(filters.TEXT & ~filters.COMMAND, self.process_conversation_summary)],
             },
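The summarize() path is plain seq2seq generation with the samsum-finetuned Flan-T5 checkpoint named in the diff. A standalone equivalent for reference (the sample dialogue and max_length value are assumptions):

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")

dialogue = "A: Are we still meeting tomorrow? B: Yes, 10am at the office."  # assumed sample
input_ids = tokenizer(dialogue, return_tensors="pt").input_ids
outputs = model.generate(input_ids, max_length=50)  # max_length plays the role of `words`
print(tokenizer.decode(outputs[0], skip_special_tokens=True))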
src/api.py
CHANGED
@@ -7,24 +7,23 @@ from pydantic import Field
 from agent.base import LangChainAgentBot
 from telegram.ext import Updater, CommandHandler, CallbackContext, Application, ContextTypes
 from telegram import Update
+from telegram.ext import (
+    CommandHandler,
+    ConversationHandler,
+    CallbackContext,
+    MessageHandler,
+    filters
+)
 
 
 VERBOSE = True
 
 
-class GirlFriendAIConfig():
-    elevenlabs_api_key: str = Field(
-        default="", description="Optional API KEY for ElevenLabs Voice Bot"
-    )
-    elevenlabs_voice_id: str = Field(
-        default="", description="Optional voice_id for ElevenLabs Voice Bot"
-    )
-
 async def hello(update: Update, context: CallbackContext) -> None:
     intro_text = f"🤖 Greetings human!🤗\nI'm a bot built by Rexthecoder\n🦾 I can do a lot of things"
     await update.message.reply_text(intro_text)
 
-class GirlfriendGPT(LangChainAgentBot):
+class GirlfriendGPT(LangChainAgentBot, ):
     """Deploy LangChain chatbots and connect them to Telegram."""
 
     token: str
@@ -40,33 +39,8 @@ class GirlfriendGPT(LangChainAgentBot):
 
     def handlers(self):
         summary_handler = self.conversation_summary_handler()
+        self.application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, self.process_conversation))
         self.application.add_handler(summary_handler)
         self.application.add_handler(CommandHandler('start', hello))
-        self.application.run_polling()
-
-    # async def echo(self, update: Update, context: CallbackContext) -> None:
-    #     """Echo the user message."""
-    #     await update.message.reply_text(update.message.text)
 
-
-    #     """Return tool to generate spoken version of output text."""
-    #     # return None
-    #     return GenerateSpeechTool(
-    #         client=self.client,
-    #         voice_id=self.config.elevenlabs_voice_id,
-    #         elevenlabs_api_key=self.config.elevenlabs_api_key,
-    #     )
-
-    # def get_memory(self, chat_id):
-    #     if self.context and self.context.invocable_instance_handle:
-    #         my_instance_handle = self.context.invocable_instance_handle
-    #     else:
-    #         my_instance_handle = "local-instance-handle"
-    #     memory = ConversationBufferMemory(
-    #         memory_key="chat_history",
-    #         chat_memory=ChatMessageHistory(
-    #             client=self.client, key=f"history-{chat_id}-{my_instance_handle}"
-    #         ),
-    #         return_messages=True,
-    #     )
-    #     return memory
+        self.application.run_polling()
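One detail worth noting about handlers(): python-telegram-bot checks handlers within the same group in registration order and lets only the first match handle an update, so the catch-all MessageHandler registered before summary_handler will also consume the text replies that the summary conversation's GET_TEXT state waits for. A sketch of an alternative ordering, assuming the /summary flow is meant to take precedence over the free-form chat handler:

    def handlers(self):
        summary_handler = self.conversation_summary_handler()
        # ConversationHandler first: while a /summary conversation is active,
        # it matches text updates before the catch-all below ever sees them.
        self.application.add_handler(summary_handler)
        self.application.add_handler(CommandHandler('start', hello))
        # Free-form chat handler last; it only receives text when no summary
        # conversation is in progress.
        self.application.add_handler(
            MessageHandler(filters.TEXT & ~filters.COMMAND, self.process_conversation)
        )
        self.application.run_polling()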