Spaces:
Runtime error
Runtime error
noelfranthomas
committed on
Commit
•
d501ce0
1
Parent(s):
7e6f0ab
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import discord
|
2 |
+
# from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
|
3 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
4 |
+
# from transformers import pipeline
|
5 |
+
import os
|
6 |
+
from dotenv import load_dotenv
|
7 |
+
|
8 |
+
# Lite mode (question answering) — disabled; kept for reference:
# from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# model_name = "deepset/roberta-base-squad2"
# nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)

# load_dotenv()  # enable when reading TOKEN from a local .env file

# TODO
# Add chat history to DialoGPT for context

## Initialize models

# Question Answering (currently disabled — the $q command cannot answer
# until this pipeline is re-enabled)
# model_name = "deepset/roberta-base-squad2"
# nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)

# Conversational model used by the (currently commented-out) $talk command.
convo_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
convo_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# Summarizer (currently disabled)
# summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

print('Models and tokenizers successfully loaded')

# Get Discord client.
# discord.py >= 1.5 requires explicit gateway intents, and reading message
# text for the $-commands additionally needs the message_content intent
# (mandatory on discord.py 2.x) — discord.Client() with no arguments raises
# a TypeError on modern discord.py versions.
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
|
43 |
+
|
44 |
+
## Event Handlers
|
45 |
+
|
46 |
+
@client.event
async def on_ready():
    """Log a confirmation once the gateway connection is established."""
    # f-string instead of str.format; the unused local `status` that the
    # original assigned here has been removed.
    print(f'We have logged in as {client.user}')
|
50 |
+
|
51 |
+
@client.event
async def on_message(message):
    """Dispatch $-prefixed chat commands from Discord messages.

    Supported commands: $ping, $help, $q (question answering).
    Messages sent by the bot itself are ignored to avoid reply loops.
    """
    if message.author == client.user:
        return

    if message.content.startswith('$ping'):
        await message.channel.send('pong')
        print(message.author)
        print(message.author.name)
        print(message.author.avatar)

    if message.content.startswith('$help'):
        await message.channel.send('List of stable commands: ')
        await message.channel.send('COMMANDS')
        await message.channel.send('List of nightly commands: ')
        await message.channel.send('COMMANDS')

    if message.content.startswith('$q'):
        # Everything after the '$q' prefix. The original slice [4::] dropped
        # the first character of the question ('$q ' is only three characters).
        question = message.content[2:].strip()
        if not question:
            await message.channel.send("I didn't get that.")
            return
        print(question)
        QA_input = {
            # Use the user's question instead of the hard-coded sample the
            # original passed in (which ignored `question` entirely).
            'question': question,
            'context': 'The option to convert models between FARM and transformers gives freedom to the user and let people easily switch between frameworks.'
        }
        try:
            # `nlp` only exists when the question-answering pipeline at the
            # top of the file is enabled; the original crashed with a
            # NameError here. Fail gracefully instead.
            res = nlp(QA_input)
        except NameError:
            await message.channel.send('The question-answering model is not loaded.')
            return
        await message.channel.send(res['answer'])
        # str() conversion: res['score'] is a float, and 'score: ' + float
        # raised a TypeError in the original.
        await message.channel.send('score: ' + str(res['score']))

    # Disabled: summarization command (requires the `summarizer` pipeline).
    # if message.content.startswith('$TLDR'):
    #     query = message.content[6::]
    #     if(len(query) < 2):
    #         messages = [message.content async for message in message.channel.history(limit=30)]
    #         messages_str = ".".join(messages)
    #         summary = summarizer(messages_str, max_length=130, min_length=30, do_sample=False)
    #         await message.channel.send('Summary: ' + summary[0]['summary_text'])
    #     elif(len(query) < 35):
    #         await message.channel.send('Too short to summarize.')
    #     else:
    #         summary = summarizer(query, max_length=130, min_length=30, do_sample=False)
    #         await message.channel.send('Summary: ' + summary[0]['summary_text'])

    # Disabled: conversational command (uses the loaded DialoGPT model).
    # if message.content.startswith('$talk'):
    #     chat = message.content[6::]
    #     if(len(chat) == 0):
    #         await message.channel.send("I didn't get that.")
    #         return
    #     else:
    #         print(chat)
    #         input_ids = convo_tokenizer.encode(chat + convo_tokenizer.eos_token, return_tensors='pt')
    #         return_ids = convo_model.generate(input_ids, max_length=1000, pad_token_id=convo_tokenizer.eos_token_id)
    #         response = "{}".format(convo_tokenizer.decode(return_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True))
    #         await message.channel.send(response)
|
129 |
+
|
130 |
+
# Fail fast with a clear message when the token is missing. The original
# str(os.environ.get('TOKEN')) turned an unset variable into the literal
# string 'None', producing an opaque Discord authentication failure.
token = os.environ.get('TOKEN')
if not token:
    raise RuntimeError('TOKEN environment variable is not set')
client.run(token)
# client.run(str(os.getenv('TOKEN')))
|