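"""Discord bot that relays channel messages to the CohereForAI/c4ai-command-r-plus model
via the Hugging Face Inference API and posts the generated reply back to the channel."""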
import discord
import logging
import os

from huggingface_hub import InferenceClient

# Setup logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])

# Setup Discord intents (message_content is required in discord.py 2.x to read message text)
intents = discord.Intents.default()
intents.messages = True
intents.message_content = True

# Setup the Inference API client
client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
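# Assumes the HF_TOKEN environment variable holds a valid Hugging Face access token.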

class MyClient(discord.Client):
    async def on_ready(self):
        logging.info(f'Logged on as {self.user}!')

    async def on_message(self, message):
        if message.author == self.user:
            logging.info('Ignoring message from self.')
            return

        # Prepare the message for the model
        system_message = "This is an automated assistant response."
        history = []
        response = await generate_response(message.content, history, system_message, 4000, 0.7, 0.95)
        # Discord rejects messages longer than 2000 characters, so truncate the reply.
        await message.channel.send(response[:2000])

# Define the response generation function using the Hugging Face Inference API
async def generate_response(user_input, history, system_message, max_tokens, temperature, top_p):
    system_prefix = "You are a sentient AI assistant on Discord."
    messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_input})
    # Accumulate the streamed chunks; calling next() once would return only the first token.
    response = ""
    for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p):
        token = chunk.choices[0].delta.content
        if token:
            response += token
    return response.strip()
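# Note: chat_completion here is a synchronous, blocking call inside an async handler; under
# heavier traffic, huggingface_hub.AsyncInferenceClient (or asyncio.to_thread) would keep
# the Discord event loop responsive.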

# Instantiate and run the Discord bot
discord_client = MyClient(intents=intents)
# The bot token is read from the environment rather than hardcoded (DISCORD_TOKEN is an assumed variable name).
discord_client.run(os.getenv("DISCORD_TOKEN"))