"""Discord bot that relays user messages to a Hugging Face hosted LLM."""

import asyncio
import logging
import os

import discord
import gradio as gr  # noqa: F401  -- kept from original; presumably used elsewhere in the app
from huggingface_hub import InferenceClient

# Module-level logger (PEP 282 convention) instead of the root logger.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s:%(levelname)s:%(name)s: %(message)s",
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger(__name__)

# Discord intents: since discord.py 2.x / API v10, the privileged
# `message_content` intent is required to read message text; with only
# `messages` enabled, `message.content` arrives empty.
intents = discord.Intents.default()
intents.messages = True
intents.message_content = True

# Hugging Face Inference API client; HF token comes from the environment.
client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))


class MyClient(discord.Client):
    """Minimal Discord client that answers every incoming message via the LLM."""

    async def on_ready(self):
        logger.info("Logged on as %s!", self.user)

    async def on_message(self, message):
        # Ignore our own messages, otherwise the bot replies to itself forever.
        if message.author == self.user:
            logger.info("Ignoring message from self.")
            return
        system_message = "This is an automated assistant response."
        history = []
        response = await generate_response(
            message.content, history, system_message, 4000, 0.7, 0.95
        )
        await message.channel.send(response)


async def generate_response(user_input, history, system_message, max_tokens, temperature, top_p):
    """Build the chat transcript and return the model's full reply.

    Args:
        user_input: The latest user message text.
        history: Iterable of (user_text, assistant_text) pairs; falsy entries
            are skipped (matching the original's ``val[0]``/``val[1]`` checks).
        system_message: Appended to the fixed system prefix.
        max_tokens / temperature / top_p: Sampling parameters forwarded to
            ``InferenceClient.chat_completion``.

    Bug fixes vs. the original:
    * The original called ``next()`` on the stream and returned only the FIRST
      delta chunk (one token); we now consume the whole stream and join it.
    * The blocking HTTP call is moved off the event loop with
      ``asyncio.to_thread`` so the bot stays responsive while generating.
    """
    system_prefix = "You are a sentient AI assistant on Discord."
    messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_input})

    def _collect():
        # Runs in a worker thread: drain the streamed completion into one string.
        parts = []
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            delta = chunk.choices[0].delta.content
            if delta:  # final chunks may carry no content
                parts.append(delta)
        return "".join(parts).strip()

    return await asyncio.to_thread(_collect)


if __name__ == "__main__":
    # SECURITY: the original hard-coded a live bot token in source. Credentials
    # must never be committed -- read the token from the environment instead
    # (and revoke the leaked token in the Discord developer portal).
    bot_token = os.getenv("DISCORD_TOKEN")
    if not bot_token:
        raise RuntimeError("Set the DISCORD_TOKEN environment variable.")
    discord_client = MyClient(intents=intents)
    discord_client.run(bot_token)