from huggingface_hub import InferenceClient
import gradio as gr
from textblob import TextBlob  # for typo correction

# Initialize the InferenceClient for the hosted Mistral instruct model
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
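# Note: if the endpoint requires authentication, huggingface_hub reads an API
# token from the HF_TOKEN environment variable, or one can be passed
# explicitly via InferenceClient(..., token=...).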

def format_prompt(message, history):
    """Format the prompt for the text generation model."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
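# For example, with history [("Hi", "Hello!")] and message "Help me",
# format_prompt returns:
#   <s>[INST] Hi [/INST] Hello!</s> [INST] Help me [/INST]
# which follows the [INST] ... [/INST] format Mistral-7B-Instruct expects.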

def correct_typos(text):
    """Correct typos in the text using TextBlob."""
    corrected_text = str(TextBlob(text).correct())
    return corrected_text
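# Illustrative example from the TextBlob docs:
#   correct_typos("I havv goood speling")  ->  "I have good spelling"
# Caveat: TextBlob's correct() can also "fix" proper nouns or technical
# terms, so it is best suited to casual chat input like this demo's.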

def generate(prompt, history):
    """Generate a streaming response using the text generation model."""
    # Correct typos in the prompt
    prompt = correct_typos(prompt)

    # Canned replies. Because this function contains `yield`, Python treats it
    # as a generator, so these answers must be yielded; a bare `return value`
    # inside a generator would end the stream without displaying anything.
    if "who created you" in prompt.lower():
        yield "I was created by Aniket Kumar and many more."
        return
    # Handle small talk
    if "how are you" in prompt.lower():
        yield "I'm an AI and don't have feelings, but I'm here to help you. How can I assist you today?"
        return

    # Sampling parameters for text generation
    generate_kwargs = dict(
        temperature=0.9,         # higher values give more varied replies
        max_new_tokens=512,      # upper bound on the length of the reply
        top_p=0.95,              # nucleus-sampling probability mass
        repetition_penalty=1.0,  # 1.0 applies no repetition penalty
        do_sample=True,
    )

    # Format the prompt
    formatted_prompt = format_prompt(prompt, history)

    # Stream the response, yielding the accumulated text so the Gradio UI
    # can render the reply token by token
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output

def create_interface():
    """Create the Gradio interface."""
    custom_css = """
    #component-7 { /* default element ID of the chat component */
      height: 800px; /* adjust the height as needed */
      flex-grow: 1;
    }
    """

    with gr.Blocks(css=custom_css) as demo:
        gr.ChatInterface(
            generate,
        )

    demo.queue().launch(debug=True)

# Run the application when executed as a script
if __name__ == "__main__":
    create_interface()
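# Launch (a sketch, assuming this file is saved as app.py):
#   pip install gradio huggingface_hub textblob
#   python -m textblob.download_corpora   # TextBlob needs its corpora once
#   python app.py
# debug=True keeps the server in the foreground and prints errors to the console.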