import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load pre-trained model and tokenizer
model_name = "mistralai/Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
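# Note: a 7B-parameter model needs roughly 28 GB of RAM in full precision;
# on a CUDA GPU, passing torch_dtype=torch.float16 to from_pretrained roughly halves that.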

def generate_response(input_text):
    """
    Generate response based on the given user messages.
    Parameters:
    - input_text (str): A single string containing all user messages.
    Returns:
    - response (str): The generated response.
    """
    # Tokenize the input text
    inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)
    # Generate a continuation; max_new_tokens caps only the newly generated tokens
    generated_ids = model.generate(**inputs, max_new_tokens=512, do_sample=True,
                                   pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(generated_ids[0][inputs["input_ids"].shape[-1]:],
                                skip_special_tokens=True)
    return response
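
# Example (for quick local testing):
# print(generate_response("Write a short poem about the sea."))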

# Define Gradio interface components
input_chat = gr.Textbox(lines=5, label="Input Chat", placeholder="Enter chat messages...")
output_response = gr.Textbox(label="Generated Response", placeholder="Generated response will appear here...")

# Create Gradio interface
gr.Interface(generate_response, input_chat, output_response,
             title="Chat Response Generation",
             description="Generate responses based on user messages using Mistral AI model.",
             theme="default",
             allow_flagging="never").launch()
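# Optional: launch(share=True) creates a temporary public URL when running locally.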