# app.py — Gradio chat demo (author: ajeetkumar01, commit c64277b, verified)
import gradio as gr  # was commented out, but `gr` is used below to build the UI
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face model id of the instruction-tuned Mistral 7B checkpoint.
model_name = "mistralai/Mistral-7B-Instruct-v0.2"

# Load tokenizer and model once at import time so every request reuses the
# same in-memory weights (first run downloads them from the Hub).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def generate_response(input_text):
    """
    Generate a response for the given user messages.

    Parameters:
    - input_text (str): A single string containing all user messages.
      Truncated to 512 tokens before generation.

    Returns:
    - response (str): The generated text (prompt echo included), with
      special tokens stripped.
    """
    # Tokenize via __call__ so we also get the attention mask;
    # tokenizer.encode() alone would discard it and generate() would
    # have to guess which positions are padding.
    encoded = tokenizer(
        input_text,
        return_tensors="pt",
        max_length=512,
        truncation=True,
    )
    # max_new_tokens bounds only the continuation; the previous
    # max_length=1024 counted prompt tokens against the output budget.
    # Mistral has no pad token, so reuse EOS to silence the warning.
    generated_ids = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_new_tokens=512,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode the single returned sequence back into text.
    response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return response
# Widgets for the chat input and the model's reply.
input_chat = gr.Textbox(lines=5, label="Input Chat", placeholder="Enter chat messages...")
output_response = gr.Textbox(label="Generated Response", placeholder="Generated response will appear here...")

# Wire the generation function into a simple web UI and start serving.
demo = gr.Interface(
    fn=generate_response,
    inputs=input_chat,
    outputs=output_response,
    title="Chat Response Generation",
    description="Generate responses based on user messages using Mistral AI model.",
    theme="default",
    allow_flagging="never",
)
demo.launch()