|
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
|
|
|
# Hugging Face model ID for the instruction-tuned Mistral 7B chat model.
model_name = "mistralai/Mistral-7B-Instruct-v0.2"

# Tokenizer converts chat text to/from token IDs for this model.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# NOTE(review): loads on CPU in full precision by default — a 7B model needs
# roughly 28 GB of RAM this way; confirm the deployment host can afford it,
# or consider dtype/device options when loading.
model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
def generate_response(input_text):

    """
    Generate a model response for the given user messages.

    Parameters:

    - input_text (str): A single string containing all user messages.

    Returns:

    - response (str): The generated response text (the prompt is not echoed back).
    """

    # Tokenize with an attention mask (tokenizer.__call__ returns both
    # input_ids and attention_mask); truncate overly long prompts to the
    # budget this app allows.
    encoded = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)

    # max_new_tokens bounds only the generated continuation. The original
    # max_length=1024 counted prompt tokens too, silently shrinking the space
    # left for the answer. Passing the attention mask and an explicit
    # pad_token_id avoids transformers' warnings for models with no pad token.
    generated_ids = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_new_tokens=512,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

    # generate() returns prompt + continuation; slice off the prompt tokens so
    # the caller sees only the new text (the original echoed the prompt back).
    prompt_len = encoded["input_ids"].shape[1]
    response = tokenizer.decode(generated_ids[0][prompt_len:], skip_special_tokens=True)

    return response
|
|
|
|
|
# --- Gradio UI ---------------------------------------------------------------
# NOTE(review): the original file used `gr` without importing it, which raises
# NameError at runtime; `import gradio as gr` is added at the top of the file.

# Multi-line input box for the user's chat messages.
input_chat = gr.Textbox(lines=5, label="Input Chat", placeholder="Enter chat messages...")

# Output box where the model's reply is displayed.
output_response = gr.Textbox(label="Generated Response", placeholder="Generated response will appear here...")

# Wire the generation function to the UI and start the local web server.
demo = gr.Interface(
    generate_response,
    input_chat,
    output_response,
    title="Chat Response Generation",
    description="Generate responses based on user messages using Mistral AI model.",
    theme="default",
    allow_flagging="never",
)
demo.launch()
|
|