File size: 1,598 Bytes
b50242b
10079ba
b50242b
 
558ed59
10079ba
 
558ed59
b50242b
558ed59
 
 
cc37a15
558ed59
 
 
 
cc37a15
558ed59
 
 
 
cc37a15
558ed59
 
10079ba
558ed59
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import json

# Load the pre-trained conversational model and its tokenizer once at startup.
model_name = "microsoft/DialoGPT-large"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Load JSON files containing question/answer pairs; each file is expected to
# hold a list of {"question": ..., "answer": ...} objects — TODO confirm schema.
json_files = ["fileone.json", "filesecond.json", "filethird.json", "filefourth.json", "filefifth.json"]
question_answer_pairs = []

for file_name in json_files:
    # JSON is UTF-8 by specification; pass the encoding explicitly instead of
    # relying on the platform default (which is e.g. cp1252 on Windows).
    with open(file_name, "r", encoding="utf-8") as file:
        data = json.load(file)
        question_answer_pairs.extend(data)

def generate_response(input_text):
    """Return a reply for *input_text*.

    First scans the loaded question/answer pairs for a known question that
    appears (case-insensitively) inside the input and returns its canned
    answer. Otherwise falls back to free-form generation with DialoGPT.
    """
    lowered = input_text.lower()
    for question_answer_pair in question_answer_pairs:
        if question_answer_pair["question"].lower() in lowered:
            return question_answer_pair["answer"]

    # If no specific answer found, use DialoGPT model.
    # DialoGPT's documented prompt format terminates the user turn with the
    # EOS token; without it the model generates noticeably worse replies.
    input_ids = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")
    response_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # generate() returns prompt + continuation; decode only the newly
    # generated tokens, otherwise the user's input is echoed back as a
    # prefix of every reply (the original bug).
    new_tokens = response_ids[0][input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# --- Gradio front-end -------------------------------------------------------
# Widget and metadata configuration for the chat UI.
inputs = gr.Textbox(lines=3, label="Input")
outputs = gr.Textbox(label="Output")
title = "Chat with AI"
description = "This AI chatbot can answer your questions based on provided JSON files and also generate responses using DialoGPT-large model."
examples = [["What is your name?"]]

# Wire the response function into the interface and start the server.
demo = gr.Interface(
    fn=generate_response,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    examples=examples,
)
demo.launch()