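"""Context-augmented Q&A bot built with Gradio, langchain, and llama_index.

Paste context text on the first tab to build an in-memory GPTSimpleVectorIndex
(using local HuggingFace embeddings); the second tab chats against that index
via GPT-3.5. Written against the pre-0.5 llama_index API.
"""
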
import os

import gradio as gr
from langchain import OpenAI
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import (
    Document,
    GPTSimpleVectorIndex,
    LangchainEmbedding,
    LLMPredictor,
    PromptHelper,
)


# Use a local HuggingFace model for embeddings; only LLM calls go to the OpenAI API.
hfemb = HuggingFaceEmbeddings()
embed_model = LangchainEmbedding(hfemb)

def build_the_bot(input_text):
    # Prompt constraints for llama_index's PromptHelper:
    max_input_size = 4096  # maximum LLM input size
    num_outputs = 256  # number of output tokens
    max_chunk_overlap = 20  # maximum chunk overlap
    chunk_size_limit = 600  # chunk size limit

    prompt_helper = PromptHelper(
        max_input_size,
        num_outputs,
        max_chunk_overlap,
        chunk_size_limit=chunk_size_limit,
    )

    # Define the LLM used to answer queries against the index.
    llm_predictor = LLMPredictor(
        llm=OpenAI(temperature=1, model_name="gpt-3.5-turbo", max_tokens=num_outputs)
    )

    # Wrap the raw context text in a Document and build the vector index,
    # exposed as a module-level global so chat() can query it.
    documents = [Document(t) for t in [input_text]]
    global index
    index = GPTSimpleVectorIndex(
        documents,
        embed_model=embed_model,
        llm_predictor=llm_predictor,
        prompt_helper=prompt_helper,
    )
    return "Index built successfully!"

def set_api_key(api_key):
    # langchain's OpenAI wrapper reads the key from the OPENAI_API_KEY env var,
    # so set it before building or querying the index.
    os.environ["OPENAI_API_KEY"] = api_key
    return "API key has been set"


def chat(chat_history, user_input):
    # Guard against querying before the index has been built.
    if "index" not in globals():
        yield chat_history + [(user_input, "Please build the bot first (Input Context Information tab).")]
        return
    bot_response = index.query(user_input)
    # Stream the answer back one character at a time so the chat UI updates live.
    response = ""
    for letter in bot_response.response:
        response += letter
        yield chat_history + [(user_input, response)]


with gr.Blocks() as demo:
    gr.Markdown(
        "This application lets you give GPT-3.5 (ChatGPT) as much context as you want. "
        "Paste the text you want GPT to use into the context box; a llama_index vector "
        "index will be built, which allows the model to answer queries over your "
        "contextual information."
    )
    with gr.Tab("Input Context Information"):
        openai_api_key = gr.Textbox(label="OpenAI API Key", type="password")
        api_out = gr.Textbox(label="API key status")
        api_button = gr.Button("Set API Key")
        api_button.click(set_api_key, openai_api_key, api_out)
        text_input = gr.Textbox(label="Input Context Documents")
        text_output = gr.Textbox(
            label="Status (building the index may take a few minutes for long contexts)"
        )
        text_button = gr.Button("Build the Bot!")
        text_button.click(build_the_bot, text_input, text_output)

    with gr.Tab("Bot With Context"):
        chatbot = gr.Chatbot()
        message = gr.Textbox(value="What is the context document about?", label="Your question")
        message.submit(chat, [chatbot, message], chatbot)

# queue() is required so the generator-based chat() can stream partial responses.
demo.queue().launch(debug=True)
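
# Minimal usage sketch (assuming this file is saved as app.py):
#   $ export OPENAI_API_KEY=...   # or set it on the first tab
#   $ python app.py
# Gradio prints a local URL; build the bot on the first tab, then chat on the second.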