import os

# OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
# HF_Key = os.environ['HF_Key']

# Report only whether the secrets are present; never print the raw key
# values, since stdout ends up in the public Space logs.
print('OPENAI_API_KEY' in os.environ)
print('HF_Key' in os.environ)
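
# A minimal fail-fast sketch (an assumption, not part of the original app):
# resolve the keys once and stop early with a clear error if either is missing.
# OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
# HF_Key = os.environ.get('HF_Key')
# if not OPENAI_API_KEY or not HF_Key:
#     raise RuntimeError("Set OPENAI_API_KEY and HF_Key in the Space secrets.")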

import openai
import json



# from llama_index import GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext, QuestionAnswerPrompt
# from langchain import OpenAI
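# NOTE: the prototype below targets the legacy llama_index (pre-0.5/0.6) and
# langchain APIs; GPTSimpleVectorIndex, LLMPredictor, and QuestionAnswerPrompt
# were renamed or removed in later releases, so re-enabling this code assumes
# old pinned versions of both libraries.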


# # Loading the grounding data stored on the Hugging Face Space

# from huggingface_hub import HfFileSystem
# fs = HfFileSystem(token=HF_Key)

# text_list = fs.ls("datasets/GoChat/Gochat247_Data/Data", detail=False)

# data = fs.read_text(text_list[0])

# from llama_index import Document
# doc = Document(data)
# docs = [doc]


# # define LLM
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))

# # define prompt helper
# # set maximum input size
# max_input_size = 4096
# # set number of output tokens
# num_output = 256
# # set maximum chunk overlap
# max_chunk_overlap = 20
# prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)

# service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)

# index = GPTSimpleVectorIndex.from_documents(docs, service_context=service_context)
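
# Quick sanity check (hypothetical query string) before wiring up the chat UI:
# print(index.query("What does GoChat247 do?"))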

   
# ## Chatbot class: generates responses and manages chat history
# class Chatbot:

#     def __init__(self, index):
#         self.index = index
#         openai.api_key = OPENAI_API_KEY
#         self.chat_history = []
          
#         QA_PROMPT_TMPL = (
#         "Answer without the 'Answer:' prefix.\n"
#         "You are in a conversation with a visitor to Gochat247's web site.\n"
#         "The user started this conversation to learn more about Gochat247.\n"
#         "You will act as Gochat247's virtual AI bot. Be friendly and welcoming.\n"
#         "The context of the conversation should always be limited to learning "
#         "about Gochat247 as a company providing Business Process Outsourcing "
#         "and AI customer experience solutions.\n"
#         "Below is the previous chat with the user:\n"
#         "---------------------\n"
#         "{context_str}"
#         "\n---------------------\n"
#         "Given the context information and the chat history, and not prior knowledge,\n"
#         "answer the question: {query_str}\n"
#         "It is OK if you do not know the answer; ask for more information instead.\n"
#         "Please provide a brief, concise, but friendly response.")
          
#         self.QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL)
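#         # At query time the index fills {context_str} with retrieved text
#         # and {query_str} with the question passed to query().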
        

#     def generate_response(self, user_input):
#         # Fold the last five turns of history into the query so the index
#         # sees the conversational context alongside the new question.
#         history = "\n".join(f"{m['role']}: {m['content']}" for m in self.chat_history[-5:])
#         query = f"{history}\nUser: {user_input}" if history else user_input
#         response = self.index.query(query, text_qa_template=self.QA_PROMPT)

#         message = {"role": "assistant", "content": response.response}
#         self.chat_history.append({"role": "user", "content": user_input})
#         self.chat_history.append(message)
#         return message
   
#     def load_chat_history(self, filename):
#         try:
#             with open(filename, 'r') as f:
#                 self.chat_history = json.load(f)
#         except FileNotFoundError:
#             pass

#     def save_chat_history(self, filename):
#         with open(filename, 'w') as f:
#             json.dump(self.chat_history, f)
  
# ## Instantiate the chatbot with the vector index

# bot = Chatbot(index=index)
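
# Example persistence round-trip (hypothetical filename) for keeping
# conversations across restarts of the Space:
# bot.load_chat_history('chat_history.json')
# bot.save_chat_history('chat_history.json')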

# import webbrowser

# import gradio as gr
# import time

# with gr.Blocks(theme='SebastianBravo/simci_css') as demo:
#     with gr.Column(scale=4):
#         title = 'GoChat247 AI BOT'
#         chatbot = gr.Chatbot(label='GoChat247 AI BOT')
#         msg = gr.Textbox()
#         clear = gr.Button("Clear")
        
        
#         def user(user_message, history):
#             return "", history + [[user_message, None]]

#         def agent(history):
#             last_user_message = history[-1][0]
#             agent_message = bot.generate_response(last_user_message)
#             history[-1][1] = agent_message["content"]
#             time.sleep(1)  # brief pause so the reply does not appear instantly
#             return history

#         # Submit chain: record the user turn first, then append the bot reply.
#         msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(agent, chatbot, chatbot)
#         clear.click(lambda: None, None, chatbot, queue=False)
#         print(webbrowser.get())  # debug: shows which browser controller is available

# # Handling the dark theme



# # def apply_dark_theme(url):
# #     if not url.endswith('?__theme=dark'):
# #         webbrowser.open_new(url + '?__theme=dark')

# # gradioURL = 'http://localhost:7860/'
# # apply_dark_theme(gradioURL)

# if __name__ == "__main__":
#     demo.launch()
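
# To get a temporary public URL when testing outside the Space (an assumption,
# not part of the original), pass the standard Gradio flag:
# demo.launch(share=True)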