HackPig520 committed on
Commit
fd755ac
1 Parent(s): 97adf7c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -127
app.py CHANGED
@@ -1,137 +1,124 @@
1
  import gradio as gr
2
- import os
3
- import sys
4
- import json
5
- import requests
6
 
7
- MODEL = "gpt-3.5-turbo"
8
- API_URL = os.getenv("API_URL")
9
- DISABLED = os.getenv("DISABLED") == 'True'
10
- OPENAI_API_KEY = os.getenv("OPEN_API_KEY")
11
- print (API_URL)
12
- print (OPENAI_API_KEY)
13
 
14
def exception_handler(exception_type, exception, traceback):
    """Replacement excepthook: print only 'ExceptionType: message', no traceback."""
    # Modernized from dated "%s: %s" % (...) formatting to an f-string.
    print(f"{exception_type.__name__}: {exception}")
# Keep user-facing errors terse: install the hook and suppress traceback output.
sys.excepthook = exception_handler
sys.tracebacklimit = 0
 
 
 
 
 
 
 
 
18
 
19
def parse_codeblock(text):
    """Render fenced ``` blocks as <pre><code> HTML; escape angle brackets elsewhere.

    Non-fence lines after the first are prefixed with <br/> and have < / >
    HTML-escaped; the very first line is passed through untouched.
    """
    rendered = []
    for idx, raw in enumerate(text.split("\n")):
        if "```" in raw:
            # A bare fence closes a code block; anything else opens one,
            # using the text after the fence as the language class.
            if raw == "```":
                rendered.append('</code></pre>')
            else:
                rendered.append(f'<pre><code class="{raw[3:]}">')
        elif idx > 0:
            rendered.append("<br/>" + raw.replace("<", "&lt;").replace(">", "&gt;"))
        else:
            rendered.append(raw)
    return "".join(rendered)
31
-
32
def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request:gr.Request):
    """Stream a chat completion from API_URL into the chatbot.

    inputs: the new user message.  top_p / temperature: sampling parameters
    (applied only from the second turn on; the first turn uses fixed 1.0/1.0).
    chat_counter: number of completed turns so far.  history: flat list of
    alternating user/assistant strings.  Yields tuples of (chatbot pairs,
    history, chat_counter, raw response, and two gr.update objects that
    lock/unlock the input controls while streaming).
    """
    # Default (first-turn) payload: only the new message, fixed sampling params.
    payload = {
        "model": MODEL,
        "messages": [{"role": "user", "content": f"{inputs}"}],
        "temperature" : 1.0,
        "top_p":1.0,
        "n" : 1,
        "stream": True,
        "presence_penalty":0,
        "frequency_penalty":0,
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}",
        # NOTE(review): forwards the caller's request headers upstream —
        # presumably for logging/abuse tracking; confirm the backend uses this key.
        "Headers": f"{request.kwargs['headers']}"
    }

    # print(f"chat_counter - {chat_counter}")
    if chat_counter != 0 :
        # Rebuild the full conversation: even indices in `history` are user
        # turns, odd indices are assistant turns.
        messages = []
        for i, data in enumerate(history):
            if i % 2 == 0:
                role = 'user'
            else:
                role = 'assistant'
            message = {}
            message["role"] = role
            message["content"] = data
            messages.append(message)

        # Append the new user message and switch to the caller's sampling params.
        message = {}
        message["role"] = "user"
        message["content"] = inputs
        messages.append(message)
        payload = {
            "model": MODEL,
            "messages": messages,
            "temperature" : temperature,
            "top_p": top_p,
            "n" : 1,
            "stream": True,
            "presence_penalty":0,
            "frequency_penalty":0,
        }

    chat_counter += 1

    history.append(inputs)
    token_counter = 0
    partial_words = ""
    counter = 0

    try:
        # make a POST request to the API endpoint using the requests.post method, passing in stream=True
        response = requests.post(API_URL, headers=headers, json=payload, stream=True)
        response_code = f"{response}"

        for chunk in response.iter_lines():
            #Skipping first chunk
            if counter == 0:
                counter += 1
                continue
            #counter+=1
            # check whether each line is non-empty
            if chunk.decode() :
                chunk = chunk.decode()
                # decode each line as response data is in bytes
                # chunk[6:] skips the leading transport prefix before the JSON
                # payload; only chunks carrying a "content" delta are appended.
                if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
                    partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                    # First token starts a new assistant entry; later tokens
                    # overwrite it with the growing partial reply.
                    if token_counter == 0:
                        history.append(" " + partial_words)
                    else:
                        history[-1] = partial_words
                    token_counter += 1
                    # Emit the partial transcript with inputs locked while streaming.
                    yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=False), gr.update(interactive=False) # resembles {chatbot: chat, state: history}
    except Exception as e:
        print (f'error found: {e}')
    # Final emission re-enables the input controls regardless of success.
    yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=True), gr.update(interactive=True)
    print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter}))
112
-
113
-
114
def reset_textbox():
    """Clear the input box and disable both controls while a request is in flight."""
    cleared = gr.update(value='', interactive=False)
    locked = gr.update(interactive=False)
    return cleared, locked
116
-
117
- title = """<h1 align="center">GPT-3.5 Turbo Free (4K token limit, Long-Term Availability)</h1>"""
118
- if DISABLED:
119
- title = """<h1 align="center" style="color:red">This app has reached OpenAI's usage limit. Please check back tomorrow.</h1>"""
120
- description = """
121
- In this app, you can explore the outputs of a gpt-4 turbo LLM.
122
- """
123
-
124
- theme = gr.themes.Default(primary_hue="green")
125
-
126
- with gr.Blocks(theme=theme) as app:
127
- gr.HTML(title)
128
- with gr.Column(elem_id = "col_container", visible=False) as main_block:
129
- chatbot = gr.Chatbot(elem_id='chatbot') #c
130
- inputs = gr.Textbox(placeholder= "Hi there!", label= "Type some and press Enter") #t
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
 
132
- with gr.Accordion("Parameters", open=False):
133
- top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
134
- temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
135
- chat_counter = gr.Number(value=0, visible=False, precision=0)
136
 
137
- app.launch(share=False)
 
 
 
1
  import gradio as gr
2
+ import random
3
+ from gpti import bing, gpt
 
 
4
 
5
def error_alert(message):
    """Surface *message* to the user as a non-blocking Gradio warning toast."""
    gr.Warning(message)
 
 
 
 
7
 
8
# Build the Gradio UI: description, chat history, provider selector, model
# dropdown, message box, and a clear button.
with gr.Blocks() as demo:
    gr.Markdown("""
    # ChatGPT
    Interact with GPT-3.5-turbo, GPT-3.5, or Bing to explore a world of intelligent answers and informative discoveries. You can also visit my [website](https://aryahcr.cc/) where you can generate multiple images and more.
    """)
    chatbot = gr.Chatbot()  # conversation history as [user, assistant] pairs
    radio = gr.Radio(["ChatGPT", "Bing"], value="ChatGPT", label="Select the AI model you want to chat with", info="AI")
    # Model dropdown; its choices are swapped by change_model when the radio changes.
    drp = gr.Dropdown(
        interactive=True, choices=["gpt-4", "gpt-3.5-turbo"], value="gpt-4", label="Select Model", info="ChatGPT", visible=True
    )
    msg = gr.Textbox(placeholder="Message", show_label=False)
    clear = gr.ClearButton([msg, chatbot])
20
 
21
+ def change_model(req):
22
+ match req.lower():
23
+ case "bing":
24
+ return gr.Dropdown(
25
+ interactive=True, choices=["Balanced", "Creative", "Precise"], value="Balanced", label="Select Model", info="Bing", visible=True
26
+ )
27
+ case "chatgpt":
28
+ return gr.Dropdown(
29
+ interactive=True, choices=["gpt-3.5-turbo", "gpt-3.5-turbo"], value="gpt-4", label="Select Model", info="ChatGPT", visible=True
30
+ )
31
+ case _:
32
+ return gr.Dropdown(
33
+ visible=False
34
+ )
 
 
 
 
 
 
 
 
 
 
35
 
36
def user_msg(message, history):
    """Record the user's message with a pending reply slot and clear the textbox."""
    extended = history + [[message, None]]
    return "", extended
38
+
39
def strm_message(history, option, model):
    """Stream the assistant's reply for the last user turn into *history*.

    history: chatbot pairs [[user, assistant], ...]; the last pair's assistant
    slot is filled in (or set to None on error).  option: the radio value
    ("ChatGPT" or "Bing").  model: the dropdown value for the chosen provider.
    Yields the updated history after each received chunk.
    """
    # BUG FIX: the original condition parsed as a conditional expression
    # (`option.lower() if option is not None else "" and ...`) so the
    # membership test was never evaluated; restore the intended check.
    model_ai = None
    if option is not None and option.lower() in ["chatgpt", "bing"]:
        model_ai = model
    ai_option = option if option is not None else "chatgpt"

    # Index of the last chat turn (the original derived this with a
    # `for x in range(len(history)): cnt = x` loop).
    cnt = max(len(history) - 1, 0)

    # Flatten the pairs into the role/content message list the API expects.
    # BUG FIX: the original appended the assistant reply before the user
    # message of the same pair, reversing the order of every turn.
    messages_history = []
    for user, assistant in list(history):
        if user != None:
            messages_history.append({
                "role": "user",
                "content": user
            })
        if assistant != None:
            messages_history.append({
                "role": "assistant",
                "content": assistant
            })

    res = None
    if ai_option.lower() == "chatgpt":
        try:
            res = gpt.v1(messages=messages_history, model=model_ai, markdown=False)

            if res.error != None:
                error_alert("The error has occurred. Please try again.")
                history[cnt][1] = None
                yield history
            else:
                res_bot = res.result
                if res_bot.get("gpt") != None:
                    history[cnt][1] = res_bot.get("gpt")
                    yield history
                else:
                    error_alert("The error has occurred. Please try again.")
                    history[cnt][1] = None
                    yield history
        except Exception:
            error_alert("The error has occurred. Please try again.")
            history[cnt][1] = None
            yield history
    elif ai_option.lower() == "bing":
        try:
            res = bing(messages=messages_history, conversation_style=model_ai, markdown=False, stream=True)

            if res.error != None:
                error_alert("The error has occurred. Please try again.")
                history[cnt][1] = None
                yield history
            else:
                msg_x = None
                for chunk in res.stream():
                    # NOTE(review): condition kept from the original — it requires
                    # an "error" key that is present but not True; confirm against
                    # gpti's stream schema.
                    if chunk.get("error") != None and chunk.get("error") != True and chunk.get("message") != None:
                        msg_x = chunk.get("message")
                        history[cnt][1] = msg_x
                        yield history
                if msg_x != None:
                    history[cnt][1] = msg_x
                    yield history
                else:
                    error_alert("The error has occurred. Please try again.")
                    msg_x = None
                    history[cnt][1] = None
                    yield history
        except Exception:
            error_alert("The error has occurred. Please try again.")
            history[cnt][1] = None
            yield history
    else:
        error_alert("You haven't selected an AI model to start")
        history[cnt][1] = None
        yield history
114
+
115
# Wire events: the radio toggles the model dropdown; submitting the textbox
# first records the user turn, then streams the assistant reply into the chat.
radio.change(fn=change_model, inputs=radio, outputs=drp)

msg.submit(user_msg, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=True).then(
    strm_message, [chatbot, radio, drp], chatbot
)
120
 
 
 
 
 
121
 
122
# Enable request queuing (required for streaming/generator handlers).
demo.queue()
if __name__ == "__main__":
    demo.launch()