Omar Solano committed on
Commit 0ebb816 • 1 Parent(s): 6a118a0

add app file

Files changed (4)
  1. README.md +11 -0
  2. gradio_anthropic.py +88 -0
  3. gradio_openai.py +73 -0
  4. requirements.txt +4 -0
README.md ADDED
@@ -0,0 +1,11 @@
+ ---
+ title: Claude Front End For API
+ emoji: 🚀
+ colorFrom: red
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.21.0
+ app_file: gradio_anthropic.py
+ pinned: false
+ license: apache-2.0
+ ---
gradio_anthropic.py ADDED
@@ -0,0 +1,88 @@
+ import logging
+ import os
+
+ import anthropic
+ import gradio as gr
+ from dotenv import load_dotenv
+
+ load_dotenv(".env")
+
+ logging.basicConfig(level=logging.INFO)
+ logging.getLogger("gradio").setLevel(logging.INFO)
+ logging.getLogger("httpx").setLevel(logging.WARNING)
+
+
+ def generate_completion(
+     input, history, api_key, model, system_prompt, temperature, max_tokens
+ ):
+     # Prefer a server-side key when one is configured; otherwise use the key
+     # entered in the UI.
+     if os.getenv("ANTHROPIC_API_KEY"):
+         api_key = os.getenv("ANTHROPIC_API_KEY")
+
+     if not api_key:
+         yield "No API key provided"
+         return
+
+     client = anthropic.Anthropic(api_key=api_key)
+
+     # Convert Gradio's [[user, assistant], ...] history into message dicts.
+     messages = []
+     if history:
+         for entry in history:
+             if len(entry) == 2:
+                 messages.append({"role": "user", "content": entry[0]})
+                 messages.append({"role": "assistant", "content": entry[1]})
+
+     # Append the current user message.
+     messages.append({"role": "user", "content": input})
+
+     # Stream the response, yielding the cumulative answer for live updates.
+     with client.messages.stream(
+         model=model,
+         max_tokens=int(max_tokens),  # slider values arrive as floats
+         temperature=temperature,
+         system=system_prompt,
+         messages=messages,
+     ) as stream:
+         answer_str = ""
+         for text in stream.text_stream:
+             answer_str += text
+             yield answer_str
+
+
+ api_key = gr.Textbox(label="API Key", type="password")
+ model = gr.Textbox(label="Model", value="claude-3-opus-20240229")
+ system_prompt = gr.Textbox(
+     label="System Prompt",
+     value="You are a world-class assistant.",
+ )
+ temperature = gr.Slider(label="Temperature", value=0.0, minimum=0.0, maximum=1.0)
+ max_tokens = gr.Slider(label="Max Tokens", value=4096, minimum=1, maximum=4096)
+
+
+ demo = gr.ChatInterface(
+     fn=generate_completion,
+     additional_inputs=[api_key, model, system_prompt, temperature, max_tokens],
+     description="Claude chatbot; add your own API key in the 'additional inputs' section",
+ )
+
+ if __name__ == "__main__":
+     demo.queue()
+     demo.launch()
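The streaming generator can also be exercised outside Gradio. A minimal sketch, not part of this commit, assuming ANTHROPIC_API_KEY is set in the environment or in the .env file that load_dotenv reads:

from gradio_anthropic import generate_completion

last = ""
for partial in generate_completion(
    input="Say hello in one sentence.",
    history=[],
    api_key="",  # UI-entered key; overridden here by ANTHROPIC_API_KEY (assumed set)
    model="claude-3-opus-20240229",
    system_prompt="You are a world-class assistant.",
    temperature=0.0,
    max_tokens=256,
):
    last = partial  # each yield is the cumulative answer so far
print(last)

Each yielded value is the full answer accumulated so far, which is the shape gr.ChatInterface expects for live-updating responses.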
gradio_openai.py ADDED
@@ -0,0 +1,73 @@
+ import logging
+
+ import gradio as gr
+ from openai import OpenAI
+
+ logging.basicConfig(level=logging.INFO)
+ logging.getLogger("gradio").setLevel(logging.INFO)
+ logging.getLogger("httpx").setLevel(logging.WARNING)
+
+
+ # The client reads the API key from the OPENAI_API_KEY environment variable.
+ client = OpenAI()
+
+
+ def generate_completion(input, history):
+     messages = [
+         {
+             "role": "system",
+             "content": "You are a world-class extractor of information from messy job postings.",
+         }
+     ]
+
+     # Convert Gradio's [[user, assistant], ...] history into message dicts.
+     if history:
+         for entry in history:
+             if len(entry) == 2:  # validate format
+                 messages.append({"role": "user", "content": entry[0]})
+                 messages.append({"role": "assistant", "content": entry[1]})
+
+     # Append the current user message.
+     messages.append({"role": "user", "content": input})
+
+     response = client.chat.completions.create(
+         model="gpt-3.5-turbo-0125",
+         messages=messages,  # type: ignore
+         stream=True,
+         temperature=0,
+         max_tokens=4000,
+     )
+
+     # Yield the cumulative answer so the chat window updates as tokens arrive.
+     answer_str: str = ""
+     for chunk in response:
+         if chunk.choices[0].delta.content is not None:
+             answer_str += chunk.choices[0].delta.content
+         yield answer_str
+
+
+ if __name__ == "__main__":
+     demo = gr.ChatInterface(fn=generate_completion)
+     demo.queue()
+     demo.launch()
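Both scripts convert Gradio's chat history into message dicts with the same loop. A hypothetical shared helper, not part of this commit, sketching how that duplication could be factored out:

# Hypothetical helper: turn Gradio's [[user, assistant], ...] history into
# the message-dict list accepted by both the OpenAI and Anthropic clients.
def history_to_messages(history):
    messages = []
    for entry in history or []:
        if len(entry) == 2:
            messages.append({"role": "user", "content": entry[0]})
            messages.append({"role": "assistant", "content": entry[1]})
    return messages

# Example: history_to_messages([["Hi", "Hello!"]])
# -> [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]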
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ openai
+ gradio
+ python-dotenv
+ anthropic