papasega committed on
Commit
0a0f8d3
1 Parent(s): 1304134

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -132
app.py CHANGED
@@ -1,142 +1,38 @@
1
- # app.py
2
- from typing import Any, Dict, List, Tuple
3
- from ansi2html import Ansi2HTMLConverter
4
  import gradio as gr
5
- from gradio.themes.utils import colors, fonts, sizes
6
 
7
- class BaseAgent:
8
- """Dummy BaseAgent for demonstration purposes."""
9
-
10
- def stream_chat(self, message: str):
11
- class ResponseGen:
12
- def __iter__(self):
13
- for token in message:
14
- yield token
15
- return type("Response", (object,), {"response_gen": ResponseGen()})
16
 
17
- def reset(self):
18
- pass
 
19
 
20
- class GradioAgentChatPack:
21
- """Gradio chatbot to chat with your own Agent."""
 
 
22
 
23
- def __init__(self, agent: BaseAgent, **kwargs: Any) -> None:
24
- """Init params."""
25
- self.agent = agent
26
- self.thoughts = ""
27
- self.conv = Ansi2HTMLConverter()
28
 
29
- def get_modules(self) -> Dict[str, Any]:
30
- """Get modules."""
31
- return {"agent": self.agent}
 
 
 
 
 
32
 
33
- def _handle_user_message(self, user_message, history):
34
- """Handle the user submitted message. Clear message box, and append to the history."""
35
- return "", [*history, (user_message, "")]
36
 
37
- def _generate_response(self, chat_history: List[Tuple[str, str]]) -> Tuple[str, List[Tuple[str, str]]]:
38
- """Generate the response from agent, and capture the stdout of the ReActAgent's thoughts."""
39
- class Capturing(list):
40
- def __enter__(self):
41
- self._stdout = sys.stdout
42
- sys.stdout = self
43
- return self
44
- def __exit__(self, *args):
45
- sys.stdout = self._stdout
46
- def write(self, x):
47
- self.append(x)
48
- def flush(self):
49
- pass
50
 
51
- import sys
52
- with Capturing() as output:
53
- response = self.agent.stream_chat(chat_history[-1][0])
54
- ansi = "\n========\n".join(output)
55
- html_output = self.conv.convert(ansi)
56
- for token in response.response_gen:
57
- chat_history[-1][1] += token
58
- yield chat_history, str(html_output)
59
 
60
- def _reset_chat(self) -> Tuple[str, str]:
61
- """Reset the agent's chat history. And clear all dialogue boxes."""
62
- self.agent.reset()
63
- return "", "", "" # clear textboxes
64
-
65
- def run(self, *args: Any, **kwargs: Any) -> Any:
66
- """Run the pipeline."""
67
- llama_theme = gr.themes.Soft(
68
- primary_hue=colors.purple,
69
- secondary_hue=colors.pink,
70
- neutral_hue=colors.gray,
71
- spacing_size=sizes.spacing_md,
72
- radius_size=sizes.radius_md,
73
- text_size=sizes.text_lg,
74
- font=(
75
- fonts.GoogleFont("Quicksand"),
76
- "ui-sans-serif",
77
- "sans-serif",
78
- ),
79
- font_mono=(
80
- fonts.GoogleFont("IBM Plex Mono"),
81
- "ui-monospace",
82
- "monospace",
83
- ),
84
- )
85
- llama_theme.set(
86
- body_background_fill="#FFFFFF",
87
- body_background_fill_dark="#000000",
88
- button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
89
- button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
90
- button_primary_text_color="white",
91
- button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
92
- slider_color="*secondary_300",
93
- slider_color_dark="*secondary_600",
94
- block_title_text_weight="600",
95
- block_border_width="3px",
96
- block_shadow="*shadow_drop_lg",
97
- button_shadow="*shadow_drop_lg",
98
- button_large_padding="32px",
99
- )
100
-
101
- demo = gr.Blocks(
102
- theme=llama_theme,
103
- css="#box { height: 420px; overflow-y: scroll !important} #logo { align-self: right }",
104
- )
105
- with demo:
106
- with gr.Row():
107
- gr.Markdown(
108
- "# Gradio Chat With Your Agent Powered by LlamaIndex and LlamaHub 🦙\n"
109
- "This Gradio app allows you to chat with your own agent (`BaseAgent`).\n"
110
- )
111
- gr.Markdown(
112
- "[![Alt text](https://d3ddy8balm3goa.cloudfront.net/other/llama-index-light-transparent-sm-font.svg)](https://llamaindex.ai)",
113
- elem_id="logo",
114
- )
115
- with gr.Row():
116
- chat_window = gr.Chatbot(
117
- label="Message History",
118
- scale=3,
119
- )
120
- console = gr.HTML(elem_id="box")
121
- with gr.Row():
122
- message = gr.Textbox(label="Write A Message", scale=4)
123
- clear = gr.ClearButton()
124
-
125
- message.submit(
126
- self._handle_user_message,
127
- [message, chat_window],
128
- [message, chat_window],
129
- queue=False,
130
- ).then(
131
- self._generate_response,
132
- chat_window,
133
- [chat_window, console],
134
- )
135
- clear.click(self._reset_chat, None, [message, chat_window, console])
136
-
137
- demo.launch(server_name="0.0.0.0", server_port=8080)
138
-
139
- if __name__ == "__main__":
140
- agent = BaseAgent() # Instantiate your agent here
141
- chat_pack = GradioAgentChatPack(agent)
142
- chat_pack.run()
 
1
+ from llama_index.llms.ollama import Ollama
2
+ from llama_index.core.llms import ChatMessage
 
3
  import gradio as gr
 
4
 
5
# Shared Ollama-backed LLM client; used by both the completion and chat handlers.
llm = Ollama(model="llama3", request_timeout=120.0)
 
 
 
 
 
 
 
 
6
 
7
def get_completion(prompt):
    """Run a single (non-chat) completion through the shared Ollama LLM.

    Args:
        prompt: The raw prompt string to complete.

    Returns:
        The completion text as a plain string, so Gradio text outputs
        render it directly instead of relying on the response object's
        implicit string coercion.
    """
    response = llm.complete(prompt)
    # llm.complete returns a CompletionResponse object; convert explicitly
    # rather than handing the object to the UI layer.
    return str(response)
10
 
11
def chat_with_llm(messages):
    """Send a full conversation to the shared Ollama LLM.

    Args:
        messages: Iterable of dicts, each carrying "role" and "content" keys.

    Returns:
        The raw chat response produced by ``llm.chat``.
    """
    converted = []
    for entry in messages:
        converted.append(ChatMessage(role=entry["role"], content=entry["content"]))
    return llm.chat(converted)
15
 
16
def generate_response(prompt):
    """Gradio handler for the single-completion tab; delegates to get_completion."""
    result = get_completion(prompt)
    return result
 
 
 
18
 
19
def generate_chat_response(history):
    """Gradio handler for the chat tab.

    Rebuilds the message list (system prompt plus alternating user/assistant
    turns) from the chatbot history and returns the assistant's reply.

    Args:
        history: List of (user_message, assistant_message) pairs; the
            assistant slot may be empty for the turn awaiting a reply.

    Returns:
        The assistant reply text for the latest user message.
    """
    messages = [{"role": "system", "content": "You are a pirate with a colorful personality"}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    response = chat_with_llm(messages)
    # BUG FIX: llm.chat returns a ChatResponse object, which is not
    # subscriptable — response["content"] raises TypeError. The reply
    # text lives at response.message.content.
    return response.message.content
27
 
28
# FIX: the gr.inputs / gr.outputs namespaces were deprecated in Gradio 3.x
# and removed in 4.x (and gr.inputs.Chatbot never existed). Components are
# constructed directly; input-vs-output role comes from the Interface args.
single_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
single_output = gr.Textbox()
single_interface = gr.Interface(
    fn=generate_response,
    inputs=single_input,
    outputs=single_output,
    title="LLM Single Completion",
)

chat_input = gr.Chatbot()
chat_output = gr.Textbox()
chat_interface = gr.Interface(
    fn=generate_chat_response,
    inputs=chat_input,
    outputs=chat_output,
    title="LLM Chat",
)
 
 
 
 
 
 
 
 
 
 
35
 
36
# Two-tab app: single-shot completion and multi-turn chat.
app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])

# Only start the server when executed as a script (e.g. `python app.py`),
# not when this module is imported elsewhere.
if __name__ == "__main__":
    app.launch()