Update app.py
app.py
CHANGED
@@ -6,14 +6,95 @@ import re
 import sys
 from typing import List, Generator
 import os
-from langchain_openai import ChatOpenAI
 from dotenv import load_dotenv
+import threading
 
 load_dotenv()
 
+class OutputParser:
+    def __init__(self):
+        self.buffer = ""
+        self.current_agent = None
+
+    def parse_output(self, text: str) -> List[ChatMessage]:
+        messages = []
+        # Clean ANSI codes
+        cleaned_text = re.sub(r'\x1B\[[0-9;]*[mK]', '', text)
+
+        # Look for working agent declarations
+        agent_match = re.search(r'\[DEBUG\]: == Working Agent: (.*?)(?=\n|$)', cleaned_text)
+        if agent_match:
+            self.current_agent = agent_match.group(1)
+            messages.append(ChatMessage(
+                role="assistant",
+                content=f"Starting work...",
+                metadata={"title": f"🤖 {self.current_agent}"}
+            ))
+
+        # Look for task information with full task list
+        task_match = re.search(r'\[INFO\]: == Starting Task: (.*?)(?=\n\n|\n> Entering|$)', cleaned_text, re.DOTALL)
+        if task_match and self.current_agent:
+            task_content = task_match.group(1).strip()
+            messages.append(ChatMessage(
+                role="assistant",
+                content=task_content,
+                metadata={"title": f"📋 Task for {self.current_agent}"}
+            ))
+
+        # Look for thought processes
+        thought_match = re.search(r'Thought: (.*?)(?=\nAction:|$)', cleaned_text, re.DOTALL)
+        if thought_match and self.current_agent:
+            thought_content = thought_match.group(1).strip()
+            messages.append(ChatMessage(
+                role="assistant",
+                content=thought_content,
+                metadata={"title": f"🤔 {self.current_agent}'s Thoughts"}
+            ))
+
+        # Look for final answers from non-Editor agents
+        if "Final Answer:" in cleaned_text and self.current_agent != "Editor":
+            answer_match = re.search(r'Final Answer:\s*(.*?)(?=\n> Finished chain|$)', cleaned_text, re.DOTALL)
+            if answer_match:
+                answer_content = answer_match.group(1).strip()
+                messages.append(ChatMessage(
+                    role="assistant",
+                    content=answer_content,
+                    metadata={"title": f"💡 Output from {self.current_agent}"}
+                ))
+
+        # Special handling for Editor's final answer (the final article)
+        elif "Final Answer:" in cleaned_text and self.current_agent == "Editor":
+            answer_match = re.search(r'Final Answer:\s*(.*?)(?=\n> Finished chain|$)', cleaned_text, re.DOTALL)
+            if answer_match:
+                answer_content = answer_match.group(1).strip()
+                # First send the metadata marker
+                messages.append(ChatMessage(
+                    role="assistant",
+                    content="Final article is ready!",
+                    metadata={"title": "📝 Final Article"}
+                ))
+                # Then send the actual content without metadata
+                messages.append(ChatMessage(
+                    role="assistant",
+                    content=answer_content
+                ))
+
+        return messages
+
+class StreamingCapture:
+    def __init__(self):
+        self.buffer = ""
+
+    def write(self, text):
+        self.buffer += text
+        return len(text)
+
+    def flush(self):
+        pass
+
 class ArticleCrew:
     def __init__(self):
-        #
+        # Initialize agents
         self.planner = Agent(
             role="Content Planner",
             goal="Plan engaging and factually accurate content on {topic}",
@@ -44,7 +125,6 @@ class ArticleCrew:
         self.output_parser = OutputParser()
 
     def create_tasks(self, topic: str):
-        # Task definitions remain the same
        plan_task = Task(
            description=(
                f"1. Prioritize the latest trends, key players, and noteworthy news on {topic}.\n"
@@ -83,38 +163,46 @@
             verbose=2
         )
 
-
-    def __init__(self):
-        self.data = []
-        self.current_chunk = ""
-
-    def write(self, text):
-        self.current_chunk += text
-        if "\n" in text:
-            self.data.append(self.current_chunk)
-            self.current_chunk = ""
-        return len(text)
-
-        stream = StreamCapture()
+        capture = StreamingCapture()
         original_stdout = sys.stdout
-        sys.stdout = stream
+        sys.stdout = capture
 
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-            )
+            # Start the crew task in a separate thread to not block streaming
+            result_container = []
+
+            def run_crew():
+                try:
+                    result = crew.kickoff(inputs={"topic": topic})
+                    result_container.append(result)
+                except Exception as e:
+                    result_container.append(e)
+
+            thread = threading.Thread(target=run_crew)
+            thread.start()
+
+            # Stream output while the crew is working
+            last_processed = 0
+            while thread.is_alive() or last_processed < len(capture.buffer):
+                if len(capture.buffer) > last_processed:
+                    new_content = capture.buffer[last_processed:]
+                    messages = self.output_parser.parse_output(new_content)
+                    if messages:
+                        for msg in messages:
+                            yield [msg]
+                    last_processed = len(capture.buffer)
+                await asyncio.sleep(0.1)
+
+            # Check if we got a result or an error
+            if result_container and not isinstance(result_container[0], Exception):
+                # Final messages already sent by the parser
+                pass
+            else:
+                yield [ChatMessage(
+                    role="assistant",
+                    content="An error occurred while generating the article.",
+                    metadata={"title": "❌ Error"}
+                )]
 
         finally:
             sys.stdout = original_stdout
@@ -141,16 +229,14 @@ def create_demo():
         )
 
         async def process_input(topic, history):
-            # Add user message as ChatMessage
             history.append(ChatMessage(role="user", content=f"Write an article about: {topic}"))
             yield history
 
-            # Process and add agent messages
             async for messages in article_crew.process_article(topic):
                 history.extend(messages)
                 yield history
 
-        btn = gr.Button("Write Article")
+        btn = gr.Button("Write Article", variant="primary")
         btn.click(
             process_input,
             inputs=[topic, chatbot],
@@ -161,4 +247,5 @@ def create_demo():
 
 if __name__ == "__main__":
     demo = create_demo()
+    demo.queue()
     demo.launch(debug=True, share=True)
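The core technique this commit introduces in process_article is to redirect sys.stdout into an in-memory buffer, run the blocking crew.kickoff() call in a background thread, and poll the buffer from an async loop so new log output can be streamed as it appears. The sketch below is a minimal, self-contained illustration of that pattern rather than the app's actual code: StreamingCapture mirrors the class added in the diff, while noisy_work is an invented stand-in for crew.kickoff(), and the captured text is simply echoed to the real stdout instead of being routed through OutputParser.

import asyncio
import sys
import threading
import time


class StreamingCapture:
    """Accumulates everything written to it, like the class added in this commit."""

    def __init__(self):
        self.buffer = ""

    def write(self, text):
        self.buffer += text
        return len(text)

    def flush(self):
        pass


def noisy_work():
    # Invented stand-in for crew.kickoff(): a blocking job that prints progress.
    for step in range(3):
        print(f"[DEBUG]: step {step} finished")
        time.sleep(0.5)


async def stream_captured_output():
    capture = StreamingCapture()
    original_stdout = sys.stdout
    sys.stdout = capture  # everything print()ed from here on lands in capture.buffer
    try:
        worker = threading.Thread(target=noisy_work)
        worker.start()

        last_seen = 0
        while worker.is_alive() or last_seen < len(capture.buffer):
            if len(capture.buffer) > last_seen:
                new_text = capture.buffer[last_seen:]
                last_seen = len(capture.buffer)
                # app.py feeds this chunk to OutputParser.parse_output and yields
                # ChatMessages; here we just echo it to the real stdout.
                original_stdout.write(new_text)
                original_stdout.flush()
            await asyncio.sleep(0.1)
        worker.join()
    finally:
        sys.stdout = original_stdout


if __name__ == "__main__":
    asyncio.run(stream_captured_output())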
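The new OutputParser turns raw CrewAI console output into chat messages by matching a few markers in the verbose log ("Working Agent", "Starting Task", "Thought:", "Final Answer:"). Here is a standalone illustration of that idea under stated assumptions: the sample log chunk is invented, and plain dicts stand in for gradio's ChatMessage; only the regular expressions mirror the ones added in this commit.

import re

# Invented sample of CrewAI-style verbose output; real traces vary.
SAMPLE_CHUNK = (
    "[DEBUG]: == Working Agent: Content Planner\n"
    "[INFO]: == Starting Task: Prioritize the latest trends, key players, "
    "and noteworthy news on AI agents.\n"
    "\n"
    "> Entering new CrewAgentExecutor chain...\n"
    "Thought: I should search for recent coverage before outlining.\n"
    "Action: Search the internet\n"
    "Final Answer: Outline covering trends, key players, target audience, and SEO keywords.\n"
    "> Finished chain.\n"
)


def parse_chunk(text):
    # Plain dicts stand in for gradio.ChatMessage in this sketch.
    messages = []
    cleaned = re.sub(r'\x1B\[[0-9;]*[mK]', '', text)  # strip ANSI color codes

    agent = re.search(r'\[DEBUG\]: == Working Agent: (.*?)(?=\n|$)', cleaned)
    if agent:
        messages.append({"title": "Agent", "content": agent.group(1)})

    task = re.search(r'\[INFO\]: == Starting Task: (.*?)(?=\n\n|\n> Entering|$)',
                     cleaned, re.DOTALL)
    if task:
        messages.append({"title": "Task", "content": task.group(1).strip()})

    thought = re.search(r'Thought: (.*?)(?=\nAction:|$)', cleaned, re.DOTALL)
    if thought:
        messages.append({"title": "Thought", "content": thought.group(1).strip()})

    answer = re.search(r'Final Answer:\s*(.*?)(?=\n> Finished chain|$)',
                       cleaned, re.DOTALL)
    if answer:
        messages.append({"title": "Output", "content": answer.group(1).strip()})

    return messages


if __name__ == "__main__":
    for msg in parse_chunk(SAMPLE_CHUNK):
        print(f"{msg['title']}: {msg['content']}")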
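On the UI side, process_input is an async generator: every yield pushes the updated message list to the gr.Chatbot, and the demo.queue() call added at the bottom of the diff routes events through Gradio's queue so those intermediate updates reach the browser. The following is a rough wiring sketch, assuming a recent Gradio release with messages-style chatbots; fake_crew_stream is an invented stand-in for ArticleCrew.process_article, and the layout is illustrative rather than copied from app.py.

import asyncio

import gradio as gr


async def fake_crew_stream(topic):
    # Invented stand-in for ArticleCrew.process_article(topic): yields lists of
    # assistant messages as work progresses.
    steps = ["Planning the outline...", "Writing the draft...", "Editing...",
             f"Done: here is the article on {topic}."]
    for step in steps:
        await asyncio.sleep(0.5)
        yield [{"role": "assistant", "content": step}]


def create_demo():
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(type="messages", label="Article Writing Crew")
        topic = gr.Textbox(label="Topic")
        btn = gr.Button("Write Article", variant="primary")

        async def process_input(topic_value, history):
            history = (history or []) + [
                {"role": "user", "content": f"Write an article about: {topic_value}"}
            ]
            yield history
            async for messages in fake_crew_stream(topic_value):
                history = history + messages
                yield history

        btn.click(process_input, inputs=[topic, chatbot], outputs=[chatbot])
    return demo


if __name__ == "__main__":
    demo = create_demo()
    demo.queue()  # as in the commit: run events through the queue, which streaming handlers need in some Gradio versions
    demo.launch()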