martinbowling committed on
Commit
1e8ab01
1 Parent(s): bf857ea

Create app.py

Files changed (1)
  1. app.py +187 -0
app.py ADDED
@@ -0,0 +1,187 @@
import asyncio
import gradio as gr
from groq import AsyncGroq
import time

# Groq client (set in gradio_interface once the user supplies an API key)
client = None

# Define model
model = "llama-3.1-70b-versatile"

# Initial system prompt (regular Chain of Thought)
initial_system_prompt = """You are an AI assistant capable of detailed, step-by-step thinking. When presented with a question or problem, break down your thought process into clear, logical steps. For each step, explain your reasoning. Conclude with a final answer. Use the following markdown structure:

## Reasoning
1. [First step]
**Explanation:** [Detailed explanation of this step]
2. [Second step]
**Explanation:** [Detailed explanation of this step]
...

## Answer
[Final answer]

Be comprehensive and show your reasoning clearly."""

# Follow-up system prompt
followup_system_prompt = """You are an AI assistant tasked with analyzing and improving upon previous problem-solving steps. Review the original query and the previous turns of reasoning, then provide a new perspective or deeper analysis. Use the following markdown structure:

## Critique
[Provide a brief critique of the previous reasoning, highlighting its strengths and potential weaknesses]

## New Reasoning
1. [First step of new or refined approach]
**Explanation:** [Detailed explanation of this step, referencing the previous reasoning if relevant]
2. [Second step of new or refined approach]
**Explanation:** [Explanation of how this step builds upon or differs from the previous thinking]
...

## Updated Answer
[Updated answer based on this new analysis]

Be critical yet constructive, and strive to provide new insights or improvements."""

# Synthesis prompt
synthesis_prompt = """You are an AI assistant tasked with synthesizing multiple turns of reasoning into a final, comprehensive answer. You will be presented with three different turns of reasoning for solving a problem. Your task is to:

1. Analyze each turn, considering its strengths and weaknesses.
2. Compare and contrast the different methods.
3. Synthesize the insights from all turns into a final, well-reasoned answer.
4. Provide a concise, clear final answer that a general audience can understand.

Use the following markdown structure:

## Analysis of Turns
[Provide a brief analysis of each turn of reasoning]

## Comparison
[Compare and contrast the turns, highlighting key differences and similarities]

## Final Reasoning
[Provide a final, synthesized reasoning process that combines the best insights from all turns]

## Comprehensive Final Answer
[Comprehensive final answer]

## Concise Answer
[A brief, clear, and easily understandable version of the final answer, suitable for a general audience. This should be no more than 2-3 sentences.]

Be thorough in your analysis and clear in your reasoning process."""


async def call_llm(messages: list, temperature: float = 0.7, max_tokens: int = 8000) -> str:
    """Call the Groq chat completions API and return the response text."""
    response = await client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return response.choices[0].message.content


async def generate_turn(query: str, previous_turns: list = None) -> str:
    """Generate a single turn of reasoning, considering previous turns if available."""
    is_first_turn = previous_turns is None or len(previous_turns) == 0
    if is_first_turn:
        messages = [
            {"role": "system", "content": initial_system_prompt},
            {"role": "user", "content": query},
        ]
    else:
        previous_content = "\n\n".join(previous_turns)
        messages = [
            {"role": "system", "content": followup_system_prompt},
            {
                "role": "user",
                "content": f"Original Query: {query}\n\nPrevious Turns:\n{previous_content}\n\nProvide the next turn of reasoning.",
            },
        ]

    return await call_llm(messages)


async def synthesize_turns(query: str, turns: list) -> str:
    """Synthesize multiple turns of reasoning into a final answer."""
    turns_text = "\n\n".join(f"Turn {i + 1}:\n{turn}" for i, turn in enumerate(turns))
    messages = [
        {"role": "system", "content": synthesis_prompt},
        {
            "role": "user",
            "content": f"Original Query: {query}\n\nTurns of Reasoning:\n{turns_text}",
        },
    ]
    return await call_llm(messages)


async def full_cot_reasoning(query: str) -> str:
    """Perform full Chain of Thought reasoning with multiple turns and return a markdown report."""
    start_time = time.time()
    turns = []
    turn_times = []
    full_output = f"# Chain of Thought Reasoning\n\n## Original Query\n{query}\n\n"

    for i in range(3):  # Generate 3 turns of reasoning
        turn_start = time.time()
        turn = await generate_turn(query, turns)
        turns.append(turn)
        turn_times.append(time.time() - turn_start)
        full_output += f"## Turn {i + 1}\n{turn}\n\n"

    mid_time = time.time()
    synthesis = await synthesize_turns(query, turns)
    full_output += f"## Synthesis\n{synthesis}\n\n"
    end_time = time.time()

    timing = {
        'turn_times': turn_times,
        'total_turns_time': mid_time - start_time,
        'synthesis_time': end_time - mid_time,
        'total_time': end_time - start_time,
    }

    full_output += "## Timing Information\n"
    full_output += f"- Turn 1 Time: {timing['turn_times'][0]:.2f}s\n"
    full_output += f"- Turn 2 Time: {timing['turn_times'][1]:.2f}s\n"
    full_output += f"- Turn 3 Time: {timing['turn_times'][2]:.2f}s\n"
    full_output += f"- Total Turns Time: {timing['total_turns_time']:.2f}s\n"
    full_output += f"- Synthesis Time: {timing['synthesis_time']:.2f}s\n"
    full_output += f"- Total Time: {timing['total_time']:.2f}s\n"

    return full_output


def gradio_interface(api_key: str, query: str) -> str:
    """Gradio interface function: configure the client, then run the full pipeline."""
    global client
    client = AsyncGroq(api_key=api_key)
    return asyncio.run(full_cot_reasoning(query))


# Create Gradio interface
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Enter your Groq API Key", type="password"),
        gr.Textbox(label="Enter your question or problem"),
    ],
    outputs=[gr.Markdown(label="Chain of Thought Reasoning")],
    title="Multi-Turn Chain of Thought Reasoning with Final Synthesis",
    description="Enter your Groq API Key and a question or problem to see multiple turns of reasoning, followed by a final synthesized answer.",
)

# Launch the interface
if __name__ == "__main__":
    iface.launch(share=True)
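
For quick local testing, the same pipeline can also be driven without the Gradio UI. A minimal sketch, assuming app.py is importable from the working directory and that the API key is available in a GROQ_API_KEY environment variable; the variable name and the sample question are illustrative and not part of this commit:

import os
import asyncio
from groq import AsyncGroq
import app  # the module added in this commit

async def main():
    # app.call_llm reads the module-level `client`, so set it before use
    # (the Gradio path does the same inside gradio_interface).
    app.client = AsyncGroq(api_key=os.environ["GROQ_API_KEY"])  # assumed env var
    report = await app.full_cot_reasoning("Why is the sky blue?")  # sample question
    print(report)

if __name__ == "__main__":
    asyncio.run(main())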