Create app.py
app.py
ADDED
@@ -0,0 +1,182 @@
import gradio as gr
import vllm
import torch
from collections import Counter

# Initialize Model
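# Loads the AWQ-quantized Qwen2.5-32B-Instruct checkpoint with vLLM, sharded
# across two GPUs (tensor_parallel_size=2), in half precision with eager
# execution (no CUDA graphs) and a context window capped at 10,500 tokens.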
llm = vllm.LLM(
    "Qwen/Qwen2.5-32B-Instruct-AWQ",
    tensor_parallel_size=2,
    quantization="AWQ",
    gpu_memory_utilization=0.95,
    trust_remote_code=True,
    dtype="half",
    enforce_eager=True,
    max_model_len=10500,
)
tokenizer = llm.get_tokenizer()

# Helper Functions
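# extract_answer finds the last "\boxed{...}" in the model output and returns
# its brace-balanced contents, e.g.
#   extract_answer(r"... so the result is \boxed{A + B'}")  -> "A + B'"
#   extract_answer("no boxed answer here")                   -> None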
def extract_answer(text):
    idx = text.rfind("\\boxed")
    if idx < 0:
        return None

    i = idx
    num_open = 0
    close_idx = None

    while i < len(text):
        if text[i] == "{":
            num_open += 1
        elif text[i] == "}":
            num_open -= 1
            if num_open == 0:
                close_idx = i
                break
        i += 1

    if close_idx is None:
        return None

    boxed = text[idx:close_idx + 1]
    left = "\\boxed{"
    try:
        assert boxed[:len(left)] == left
        assert boxed[-1] == "}"
        return boxed[len(left):-1]
    except:
        return None

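# majority_vote drops failed extractions (None) and returns the most common
# remaining answer, e.g. majority_vote(["A+B", None, "A+B", "A.B"]) -> "A+B".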
def majority_vote(answers):
    answers = [a for a in answers if a is not None]
    if not answers:
        return None
    counts = Counter(answers)
    return counts.most_common(1)[0][0]

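# TIRAgent holds a single reasoning trace: it builds the simplification prompt
# for one problem, records the model's reply, and extracts the \boxed{} answer.
# Each agent completes after a single response; depth/max_depth are tracked but
# not otherwise consulted in this app.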
class TIRAgent:
    def __init__(self, problem_id, id, problem, tokenizer, max_depth, log):
        self.problem_id = problem_id
        self.id = id
        self.depth = 1
        self.max_depth = max_depth
        self.tokenizer = tokenizer
        self.problem = problem
        self.messages = [
            {
                "role": "user",
                "content": f"""Here is a Boolean expression to simplify:
{self.problem}

Show the step-by-step simplification using Boolean algebra laws. For each step:
1. Write the current expression
2. Name the rule applied
3. Explain the transformation clearly

Put your final simplified answer in a LaTeX box \\boxed{{}}."""
            }
        ]
        self.last_response = None
        self.answers = []
        self.is_complete = False
        self.log = log
        self.next_prompt = None

    def complete(self):
        return self.is_complete

    def add_response(self, response):
        self.depth += 1
        self.last_response = response
        self.messages.append({"role": "assistant", "content": response})

        # Extract boxed answer if present
        answer = extract_answer(response)
        if answer is not None:
            self.answers.append(answer)

        # Mark complete after first response
        self.is_complete = True

    def next_message(self):
        assert not self.is_complete
        text = self.tokenizer.apply_chat_template(
            self.messages,
            tokenize=False,
            add_generation_prompt=True
        )
        return text

    def final_answer(self):
        ans = None
        if len(self.answers) > 0:
            ans = self.answers[-1]
        if self.log:
            self.log.writerow([self.problem_id, self.id, ans])
        return ans

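# SCTIRAgent is the self-consistency wrapper: it runs `samples` independent
# TIRAgents on the same problem and majority-votes their boxed answers.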
class SCTIRAgent:
    def __init__(self, problem_id, problem, tokenizer, samples, max_depth, log):
        self.problem_id = problem_id
        self.problem = problem
        self.tokenizer = tokenizer
        self.samples = samples
        self.max_depth = max_depth
        self.agents = [
            TIRAgent(problem_id, i, problem, tokenizer, max_depth, log)
            for i in range(samples)
        ]
        self.log = log

    def complete(self):
        return all(agent.complete() for agent in self.agents)

    def get_ready_agents(self):
        return [agent for agent in self.agents if not agent.complete()]

    def final_answer(self):
        assert self.complete()
        answers = [agent.final_answer() for agent in self.agents]
        answer = majority_vote(answers)
        return answer if answer is not None else None

# Sampling parameters
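# Note: max_tokens=512 caps each sampled response; if a derivation runs past
# that limit the \boxed{} answer may be cut off and extraction returns None.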
sampling_params = vllm.SamplingParams(
    max_tokens=512,
    temperature=0.7,
    top_p=0.9
)

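# End-to-end pipeline for the UI: wrap the expression in an SCTIRAgent
# (samples=1 here, so no actual voting), generate until every agent has a
# response, then return the aggregated answer (or None if extraction failed).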
def simplify_boolean_expression(expression):
    agent = SCTIRAgent(0, expression, tokenizer, samples=1, max_depth=1, log=None)

    while not agent.complete():
        ready_agents = agent.get_ready_agents()
        texts = [a.next_message() for a in ready_agents]

        responses = llm.generate(texts, sampling_params)

        for j, ready_agent in enumerate(ready_agents):
            response = responses[j].outputs[0].text
            ready_agent.add_response(response)

    answer = agent.final_answer()
    return answer

# Gradio Interface
def interface(boolean_expr):
    simplified_expr = simplify_boolean_expression(boolean_expr)
    return simplified_expr

# Gradio app
app = gr.Interface(
    fn=interface,
    inputs=gr.Textbox(label="Enter Boolean Expression", placeholder="e.g., (B.C' + A'.D).(A.B' + C.D')"),
    outputs=gr.Textbox(label="Final Simplified Expression"),
    title="Boolean Expression Simplifier",
    description="Input a Boolean expression, and the model will provide the final simplified result.",
)

if __name__ == "__main__":
    app.launch()
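
With the Space up and running, one way to exercise the app programmatically is through gradio_client. A minimal sketch, assuming a placeholder Space id ("OWNER/boolean-simplifier") and the default gr.Interface endpoint name "/predict":

from gradio_client import Client

# Placeholder Space id for illustration; substitute the real owner/space name.
client = Client("OWNER/boolean-simplifier")

# gr.Interface exposes its single function under the default "/predict" endpoint.
result = client.predict("(A + B).(A + B')", api_name="/predict")
print(result)  # the extracted \boxed{} contents, e.g. "A" for this input (model-dependent)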