GUI for evaluation feedback
Files changed:
- .gitignore +2 -1
- eval_code.py +59 -3
- main.py +192 -39
- test/test_dbops.py +22 -0
.gitignore
CHANGED
@@ -1,2 +1,3 @@
 .sesskey
-.idea
+.idea
+data/*
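The new data/* entry keeps the app's on-disk database out of the repo: main.py below now creates data/sessions_meta.db at startup.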
eval_code.py
CHANGED
@@ -1,3 +1,4 @@
+import copy
 import json
 import time
 
@@ -7,6 +8,20 @@ We will handle here the code evaluation phase.
 """
 import json
 
+
+EVAL_ANSWER_NOEVAL = 0
+EVAL_ANSWER_POSITIVE = 1
+EVAL_ANSWER_NEGATIVE = -1
+
+CODE_AUGMENTATIONS = [
+    ("NO_COMPILE", "The code provided is not valid C code."),
+    ("DRY", "Don't repeat yourself."),
+    ("SRP", "Single object [function] responsibility."),
+    ("MC", "Magic constants."),
+    ("NAME", "Meaningful names in the code."),
+]
+
+
 def clean_prompt_answer(answer):
     """
     Chatgpt4 is ok, does not pollute the code but 3.5 encloses it in ```
@@ -48,6 +63,11 @@ def eval_code_by_chatgpt(openai_client, ccode):
     "criteria": "DRY",
     "explanation": "The memory allocation and initialization for ``p1``, ``p2``, and ``p3`` are repetitive. Consider creating a function like ``allocateAndInitializeMemory``."
 },
+{
+    "criteria": "DRY",
+    "explanation": "The second DRY failure, because this is the observed ChatGPT behaviour."
+},
+
 {
     "criteria": "SRP",
     "explanation": "The ``main`` function handles memory allocation, initialization, and printing. You should separate these responsibilities into different functions like ``allocateMemory``, ``initializeData``, and ``printData``."
@@ -63,15 +83,51 @@ def eval_code_by_chatgpt(openai_client, ccode):
         traceback.print_exc()
         return {"error": "There was an error while parsing the answer. Maybe ChatGPT is overloaded?"}
 
+
+
+def add_evaluation_fields_on_js_answer(json_answer, all_criterias=None):
+    """
+    Adds some JSON fields to store the human feedback.
+
+    The textual human feedback will always be in the 0 position.
+
+    :param json_answer:
+    :return:
+    """
+    if all_criterias is None:
+        all_criterias = CODE_AUGMENTATIONS
+    enhanced_answer = []
+    overall_feedback = {
+        "criteria": "HUMAN_FEEDBACK",
+        "explanation": "",
+        "EVAL": EVAL_ANSWER_NOEVAL
+    }
+
+    if all_criterias is not None:
+        existing = {c["criteria"] for c in json_answer}
+        for criteria in all_criterias:
+            if criteria[0] not in existing:
+                json_answer.append({"criteria": criteria[0], "explanation": "Not infringed"})
+
+    enhanced_answer.append(overall_feedback)
+    for ans in json_answer:
+        ans = copy.deepcopy(ans)
+        ans["EVAL"] = EVAL_ANSWER_NOEVAL
+        enhanced_answer.append(ans)
+
+    return enhanced_answer
+
 def eval_the_piece_of_c_code(openai_client, ccode):
     """
-    Main entrypoint to this module. Will be called from backend
+    Main entrypoint to this module. Will be called from the backend. Will block, so use multithreading please.
+
+    Will return a proper JSON answer and will have the EVAL fields, too.
 
     :param ccode:
     :return:
     """
     chatgpt_ans = eval_code_by_chatgpt(openai_client, ccode)
     chatgpt_js = parse_chatgpt_answer(chatgpt_ans)
-
-
+    enhanced_answer = add_evaluation_fields_on_js_answer(chatgpt_js)
+    return enhanced_answer
 
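For reference, a minimal usage sketch (not part of the commit), assuming eval_code is importable and add_evaluation_fields_on_js_answer behaves as the diff above shows:

    # Hypothetical input: a parsed ChatGPT answer with a single DRY finding.
    import eval_code

    parsed = [{"criteria": "DRY", "explanation": "Repeated malloc/init blocks."}]
    enhanced = eval_code.add_evaluation_fields_on_js_answer(parsed)

    # Position 0 is always the free-text human feedback slot.
    assert enhanced[0]["criteria"] == "HUMAN_FEEDBACK"
    # Every entry gets an EVAL field, initialized to "no evaluation yet".
    assert all(e["EVAL"] == eval_code.EVAL_ANSWER_NOEVAL for e in enhanced)
    # Criteria the model did not flag are padded in as "Not infringed".
    assert any(e["criteria"] == "SRP" and e["explanation"] == "Not infringed" for e in enhanced)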
main.py
CHANGED
@@ -1,12 +1,16 @@
 import json
 import uuid
-
-from fasthtml.common import *
 import os
-import eval_code
+import pathlib
+
 from datetime import datetime
+from fasthtml.common import *
 from sqlite_minutils.db import Database
 
+import eval_code
+
+
+
 # the secrets. Will be loaded from HF, or for docker --env-file or from IDE
 
 OAUTH_CLIENT_ID = os.environ.get('OAUTH_CLIENT_ID')
@@ -16,17 +20,34 @@ OPENID_PROVIDER_URL = os.environ.get('OPENID_PROVIDER_URL')
 SPACE_HOST = os.environ.get('SPACE_HOST')
 
 # The database. In memory bc it is ephemeral on HF anyway.
+DATABASE_NAME = "data/sessions_meta.db"
+pathlib.Path(DATABASE_NAME).unlink(missing_ok=True)
+
 
-global_database = Database(memory=True)
+global_database = Database(DATABASE_NAME)
 global_database_tables = global_database.t
+
+# We store session-specific feedback from registered users.
+# Will later be used to submit/store the answer.
+question_evaluation_table = global_database_tables.question_answer
+if question_evaluation_table not in global_database_tables:
+    question_evaluation_table.create(id=int, code_text=str,
+                                     answer_eval_text=str, submitted=int, pk='id')
+Question_Evaluation_cls = question_evaluation_table.dataclass()
+
+# We store real-time session IDs and the state of the questions. Will link to question_answer
 session_state_table = global_database_tables.session_state
 if session_state_table not in global_database_tables:
     # session_id stored in cookies
     # see EVAL_STATUS_x below for state
     session_state_table.create(id=int, session_id=str, state=int, submitted=datetime,
-                               completed=datetime, pk='id')
+                               completed=datetime, current_qeval=int, pk='id',)
+    # Can't really nail the fk specs
+    # foreign_keys=[("current_qeval", question_evaluation_table, "id")])
 Session_State_cls = session_state_table.dataclass()
 
+
+
 EVAL_STATE_NEW=0
 EVAL_STATE_QUERY=1
 EVAL_STATE_TIMEDOUT=2
@@ -49,28 +70,109 @@ else:
 app, rt = fast_app(debug=False, live=False, hdrs=hdrs)
 REFRESH_TIME = 1
 
-CODE_AUGMENTATIONS=[
-    ("DRY", "Don't repeat yourself."),
-    ("SRP", "Single object [function] responsibility."),
-    ("MC", "Magic constants."),
-    ("NAME", "Meaningful names in the code."),
-]
+def html_create_feedback_updown_button(qe_id, ans_id, selected=0):
+    html_target_id = f"buttons_{ans_id}"
+    colors = ["grey", "blue"]
+    up_col = colors[0]
+    down_col = colors[0]
+    if selected == 1: up_col = colors[1]
+    if selected == -1: down_col = colors[1]
+    toggle_url = f"toggle_up_down/{qe_id}/{ans_id}/"
+    up = Button("+", hx_put=f"{toggle_url}?which=1", hx_swap="outerHTML",
+                hx_target="#" + html_target_id, style=f"background-color:{up_col}")
+    down = Button("-", hx_put=f"{toggle_url}?which=-1", hx_swap="outerHTML",
+                  hx_target="#" + html_target_id, style=f"background-color:{down_col}")
+    button_row = Div(up, down, _id=html_target_id)
+    return button_row
+
+
+def html_augment_evaluation_text_with_feedback(eval_html, qe_id, ans_id, selected=0):
+    """
+    Will plot the + / - buttons for feedback.
+
+    :param eval_html:
+    :param qe_id:
+    :param ans_id:
+    :return:
+    """
+
+    buttons = html_create_feedback_updown_button(qe_id, ans_id, selected)
+    final_div = Div(eval_html, buttons, style=" background-color: #f0f0f0;")
+    return final_div
+
+
+@rt("/toggle_up_down/{qe_id}/{ans_id}")
+def put(session, qe_id:int, ans_id:int, which:int):
+    """
+    Answers the +/- button presses.
+
+    :param session:
+    :param qe_id:
+    :param ans_id:
+    :param which:
+    :return:
+    """
+    print(qe_id, ans_id, which)
+    if which not in {-1, 1}:
+        return None
+    if 'session_id' not in session: return None
+    session_id = session["session_id"]
+    state_rows = session_state_table(limit=1, where=f"session_id == '{session_id}'", order_by="id DESC")
+    if len(state_rows) <= 0:
+        return None
+    answer_id = state_rows[0].current_qeval
+    qa_obj_row = question_evaluation_table(limit=1, where=f"id == {answer_id}")
+    if len(qa_obj_row) <= 0:
+        return None
+    qa_obj = qa_obj_row[0]
+    if qe_id != qa_obj.id:
+        print(f"QE {qe_id} does not belong to {qa_obj.id}")
+        return None
+    answer_eval_js = json.loads(qa_obj.answer_eval_text)
+    crt_selection = answer_eval_js[ans_id]["EVAL"]
+    input_button = which
+    out_selection = (input_button if crt_selection == 0 else (0 if crt_selection == input_button else input_button))
+    print(f"out selection: {out_selection}")
+    # store it back in DB
+    answer_eval_js[ans_id]["EVAL"] = out_selection
+    qa_obj.answer_eval_text = answer_eval_js
+    qa_obj.submitted = False  # mark object as dirty
+    question_evaluation_table.upsert(qa_obj)
+
+    buttons = html_create_feedback_updown_button(qe_id, ans_id, selected=out_selection)
+    return buttons
+
+
+def html_get_textual_feedback_form(qe_obj):
+    form = Form(Input(), Button("Submit"))
+    div = Div(P("Give us general feedback on the evaluation (optional)"), form)
+    return div
+
 
-def html_format_code_review_form(feedback_js, c_code, html_id=""):
+def html_format_code_review_form(qe_obj, html_id=""):
+    """
+    Formats the code review, adding fields for feedback where required.
+
+    :param qe_obj:
+    :param html_id:
+    :return:
+    """
+    c_code = qe_obj.code_text
+    enhanced_answer = json.loads(qe_obj.answer_eval_text)
     list_of_citerias = []
-    for caug_code, caug_txt in CODE_AUGMENTATIONS:
+    for caug_code, caug_txt in eval_code.CODE_AUGMENTATIONS:
         crit_tag = [H3(caug_code), P(caug_txt)]
         list_of_citerias.extend(crit_tag)
         # yeah, I know . . .
-
-        for eval_line in feedback_js:
+        for k, eval_line in enumerate(enhanced_answer):
             if caug_code == eval_line["criteria"]:
                 eval_txt = P(eval_line["explanation"])
-                list_of_citerias.append(eval_txt)
-
-
-
-    return Div(html_render_code_output(c_code), *list_of_citerias, _id=html_id)
+                eval_txt_fb = html_augment_evaluation_text_with_feedback(eval_txt, qe_obj.id, k)
+                list_of_citerias.append(eval_txt_fb)
+    textual_feedback = html_get_textual_feedback_form(qe_obj)
+    return Div(html_render_code_output(c_code), *list_of_citerias, textual_feedback, _id=html_id)
 
 def html_default_results(html_id):
     return Div(P("This is where the criteria will show up once the code is evaluated"), _id=html_id)
@@ -103,8 +205,12 @@ def html_render_answer_from_db(session_id, html_id):
     if eval_request_status == EVAL_STATE_NEW:
         return html_default_results(html_id), #, html_render_inputbox(HTML_RESULTS_AREA, HTML_SUBMIT_CODE_AREA)
     if eval_request_status == EVAL_STATE_ANSWER:
-        feedback_js = json.loads(state_obj.answer)
-        return (html_format_code_review_form(feedback_js, state_obj.code, html_id),
+        qe_obj_lst = question_evaluation_table(limit=1, where=f"id == {state_obj.current_qeval}")
+        if len(qe_obj_lst) < 1:
+            print(f"Object id {state_obj.current_qeval} can't be found in question_evaluation_table")
+            return (None,)
+        qe_obj = qe_obj_lst[0]
+        return (html_format_code_review_form(qe_obj, html_id),
                 html_render_inputbox(target_html_id=HTML_RESULTS_AREA, region_html_id=HTML_SUBMIT_CODE_AREA)) #, html_render_code_output(HTML_SUBMIT_CODE_AREA, state_obj.code)
     if eval_request_status == EVAL_STATE_QUERY:
         return html_waiting_for_results(html_id),
@@ -113,34 +219,52 @@ def html_render_answer_from_db(session_id, html_id):
 
 # How can I timeout? Well ... TBD.
 @threaded
-def call_gpt_and_store_result(
+def call_gpt_and_store_result(session_obj_id, code_to_check):
+    """
+    Threaded function that will submit code to the LLM and wait for the answer.
+
+    Communication with the "main" thread is through the db.
+
+    All parameters must be picklable.
+
+    :param session_obj_id:
+    :param code_to_check:
+    :return:
+    """
+    # TODO refactor considering the new join!
     # print("evaluating code")
     try:
        # Pesky way to get a new cursor, in a thread safe way, into the db. This code runs in another thread.
        # Can we do better?
-        local_database = Database(memory=True)
-
-
-        local_sess_obj_lst =
+        local_database = Database(DATABASE_NAME)
+        local_sess_state = local_database.t.session_state
+        local_sess_state_cls = local_sess_state.dataclass()
+        local_sess_obj_lst = local_sess_state(limit=1, where=f"id == {session_obj_id}")
         local_sess_obj = local_sess_obj_lst[0]
-
-        #
-
-        if "error" in gpt_js_eval:
+
+        # Trigger the lengthy operation
+        enhanced_answer = eval_code.eval_the_piece_of_c_code(openai_client=None, ccode=code_to_check)
+
+        # we create a new QA entry.
+        qa_obj = Question_Evaluation_cls(code_text=code_to_check, answer_eval_text=enhanced_answer, submitted=0)
+        qa_obj = question_evaluation_table.insert(qa_obj)
+        local_sess_obj.current_qeval = qa_obj.id
+
+        # TODO save the outcome in a table, where it will be backed up later. SQLite is volatile here.
+
+        if "error" in enhanced_answer:
             local_sess_obj.state = EVAL_STATE_ERROR
-            local_sess_obj.answer = gpt_js_eval["error"]
+            local_sess_obj.answer = enhanced_answer["error"]
             local_sess_obj.completed = datetime.utcnow()
         else:
             local_sess_obj.state = EVAL_STATE_ANSWER
-            local_sess_obj.answer = json.dumps(gpt_js_eval)
             local_sess_obj.completed = datetime.utcnow()
-
+        local_sess_state.update(local_sess_obj)
     except:
         import traceback
         traceback.print_exc()
 
 
-
 def html_render_inputbox(target_html_id, region_html_id):
     txtarea = Textarea(id="ccodetoeval", name="ccodetoeval", placeholder="Enter a piece of C code", rows=3)
     form = Form(Group(txtarea, Button("Evaluate")),
@@ -190,6 +314,36 @@ def get(session):
         P("!! The data will be saved and maybe made public !!"),
     ]
 
+    # ############
+    # # !!! FOR DEBUGGING PURPOSES !!!
+    #
+    # # insert an answer in db.
+    # code = """
+    # #include <stdio.h>
+    # int main() {
+    #     // printf() displays the string inside quotation
+    #     printf("Hello, World!");
+    #     return 0;
+    # }
+    # """
+    # answer = eval_code.eval_the_piece_of_c_code(None, None)
+    # enhanced_answer = eval_code.add_evaluation_fields_on_js_answer(answer, CODE_AUGMENTATIONS)
+    # session_obj = Session_State_cls(
+    #     session_id=session_id,
+    #     state=EVAL_STATE_ANSWER,
+    #     submitted=datetime.utcnow(),
+    #     completed=datetime.utcnow(),
+    # )
+    #
+    # # we create a new QA entry.
+    # qa_obj = Question_Evaluation_cls(code_text=code,
+    #                                  answer_eval_text=enhanced_answer, submitted=0)
+    # qa_obj = question_evaluation_table.insert(qa_obj)
+    # session_obj.current_qeval = qa_obj.id
+    # session_obj = session_state_table.insert(session_obj)
+    #
+    # ############
+
     input_area = html_render_inputbox(target_html_id=HTML_RESULTS_AREA, region_html_id=HTML_SUBMIT_CODE_AREA)
     results_area = html_render_answer_from_db(session_id, HTML_RESULTS_AREA)
     clear_area = render_clear_area(session_id, HTML_CLEAR_FORM)
@@ -211,9 +365,10 @@ def post(ccodetoeval:str, session):
         session_id=session_id,
         state=EVAL_STATE_QUERY,
         submitted=datetime.utcnow(),
-        code=ccodetoeval,
-        answer=""
     )
+
+    # assert False, "Need to refactor to insert a new answer request and update the current answer ID"
+
     # we insert and we get the new primary key
     session_obj = session_state_table.insert(session_obj)
     # will be executed in another thread with magic @threaded
@@ -232,8 +387,6 @@ def get(session):
         session_id=session_id,
         state=EVAL_STATE_NEW,
         submitted=datetime.utcnow(),
-        code="",
-        answer=""
    )
     session_state_table.insert(session_obj)
     # re-issue forms
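A standalone sketch (not part of the commit) of the three-state toggle implemented by the out_selection expression in the /toggle_up_down route: pressing the already-active button clears the vote, pressing the other button switches it:

    EVAL_ANSWER_NOEVAL, EVAL_ANSWER_POSITIVE, EVAL_ANSWER_NEGATIVE = 0, 1, -1

    def toggle(current, pressed):
        # Same logic as the one-line conditional in the diff, spelled out.
        if current == EVAL_ANSWER_NOEVAL:
            return pressed             # no vote yet -> take the pressed one
        if current == pressed:
            return EVAL_ANSWER_NOEVAL  # same button again -> clear the vote
        return pressed                 # opposite button -> flip the vote

    assert toggle(0, 1) == 1 and toggle(1, 1) == 0 and toggle(-1, 1) == 1
    assert toggle(0, -1) == -1 and toggle(-1, -1) == 0 and toggle(1, -1) == -1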
test/test_dbops.py
CHANGED
@@ -1,5 +1,9 @@
 from fasthtml.common import *
 from sqlite_minutils.db import Database
+import json
+
+import eval_code
+
 def test_some_sqldb_inmem():
 
     db = Database(memory=True)
@@ -25,3 +29,21 @@ def test_some_sqldb_inmem():
     assert len(newg) == 1
     assert newg[0].prompt == "New prompt"
 
+
+def test_sqlite_jsonops():
+    db = Database(memory=True)
+    ans_js = eval_code.eval_the_piece_of_c_code(None, "")
+    enh_ans_js = eval_code.add_evaluation_fields_on_js_answer(ans_js)
+    qa_table = db.t.question_answers.create(id=int, session_id=str, question=str,
+                                            answerjs=str, pk="id")
+    qa_cls = qa_table.dataclass()
+    qa1 = qa_cls(question="Question", answerjs=enh_ans_js)
+    assert qa1.id is None
+    qa1 = qa_table.insert(qa1)
+    assert qa1.id >= 0
+    newans = json.loads(qa1.answerjs)
+    newans[2]["EVAL"] = eval_code.EVAL_ANSWER_POSITIVE
+    qa1.answerjs = newans
+    qa_table.update(qa1)
+    got_qa = qa_table(where=f"id == {qa1.id}")
+    assert "EVAL\": 1" in got_qa[0].answerjs
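The new test leans on the table layer serializing lists and dicts to JSON text when they are written to a str column (hence the json.loads on read-back and the `EVAL": 1` substring check). A minimal sketch of that round-trip, under the assumption that sqlite_minutils behaves like sqlite-utils here:

    import json
    from sqlite_minutils.db import Database

    db = Database(memory=True)
    t = db.t.demo.create(id=int, payload=str, pk="id")
    # The list value is stored as JSON text in the payload column.
    row = t.insert(t.dataclass()(payload=[{"criteria": "DRY", "EVAL": 1}]))
    assert json.loads(row.payload)[0]["EVAL"] == 1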