Saranath07 committed
Commit 7ebd50e · 1 Parent(s): 1914d3f

first commit

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+chroma/**/*.bin filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+.venv
README.md CHANGED
@@ -1,13 +1,14 @@
 ---
-title: PythonQuestionMaker
-emoji: 📈
-colorFrom: yellow
-colorTo: blue
+title: Example Gradio App
+emoji: 🏢
+colorFrom: pink
+colorTo: purple
 sdk: gradio
-sdk_version: 5.9.1
+sdk_version: 4.44.0
 app_file: app.py
 pinned: false
 license: mit
+short_description: Testing the application
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
TODO.md ADDED
@@ -0,0 +1,7 @@
+- Tag-based listing
+- Display all tags
+- Search for a particular tag
+
+- Add documentation for each file
+
+- Deployment in HuggingFace Spaces
__pycache__/PythonQuestionMaker.cpython-312.pyc ADDED
Binary file (3.45 kB)
__pycache__/app.cpython-312.pyc ADDED
Binary file (5.03 kB)
__pycache__/convert_to_json.cpython-312.pyc ADDED
Binary file (1.31 kB)
__pycache__/first_page.cpython-312.pyc ADDED
Binary file (2.31 kB)
__pycache__/run.cpython-312.pyc ADDED
Binary file (2.1 kB)
__pycache__/second_page.cpython-312.pyc ADDED
Binary file (4.33 kB)
__pycache__/template.cpython-312.pyc ADDED
Binary file (2.56 kB)
app.py ADDED
@@ -0,0 +1,109 @@
+import gradio as gr
+import os
+from langchain_chroma.vectorstores import Chroma
+from langchain_huggingface.embeddings import (
+    HuggingFaceEmbeddings,
+    HuggingFaceEndpointEmbeddings,
+)
+import json
+from convert_to_json import data_to_json
+
+from template import make_template_outputs, make_template_testcases
+
+if os.environ.get("HUGGINGFACEHUB_API_TOKEN"):
+    embedding = HuggingFaceEndpointEmbeddings(
+        repo_id="sentence-transformers/all-MiniLM-L6-v2",
+        huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
+    )
+else:
+    embedding = HuggingFaceEmbeddings(
+        model_name="sentence-transformers/all-MiniLM-L6-v2"
+    )
+
+
+
+count = 0
+
+
+def increment_count():
+    global count
+    count += 1
+    return count
+
+
+def print_n_value(n_value):
+    global no_tests
+    no_tests = n_value
+    return n_value  # Return the value if needed for further processing
+
+
+def submit_second_page(topic):
+    db_store = Chroma(collection_name="python-questions", persist_directory="./chroma", embedding_function=embedding)
+    questions = db_store.similarity_search(topic)
+    questions_json = json.loads(data_to_json(questions))
+    return questions_json, gr.update(choices=[d["question"] for d in questions_json])
+
+
+def create_first_page(data_state):
+    # solution_visible = gr.State(False)
+    with gr.Column(visible=True) as page1:
+        gr.Markdown("# Programming in Python")
+        with gr.Row():
+            with gr.Column(scale=1):
+                topic = gr.Textbox(label="Select Topic")
+                submit2 = gr.Button("Submit", elem_id="submit2")
+
+                with gr.Tab("Question"):
+                    question_select = gr.Dropdown(
+                        label="Select Question", choices=[], interactive=True
+                    )
+                    question_display = gr.Textbox(label="Question", interactive=False)
+
+                with gr.Tab("Test Cases"):
+                    testcases_state = gr.Markdown(label="Test Cases")
+
+                with gr.Tab("Output"):
+                    outputs_md = gr.Markdown(label="Output")  # JSON output component
+
+                with gr.Tab("Solution 🔒"):
+                    solution = gr.Code("Solution Locked")
+
+            with gr.Column(scale=1):
+                code_input = gr.Code(
+                    label="Write your code here",
+                    language="python",
+                    lines=10,
+                    interactive=True,
+                )
+                run_button = gr.Button("Run")
+
+    # Connect buttons to functions
+    submit2.click(
+        fn=submit_second_page, inputs=[topic], outputs=[data_state, question_select]
+    )
+
+    question_select.change(
+        fn=make_template_testcases,
+        inputs=[question_select, data_state],
+        outputs=[solution, question_display, testcases_state, code_input],
+    )
+
+    run_button.click(
+        fn=lambda code, question, data: (
+            make_template_outputs(code, question, data)
+        ),
+        inputs=[code_input, question_select, data_state],
+        outputs=[outputs_md],
+    )
+
+    return page1, question_select
+
+
+# Initialize the Gradio app
+with gr.Blocks(css=".small-button { padding: 5px 10px; font-size: 12px; }") as demo:
+    data_state = gr.State([])
+
+    # First page is now the new topic and question selection interface
+    page1_content, question_select = create_first_page(data_state)
+
+demo.launch(share=True)
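The chroma/ files committed below are the persisted vector store that submit_second_page reads from. The ingestion step is not part of this commit; the following is a minimal sketch of how such a collection could have been built (hypothetical script and invented sample question, but using the same collection name, directory, embedding model, and metadata keys the app expects):

```python
# build_store.py -- hypothetical helper, not part of this commit
import json

from langchain_chroma.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_huggingface.embeddings import HuggingFaceEmbeddings

embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Each question is a Document: the statement as page_content plus the metadata
# keys that convert_to_json.data_to_json reads back out after a similarity search.
docs = [
    Document(
        page_content="Write a function that returns the sum of two integers.",
        metadata={
            "question_template": "def add(a, b):\n    pass",
            "function_name": "add",
            "solution": "def add(a, b):\n    return a + b",
            "tags": "basics",
            "testcases": json.dumps([{"input": "1 2", "output": "3"}]),
            "input_type": "stdin",
        },
    ),
]

Chroma.from_documents(
    docs,
    embedding,
    collection_name="python-questions",
    persist_directory="./chroma",
)
```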
chroma/chroma.sqlite3 ADDED
Binary file (356 kB)
chroma/f172bc37-bb88-40cb-89e4-c69b01eea80d/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3c9fd302f000d7790aa403c2d0d8fec363fe46f30b07d53020b6e33b22435a9
+size 1676000
chroma/f172bc37-bb88-40cb-89e4-c69b01eea80d/header.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e87a1dc8bcae6f2c4bea6d5dd5005454d4dace8637dae29bff3c037ea771411e
+size 100
chroma/f172bc37-bb88-40cb-89e4-c69b01eea80d/length.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc19b1997119425765295aeab72d76faa6927d4f83985d328c26f20468d6cc76
+size 4000
chroma/f172bc37-bb88-40cb-89e4-c69b01eea80d/link_lists.bin ADDED
File without changes
convert_to_json.py ADDED
@@ -0,0 +1,25 @@
+import json
+
+"""Serialize Chroma similarity-search Documents (questions plus metadata)
+into a JSON string consumed by the Gradio app."""
+
+def data_to_json(questions):
+    questions_json = []
+
+    for doc in questions:
+        question_obj = {
+            "question": doc.page_content,
+            "question_template": doc.metadata.get("question_template"),
+            "function_name": doc.metadata.get("function_name"),
+            "data_formats": doc.metadata.get("data_formats", ""),
+            "solution": doc.metadata.get("solution", ""),
+            "tags": doc.metadata.get("tags", ""),
+            "testcases": json.loads(doc.metadata.get("testcases", "[]")),
+            "input_type": doc.metadata.get("input_type", ""),
+        }
+        questions_json.append(question_obj)
+
+    # Convert the list to a JSON string
+    json_output = json.dumps(questions_json, indent=2)
+
+    return json_output
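A hedged round-trip example for data_to_json (the Document below is invented; in the app the documents come from the Chroma similarity search):

```python
# Illustrative only -- the Document and its metadata are made up.
import json

from langchain_core.documents import Document
from convert_to_json import data_to_json

doc = Document(
    page_content="Reverse a string.",
    metadata={
        "function_name": "reverse_string",
        "testcases": '[{"input": "abc", "output": "cba"}]',
    },
)

parsed = json.loads(data_to_json([doc]))
print(parsed[0]["function_name"])  # reverse_string
print(parsed[0]["testcases"])      # [{'input': 'abc', 'output': 'cba'}]
print(parsed[0]["solution"])       # "" -- missing metadata keys fall back to defaults
```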
requirements.txt ADDED
@@ -0,0 +1,177 @@
+aiofiles==23.2.1
+aiohappyeyeballs==2.4.0
+aiohttp==3.10.6
+aiosignal==1.3.1
+annotated-types==0.7.0
+anyio==4.6.0
+asgiref==3.8.1
+asttokens==2.4.1
+attrs==24.2.0
+backoff==2.2.1
+bcrypt==4.2.0
+black==24.8.0
+build==1.2.2
+cachetools==5.5.0
+certifi==2024.8.30
+charset-normalizer==3.3.2
+chroma-hnswlib==0.7.6
+chromadb==0.5.9
+click==8.1.7
+coloredlogs==15.0.1
+contourpy==1.3.0
+cycler==0.12.1
+dataclasses-json==0.6.7
+decorator==5.1.1
+Deprecated==1.2.14
+distro==1.9.0
+duckdb==1.1.1
+durationpy==0.7
+executing==2.1.0
+fastapi==0.115.0
+ffmpy==0.4.0
+filelock==3.16.1
+flatbuffers==24.3.25
+fonttools==4.54.1
+frozenlist==1.4.1
+fsspec==2024.9.0
+google-auth==2.35.0
+googleapis-common-protos==1.65.0
+gradio==4.44.0
+gradio_client==1.3.0
+greenlet==3.1.1
+groq==0.11.0
+grpcio==1.66.1
+h11==0.14.0
+httpcore==1.0.5
+httptools==0.6.1
+httpx==0.27.2
+huggingface-hub==0.25.1
+humanfriendly==10.0
+idna==3.10
+importlib_metadata==8.4.0
+importlib_resources==6.4.5
+ipython==8.27.0
+jedi==0.19.1
+Jinja2==3.1.4
+joblib==1.4.2
+jsonpatch==1.33
+jsonpointer==3.0.0
+kiwisolver==1.4.7
+kubernetes==31.0.0
+langchain==0.3.1
+langchain-chroma==0.1.4
+langchain-community==0.3.1
+langchain-core==0.3.6
+langchain-groq==0.2.0
+langchain-huggingface==0.1.0
+langchain-text-splitters==0.3.0
+langsmith==0.1.128
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+marshmallow==3.22.0
+matplotlib==3.9.2
+matplotlib-inline==0.1.7
+mdurl==0.1.2
+mmh3==5.0.1
+monotonic==1.6
+mpmath==1.3.0
+multidict==6.1.0
+mypy-extensions==1.0.0
+networkx==3.3
+numpy==1.26.4
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==9.1.0.70
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.20.5
+nvidia-nvjitlink-cu12==12.6.68
+nvidia-nvtx-cu12==12.1.105
+oauthlib==3.2.2
+onnxruntime==1.19.2
+opentelemetry-api==1.27.0
+opentelemetry-exporter-otlp-proto-common==1.27.0
+opentelemetry-exporter-otlp-proto-grpc==1.27.0
+opentelemetry-instrumentation==0.48b0
+opentelemetry-instrumentation-asgi==0.48b0
+opentelemetry-instrumentation-fastapi==0.48b0
+opentelemetry-proto==1.27.0
+opentelemetry-sdk==1.27.0
+opentelemetry-semantic-conventions==0.48b0
+opentelemetry-util-http==0.48b0
+orjson==3.10.7
+overrides==7.7.0
+packaging==24.1
+pandas==2.2.3
+parso==0.8.4
+pathspec==0.12.1
+pexpect==4.9.0
+pillow==10.4.0
+platformdirs==4.3.6
+posthog==3.6.6
+prompt_toolkit==3.0.47
+protobuf==4.25.5
+ptyprocess==0.7.0
+pure_eval==0.2.3
+pyasn1==0.6.1
+pyasn1_modules==0.4.1
+pydantic==2.9.2
+pydantic-settings==2.5.2
+pydantic_core==2.23.4
+pydub==0.25.1
+Pygments==2.18.0
+pyparsing==3.1.4
+PyPika==0.48.9
+pyproject_hooks==1.1.0
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+python-multipart==0.0.10
+pytz==2024.2
+PyYAML==6.0.2
+regex==2024.9.11
+requests==2.32.3
+requests-oauthlib==2.0.0
+rich==13.8.1
+rsa==4.9
+ruff==0.6.8
+safetensors==0.4.5
+scikit-learn==1.5.2
+scipy==1.14.1
+semantic-version==2.10.0
+sentence-transformers==3.1.1
+setuptools==75.1.0
+shellingham==1.5.4
+six==1.16.0
+sniffio==1.3.1
+SQLAlchemy==2.0.35
+stack-data==0.6.3
+starlette==0.38.6
+sympy==1.13.3
+tenacity==8.5.0
+threadpoolctl==3.5.0
+tokenize-rt==6.0.0
+tokenizers==0.20.0
+tomlkit==0.12.0
+torch==2.4.1
+tqdm==4.66.5
+traitlets==5.14.3
+transformers==4.45.0
+triton==3.0.0
+typer==0.12.5
+typing-inspect==0.9.0
+typing_extensions==4.12.2
+tzdata==2024.2
+urllib3==2.2.3
+uvicorn==0.30.6
+uvloop==0.20.0
+watchfiles==0.24.0
+wcwidth==0.2.13
+websocket-client==1.8.0
+websockets==12.0
+wrapt==1.16.0
+yarl==1.12.1
+zipp==3.20.2
run.py ADDED
@@ -0,0 +1,59 @@
+import gradio as gr
+import os
+import json
+import ast
+import requests
+from difflib import Differ
+from jinja2 import Template, Environment
+
+
+def update_question(selected_question, data):
+
+    if data is None or not data:
+        return "", "", "", ""  # match the four values returned below
+    selected_data = next(d for d in data if d["question"] == selected_question)
+    function_template = selected_data["question_template"].replace("\\n", "\n")
+    # print(selected_data)
+    return (
+        selected_data["solution"],
+        selected_data["question"],
+        selected_data["testcases"],
+        function_template,
+    )
+
+
+def run_code(code_snippet, test_cases, input_type="stdin"):
+
+    actual_output_messages = []
+    expected_output_messages = []
+    if input_type == "code":
+        code_snippet += "\nimport sys; exec(sys.stdin.read())"
+
+    # print(code_snippet)
+    for test_case in test_cases:
+
+        expected_output = test_case["output"]
+        payload = {
+            "language": "python",
+            "version": "3.10.0",
+            "files": [{"name": "script.py", "content": code_snippet}],
+            "stdin": test_case["input"],
+        }
+
+        response = requests.post("https://emkc.org/api/v2/piston/execute", json=payload)
+        execution_result = response.json()
+
+        actual_output = (
+            execution_result["run"]["output"].strip()
+            if "run" in execution_result and "output" in execution_result["run"]
+            else ""
+        )
+        actual_output_messages.append(actual_output)
+
+        expected_output_messages.append(expected_output)
+
+    output_json = {
+        "actual_output": actual_output_messages,
+        "expected_output": expected_output_messages,
+    }
+    return output_json
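run_code hands execution to the public Piston API at emkc.org rather than running user code locally. A hedged usage sketch (the code snippet and test cases are invented for illustration):

```python
# Hypothetical call into run.run_code; the snippet and tests are made up.
from run import run_code

tests = [
    {"input": "1 2\n", "output": "3"},
    {"input": "10 -4\n", "output": "6"},
]

snippet = "a, b = map(int, input().split())\nprint(a + b)"

result = run_code(snippet, tests, input_type="stdin")
# result is a plain dict, e.g.:
# {"actual_output": ["3", "6"], "expected_output": ["3", "6"]}
print(result)
```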
run.sh ADDED
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+arg=$1
+arg=${arg:-app.py}
+
+PYTHONPATH="$HOME/gradio-frontend-ai-prog-gen:$PYTHONPATH" gradio "$arg"
template.py ADDED
@@ -0,0 +1,75 @@
+import gradio as gr
+import os
+import json
+import ast
+import requests
+from difflib import Differ
+from jinja2 import Template, Environment
+
+
+# output_json = {"actual_output": actual_output_messages, "expected_output": expected_output_messages}
+# input,
+def make_template_testcases(selected_question, data):
+    from run import update_question
+
+    solution, question_display, testcases, code_input = update_question(
+        selected_question, data
+    )
+    testcases_template = Template(
+        """
+{% for testcase in testcases %}
+### Input {{loop.index}}
+```
+{{testcase.input}}
+```
+### Expected Output {{loop.index}}
+```
+{{testcase.output}}
+```
+
+
+{% endfor %}
+"""
+    )
+    return (
+        solution,
+        question_display,
+        testcases_template.render(testcases=testcases),
+        code_input,
+    )
+
+
+def make_template_outputs(code_snippet, selected_question, test_data):
+    from run import run_code
+
+    selected_data = next(
+        (item for item in test_data if item["question"] == selected_question), None
+    )
+    test_cases = selected_data["testcases"]
+    input_type = selected_data["input_type"]
+
+    output_json = run_code(code_snippet, test_cases, input_type)
+
+    def zip_filter(a, b):
+        return zip(a, b)
+
+    env = Environment()
+    env.filters["zip"] = zip_filter
+
+    output_template = env.from_string(
+        """
+{% set list1 = output_json['actual_output'] %}
+{% set list2 = output_json['expected_output'] %}
+{% for item1, item2 in list1 | zip(list2) %}
+### Actual Output {{loop.index}}
+{% if item1 == item2 %}
+Passed ✅
+{% else %}
+Expected: {{ item2 }}, but got: {{ item1 }} ❌
+{% endif %}
+{% endfor %}
+
+"""
+    )
+
+    return output_template.render(output_json=output_json)
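The zip_filter registered in make_template_outputs exists because Jinja2 has no built-in zip filter. A small standalone illustration of the same pattern (not part of the commit):

```python
# Standalone sketch of the custom zip filter used by make_template_outputs.
from jinja2 import Environment

env = Environment()
env.filters["zip"] = lambda a, b: zip(a, b)

template = env.from_string(
    "{% for got, want in actual | zip(expected) %}"
    "{{ 'Passed' if got == want else 'Failed' }} "
    "{% endfor %}"
)

print(template.render(actual=["3", "6"], expected=["3", "7"]))  # Passed Failed
```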