Spaces:
Runtime error
Runtime error
Jumper-Clown
committed on
Commit
•
c82eb8a
1
Parent(s):
7711652
add ctransformers options, only make buttons visible when ready.
Browse files- .idea/inspectionProfiles/Project_Default.xml +14 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +7 -0
- .idea/vcs.xml +6 -0
- .idea/workspace.xml +89 -0
- README.md +10 -1
- __pycache__/ai.cpython-311.pyc +0 -0
- __pycache__/cpu_ai.cpython-311.pyc +0 -0
- __pycache__/github_manager.cpython-311.pyc +0 -0
- ai.py +55 -0
- app.py +58 -131
- cpu_ai.py +62 -0
- github_manager.py +30 -3
- models/abacaj/config.json +1 -0
- models/abacaj/replit-v2-codeinstruct-3b.q4_1.bin +1 -0
- models/marella/config.json +1 -0
- models/marella/ggml-model.bin +1 -0
- requirements.txt +8 -1
- resources/main.py +29 -0
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<component name="InspectionProjectProfileManager">
|
2 |
+
<profile version="1.0">
|
3 |
+
<option name="myName" value="Project Default" />
|
4 |
+
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
5 |
+
<option name="ignoredPackages">
|
6 |
+
<value>
|
7 |
+
<list size="1">
|
8 |
+
<item index="0" class="java.lang.String" itemvalue="panel" />
|
9 |
+
</list>
|
10 |
+
</value>
|
11 |
+
</option>
|
12 |
+
</inspection_tool>
|
13 |
+
</profile>
|
14 |
+
</component>
|
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<component name="InspectionProjectProfileManager">
|
2 |
+
<settings>
|
3 |
+
<option name="USE_PROJECT_PROFILE" value="false" />
|
4 |
+
<version value="1.0" />
|
5 |
+
</settings>
|
6 |
+
</component>
|
.idea/misc.xml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="Black">
|
4 |
+
<option name="sdkName" value="Python 3.11 (Interpreter)" />
|
5 |
+
</component>
|
6 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (Interpreter)" project-jdk-type="Python SDK" />
|
7 |
+
</project>
|
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="VcsDirectoryMappings">
|
4 |
+
<mapping directory="" vcs="Git" />
|
5 |
+
</component>
|
6 |
+
</project>
|
.idea/workspace.xml
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="AutoImportSettings">
|
4 |
+
<option name="autoReloadType" value="SELECTIVE" />
|
5 |
+
</component>
|
6 |
+
<component name="ChangeListManager">
|
7 |
+
<list default="true" id="ba1eab7b-54ae-409d-bac6-f23bdc636598" name="Changes" comment="">
|
8 |
+
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
|
9 |
+
<change beforePath="$PROJECT_DIR$/app.py" beforeDir="false" afterPath="$PROJECT_DIR$/app.py" afterDir="false" />
|
10 |
+
</list>
|
11 |
+
<option name="SHOW_DIALOG" value="false" />
|
12 |
+
<option name="HIGHLIGHT_CONFLICTS" value="true" />
|
13 |
+
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
|
14 |
+
<option name="LAST_RESOLUTION" value="IGNORE" />
|
15 |
+
</component>
|
16 |
+
<component name="Git.Settings">
|
17 |
+
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
|
18 |
+
</component>
|
19 |
+
<component name="MarkdownSettingsMigration">
|
20 |
+
<option name="stateVersion" value="1" />
|
21 |
+
</component>
|
22 |
+
<component name="ProjectColorInfo">{
|
23 |
+
"customColor": "",
|
24 |
+
"associatedIndex": 1
|
25 |
+
}</component>
|
26 |
+
<component name="ProjectId" id="2brQfGQyfPQ65PvvvHHagDIifzV" />
|
27 |
+
<component name="ProjectViewState">
|
28 |
+
<option name="hideEmptyMiddlePackages" value="true" />
|
29 |
+
<option name="showLibraryContents" value="true" />
|
30 |
+
</component>
|
31 |
+
<component name="PropertiesComponent">{
|
32 |
+
"keyToString": {
|
33 |
+
"Python.Gradio.executor": "Run",
|
34 |
+
"Python.app.executor": "Run",
|
35 |
+
"RunOnceActivity.OpenProjectViewOnStart": "true",
|
36 |
+
"RunOnceActivity.ShowReadmeOnStart": "true",
|
37 |
+
"SHARE_PROJECT_CONFIGURATION_FILES": "true",
|
38 |
+
"git-widget-placeholder": "main",
|
39 |
+
"last_opened_file_path": "/home/jc/devel/python/Interpreter",
|
40 |
+
"settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable"
|
41 |
+
}
|
42 |
+
}</component>
|
43 |
+
<component name="RunManager">
|
44 |
+
<configuration name="Gradio" type="PythonConfigurationType" factoryName="Python">
|
45 |
+
<module name="TextToGame" />
|
46 |
+
<option name="ENV_FILES" value="" />
|
47 |
+
<option name="INTERPRETER_OPTIONS" value="" />
|
48 |
+
<option name="PARENT_ENVS" value="true" />
|
49 |
+
<envs>
|
50 |
+
<env name="PYTHONUNBUFFERED" value="1" />
|
51 |
+
<env name="GIT_REPO_NAME" value="text-to-game" />
|
52 |
+
<env name="GIT_TOKEN" value="1" />
|
53 |
+
<env name="GIT_USER_NAME" value="PJAvanRooyen" />
|
54 |
+
</envs>
|
55 |
+
<option name="SDK_HOME" value="" />
|
56 |
+
<option name="SDK_NAME" value="Python 3.11 (Interpreter)" />
|
57 |
+
<option name="WORKING_DIRECTORY" value="" />
|
58 |
+
<option name="IS_MODULE_SDK" value="false" />
|
59 |
+
<option name="ADD_CONTENT_ROOTS" value="true" />
|
60 |
+
<option name="ADD_SOURCE_ROOTS" value="true" />
|
61 |
+
<option name="SCRIPT_NAME" value="$FileDir$/app.py" />
|
62 |
+
<option name="PARAMETERS" value="" />
|
63 |
+
<option name="SHOW_COMMAND_LINE" value="false" />
|
64 |
+
<option name="EMULATE_TERMINAL" value="false" />
|
65 |
+
<option name="MODULE_MODE" value="false" />
|
66 |
+
<option name="REDIRECT_INPUT" value="false" />
|
67 |
+
<option name="INPUT_FILE" value="" />
|
68 |
+
<method v="2" />
|
69 |
+
</configuration>
|
70 |
+
</component>
|
71 |
+
<component name="SharedIndexes">
|
72 |
+
<attachedChunks>
|
73 |
+
<set>
|
74 |
+
<option value="bundled-python-sdk-5a2391486177-d3b881c8e49f-com.jetbrains.pycharm.community.sharedIndexes.bundled-PC-233.13763.11" />
|
75 |
+
</set>
|
76 |
+
</attachedChunks>
|
77 |
+
</component>
|
78 |
+
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
|
79 |
+
<component name="TaskManager">
|
80 |
+
<task active="true" id="Default" summary="Default task">
|
81 |
+
<changelist id="ba1eab7b-54ae-409d-bac6-f23bdc636598" name="Changes" comment="" />
|
82 |
+
<created>1706970818729</created>
|
83 |
+
<option name="number" value="Default" />
|
84 |
+
<option name="presentableId" value="Default" />
|
85 |
+
<updated>1706970818729</updated>
|
86 |
+
</task>
|
87 |
+
<servers />
|
88 |
+
</component>
|
89 |
+
</project>
|
README.md
CHANGED
@@ -10,4 +10,13 @@ pinned: false
|
|
10 |
license: mit
|
11 |
---
|
12 |
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
license: mit
|
11 |
---
|
12 |
|
13 |
+
Model and its usage:
|
14 |
+
https://github.com/abacaj/replit-3B-inference
|
15 |
+
|
16 |
+
https://medium.com/@fareedkhandev/running-a-free-powerful-ai-coding-llm-on-cpu-6945544f3992
|
17 |
+
|
18 |
+
https://huggingface.co/abacaj/Replit-v2-CodeInstruct-3B-ggml
|
19 |
+
|
20 |
+
Useful docs:
|
21 |
+
https://pygame-web.github.io/wiki/pygbag/github.io/
|
22 |
+
|
__pycache__/ai.cpython-311.pyc
ADDED
Binary file (2.34 kB). View file
|
|
__pycache__/cpu_ai.cpython-311.pyc
ADDED
Binary file (2.55 kB). View file
|
|
__pycache__/github_manager.cpython-311.pyc
ADDED
Binary file (3.17 kB). View file
|
|
ai.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
2 |
+
|
3 |
+
import cpu_ai
|
4 |
+
|
5 |
+
# Selectable model identifiers.  Indices 0 and 1 are local ggml weight files
# (loaded via ctransformers from models/abacaj and models/marella — see
# generate_code below); the remaining entries are Hugging Face hub repo ids
# loaded through transformers.
models = [
    "replit-v2-codeinstruct-3b.q4_1",  # "abacaj/Replit-v2-CodeInstruct-3B-ggml",
    "ggml-model",  # "marella/gpt-2-ggml",
    "WizardLM/WizardCoder-Python-34B-V1.0",
    "WizardLM/WizardCoder-15B-V1.0",
    "WizardLM/WizardCoder-Python-7B-V1.0",
    "WizardLM/WizardCoder-3B-V1.0",
    "WizardLM/WizardCoder-1B-V1.0",
]
|
14 |
+
|
15 |
+
|
16 |
+
def run_general_model(model_name, prompt, max_tokens, temperature=0.6):
    """Load *model_name* from the Hugging Face hub and run one generation.

    Downloads/loads the tokenizer and model on every call, then delegates the
    actual sampling to run_model.
    """
    # trust_remote_code is needed for models that ship custom modelling code.
    tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    lm = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    return run_model(lm, tok, prompt, max_tokens, temperature)
|
20 |
+
|
21 |
+
|
22 |
+
def run_model(model, tokenizer, prompt, max_tokens, temperature=0.6):
    """Sample up to *max_tokens* new tokens from *model* for *prompt*.

    Returns the decoded text (prompt included, special tokens stripped).
    """
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    # NOTE(review): eos_token_id is hard-coded to 2 — confirm this id is
    # correct for every entry in `models`.
    generated = model.generate(
        **encoded,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=2,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
|
33 |
+
|
34 |
+
|
35 |
+
def cleanup_response(generated_text):
    """Post-process raw model output into usable game code.

    Currently a pass-through; see the TODO list for planned cleanup steps.
    """
    # TODO:
    # - remove comments (or convert them to python comments)
    # - test if code is valid (e.g. opening brackets have closing brackets etc.)
    # - wrap code in async if not yet wrapped
    return generated_text
|
43 |
+
|
44 |
+
|
45 |
+
def generate_code(prompt, model_index, max_tokens, temperature=0.6):
    """Generate game code with the model selected by *model_index*.

    Indices 0 and 1 map to local ggml models served via cpu_ai/ctransformers;
    all other indices are hub models run through transformers.
    """
    model_full_name = models[model_index]
    local_model_dirs = {0: "models/abacaj", 1: "models/marella"}
    if model_index in local_model_dirs:
        output = cpu_ai.generate_code(prompt, local_model_dirs[model_index],
                                      model_full_name, max_tokens, temperature)
    else:
        output = run_general_model(model_full_name, prompt, max_tokens, temperature)
    return cleanup_response(output)
|
app.py
CHANGED
@@ -1,136 +1,63 @@
|
|
1 |
import gradio as gr
|
|
|
2 |
import github_manager
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
color = color_passive
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
async def main():
|
64 |
-
user_text = ''
|
65 |
-
active = False
|
66 |
-
while True:
|
67 |
-
for event in pygame.event.get():
|
68 |
-
|
69 |
-
# if user types QUIT then the screen will close
|
70 |
-
if event.type == pygame.QUIT:
|
71 |
-
pygame.quit()
|
72 |
-
sys.exit()
|
73 |
-
|
74 |
-
if event.type == pygame.MOUSEBUTTONDOWN:
|
75 |
-
|
76 |
-
if input_rect.collidepoint(event.pos):
|
77 |
-
active = True
|
78 |
-
else:
|
79 |
-
active = False
|
80 |
-
|
81 |
-
if event.type == pygame.KEYDOWN:
|
82 |
-
print(event.key)
|
83 |
-
# Check for backspace
|
84 |
-
if event.key == pygame.K_BACKSPACE:
|
85 |
-
|
86 |
-
# get text input from 0 to -1 i.e. end.
|
87 |
-
user_text = user_text[:-1]
|
88 |
-
elif event.key == 13 or event.key == 1073741912:
|
89 |
-
if user_text == "ciao":
|
90 |
-
user_text = "Ciao anche a te"
|
91 |
-
|
92 |
-
# Unicode standard is used for string
|
93 |
-
# formation
|
94 |
-
else:
|
95 |
-
user_text += event.unicode
|
96 |
-
|
97 |
-
# it will set background color of screen
|
98 |
-
screen.fill((255, 255, 255))
|
99 |
-
|
100 |
-
if active:
|
101 |
-
color = color_active
|
102 |
-
else:
|
103 |
-
color = color_passive
|
104 |
-
|
105 |
-
# draw rectangle and argument passed which should
|
106 |
-
# be on screen
|
107 |
-
pygame.draw.rect(screen, color, input_rect)
|
108 |
-
|
109 |
-
text_surface = base_font.render(user_text, True, (255, 255, 255))
|
110 |
-
|
111 |
-
# render at position stated in arguments
|
112 |
-
screen.blit(text_surface, (input_rect.x+5, input_rect.y+5))
|
113 |
-
|
114 |
-
# set width of textfield so that text cannot get
|
115 |
-
# outside of user's text input
|
116 |
-
input_rect.w = max(100, text_surface.get_width()+10)
|
117 |
-
|
118 |
-
# display.flip() will update only a portion of the
|
119 |
-
# screen to updated, not full area
|
120 |
-
pygame.display.flip()
|
121 |
-
|
122 |
-
# clock.tick(60) means that for every second at most
|
123 |
-
# 60 frames should be passed.
|
124 |
-
clock.tick(60)
|
125 |
-
await asyncio.sleep(0)
|
126 |
-
|
127 |
-
asyncio.run(main())
|
128 |
-
""")
|
129 |
-
out = gr.Textbox(placeholder="Result")
|
130 |
-
btn = gr.Button(value="Push")
|
131 |
-
btn.click(update, inputs=[inp], outputs=[out])
|
132 |
-
gr.Markdown("""[Play the game!](https://pjavanrooyen.github.io/text-to-game/)""")
|
133 |
|
134 |
|
135 |
if __name__ == "__main__":
|
136 |
-
|
|
|
1 |
import gradio as gr
|
2 |
+
import ai
|
3 |
import github_manager
|
4 |
+
import webbrowser
|
5 |
+
|
6 |
+
|
7 |
+
def main():
    """Build and launch the Gradio UI: generate code, push it, then play it."""
    # Template game whose "# Insert code here" markers the model must fill in;
    # the whole file is appended to the prompt below.
    example_code_file_path = "resources/main.py"
    with open(example_code_file_path, "r") as file:
        example_code = file.read()

    pre_prompt = """Replace the "# Insert code here" sections with relevant code. Create: """
    initial_prompt = """Conway's game of life, but the user controls one of the squares. If the square dies, the game is over."""
    post_prompt = f"""{example_code}"""

    with gr.Blocks() as app:
        inp = gr.Textbox(label="Input Prompt",
                         value=initial_prompt)
        with gr.Row():
            # type="index" makes the click callback receive the selection's
            # position in ai.models rather than its display name.
            model_choice = gr.Dropdown(label="Select Model",
                                       choices=[m for m in ai.models],
                                       value=ai.models[0],
                                       type="index",
                                       interactive=True)
            no_tokens = gr.Number(label="Tokens", value=1000)
        btn = gr.Button(value="Generate Game")

        # Kept invisible until the preceding step succeeds
        # (generate -> build -> play), per the commit intent.
        with gr.Column(visible=False) as save_col:
            save_button = gr.Button("Build The Game")
        with gr.Column(visible=False) as play_col:
            play_button = gr.Button("Play The Game")

        out = gr.Code(label="Generated Code")

        def generate(prompt, model_index, max_tokens):
            # Assemble instruction + user request + template into one prompt.
            final_prompt = f"""{pre_prompt} {prompt}
{post_prompt}"""
            generated_code = ai.generate_code(prompt=final_prompt,
                                              model_index=model_index,
                                              max_tokens=max_tokens)
            # Reveal the build button only once code has been generated.
            return {
                out: generated_code,
                save_col: gr.Column(visible=True),
            }

        def save(text):
            github_manager.update_repo(text)
            # Reveal the play button only after the repo was updated.
            return {
                play_col: gr.Column(visible=True)
            }

        def play():
            # Opens the GitHub Pages site in the server's browser.
            webbrowser.open(github_manager.game_link())

        btn.click(generate, inputs=[inp, model_choice, no_tokens], outputs=[out, save_col], queue=True)
        save_button.click(save, inputs=[out], outputs=[play_col])
        play_button.click(play, inputs=[], outputs=[])

    app.launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
|
61 |
|
62 |
if __name__ == "__main__":
|
63 |
+
main()
|
cpu_ai.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from dataclasses import dataclass, asdict
|
3 |
+
from ctransformers import AutoModelForCausalLM, AutoConfig
|
4 |
+
|
5 |
+
|
6 |
+
@dataclass
class GenerationConfig:
    """Options expanded (via asdict) into keyword args of a ctransformers call."""

    temperature: float           # sampling temperature
    top_k: int                   # keep only the k most likely next tokens
    top_p: float                 # nucleus-sampling probability mass
    repetition_penalty: float    # > 1.0 discourages repetition
    max_new_tokens: int          # hard cap on newly generated tokens
    reset: bool                  # reset the model's history (cache) before the call
    stream: bool                 # if True the call yields tokens as a generator
    threads: int                 # CPU threads used for inference
    stop: list[str]              # stop generation when any of these strings appears
|
17 |
+
|
18 |
+
|
19 |
+
def format_prompt(user_prompt: str):
    """Wrap *user_prompt* in the Instruction/Response template the local code models expect."""
    return f"""### Instruction:
{user_prompt}

### Response:"""
|
24 |
+
|
25 |
+
|
26 |
+
def generate(llm: AutoModelForCausalLM,
             generation_config: GenerationConfig,
             prompt: str):
    """Invoke *llm* on the formatted prompt with the given generation settings."""
    options = asdict(generation_config)
    return llm(format_prompt(prompt), **options)
|
30 |
+
|
31 |
+
|
32 |
+
def generate_code(prompt, config_path, model_name, max_tokens, temperature):
    """Generate text with a local ggml model via ctransformers.

    prompt       -- raw user prompt (wrapped by format_prompt inside generate)
    config_path  -- directory holding the model's config.json
    model_name   -- basename of the .bin weights file inside config_path
    max_tokens   -- cap on newly generated tokens
    temperature  -- sampling temperature
    Returns the full generated string (streamed tokens concatenated).
    """
    path = os.path.abspath(f"{config_path}/{model_name}.bin")

    config = AutoConfig.from_pretrained(
        os.path.abspath(config_path),
    )
    # NOTE(review): model_type is hard-coded to "replit" although this function
    # is also called for the marella gpt-2 model — confirm ctransformers
    # tolerates/overrides this for non-replit weights.
    llm = AutoModelForCausalLM.from_pretrained(
        path,
        model_type="replit",
        config=config,
    )

    generation_config = GenerationConfig(
        temperature=temperature,
        top_k=50,
        top_p=0.9,
        repetition_penalty=1.0,
        max_new_tokens=max_tokens,  # adjust as needed
        reset=True,  # reset history (cache)
        stream=True,  # streaming per word/token
        threads=os.cpu_count(),  # adjust for your CPU
        stop=["<|endoftext|>"],
    )

    # stream=True makes the call return a generator; accumulate the pieces,
    # printing each as simple progress output.
    generator = generate(llm, generation_config, prompt)
    output = ""
    for word in generator:
        print(word)
        output += word
    return output
|
62 |
+
|
github_manager.py
CHANGED
@@ -12,6 +12,10 @@ branch_name = 'main'
|
|
12 |
github_token = os.environ['GIT_TOKEN']
|
13 |
|
14 |
|
|
|
|
|
|
|
|
|
15 |
def push(new_content, path, file_name):
|
16 |
if len(path) == 0:
|
17 |
file_path = file_name
|
@@ -23,7 +27,6 @@ def push(new_content, path, file_name):
|
|
23 |
headers = {'Authorization': f'token {github_token}'}
|
24 |
response = requests.get(url, headers=headers)
|
25 |
response_json = response.json()
|
26 |
-
# Extract necessary information
|
27 |
current_sha = response_json['sha']
|
28 |
|
29 |
encoded_content = base64.b64encode(new_content.encode()).decode()
|
@@ -35,14 +38,14 @@ def push(new_content, path, file_name):
|
|
35 |
'sha': current_sha,
|
36 |
'branch': branch_name
|
37 |
}
|
38 |
-
|
39 |
update_url = f'https://api.github.com/repos/{repository_owner}/{repository_name}/contents/{file_path}'
|
40 |
response = requests.put(update_url, json=update_data, headers=headers)
|
41 |
return response
|
42 |
|
43 |
|
44 |
def trigger_workflow(workflow_name):
|
45 |
-
api_url = f
|
|
|
46 |
|
47 |
headers = {
|
48 |
'Authorization': f'Bearer {github_token}',
|
@@ -55,3 +58,27 @@ def trigger_workflow(workflow_name):
|
|
55 |
|
56 |
response = requests.post(api_url, headers=headers, json=payload)
|
57 |
return response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
github_token = os.environ['GIT_TOKEN']
|
13 |
|
14 |
|
15 |
+
def game_link():
    """Public GitHub Pages URL where the generated game is served."""
    return f"https://{repository_owner}.github.io/{repository_name}/"
|
17 |
+
|
18 |
+
|
19 |
def push(new_content, path, file_name):
|
20 |
if len(path) == 0:
|
21 |
file_path = file_name
|
|
|
27 |
headers = {'Authorization': f'token {github_token}'}
|
28 |
response = requests.get(url, headers=headers)
|
29 |
response_json = response.json()
|
|
|
30 |
current_sha = response_json['sha']
|
31 |
|
32 |
encoded_content = base64.b64encode(new_content.encode()).decode()
|
|
|
38 |
'sha': current_sha,
|
39 |
'branch': branch_name
|
40 |
}
|
|
|
41 |
update_url = f'https://api.github.com/repos/{repository_owner}/{repository_name}/contents/{file_path}'
|
42 |
response = requests.put(update_url, json=update_data, headers=headers)
|
43 |
return response
|
44 |
|
45 |
|
46 |
def trigger_workflow(workflow_name):
|
47 |
+
api_url = f"""https://api.github.com/repos/
|
48 |
+
{repository_owner}/{repository_name}/actions/workflows/{workflow_name}/dispatches"""
|
49 |
|
50 |
headers = {
|
51 |
'Authorization': f'Bearer {github_token}',
|
|
|
58 |
|
59 |
response = requests.post(api_url, headers=headers, json=payload)
|
60 |
return response
|
61 |
+
|
62 |
+
|
63 |
+
def update_repo(text):
    """Push *text* as main.py to the game repo, then start the pygbag build.

    Returns the API error body on failure, or "update successful".
    """
    # TODO: use API's concurrency feature to ensure only 1 user submits at a time.

    file_dir_path = ''
    file_name = 'main.py'
    response = push(text, file_dir_path, file_name)
    if response.status_code != 200:
        return response.text
    print(f"{file_name} pushed")

    workflow_name = 'pygbag.yml'
    response = trigger_workflow(workflow_name)
    # A successful workflow dispatch returns 204 No Content.
    if response.status_code != 204:
        return response.text
    print(f"{workflow_name} workflow started")

    # TODO: poll running workflows and only return once all are done.
    return "update successful"
|
models/abacaj/config.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
../../../../../.cache/huggingface/hub/models--abacaj--Replit-v2-CodeInstruct-3B-ggml/blobs/89f16f5d98c06fe143b1c063119092522c762d26
|
models/abacaj/replit-v2-codeinstruct-3b.q4_1.bin
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
../../../../../.cache/huggingface/hub/models--abacaj--Replit-v2-CodeInstruct-3B-ggml/blobs/3be2edcc7a10e90ccf33ea868cd128e909a94ca6985806070f758a0d2c495a33
|
models/marella/config.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
../../../../../.cache/huggingface/hub/models--marella--gpt-2-ggml/blobs/b355ccf716045de455cc202c1683374d634f2615
|
models/marella/ggml-model.bin
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
../../../../../.cache/huggingface/hub/models--marella--gpt-2-ggml/blobs/b457d5fcc7f2f71e727bee74298d42d80610619e02af16beca53d44a71d5f607
|
requirements.txt
CHANGED
@@ -1 +1,8 @@
|
|
1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio
|
2 |
+
|
3 |
+
transformers
|
4 |
+
torch
|
5 |
+
sentencepiece
|
6 |
+
requests
|
7 |
+
|
8 |
+
ctransformers
|
resources/main.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pygame
|
2 |
+
import sys
|
3 |
+
import asyncio
|
4 |
+
|
5 |
+
pygame.init()
|
6 |
+
clock = pygame.time.Clock()
|
7 |
+
screen = pygame.display.set_mode([600, 500])
|
8 |
+
base_font = pygame.font.Font(None, 32)
|
9 |
+
|
10 |
+
async def main():
|
11 |
+
while True:
|
12 |
+
for event in pygame.event.get():
|
13 |
+
if event.type == pygame.QUIT:
|
14 |
+
pygame.quit()
|
15 |
+
sys.exit()
|
16 |
+
|
17 |
+
if event.type == pygame.MOUSEBUTTONDOWN:
|
18 |
+
# Insert code here
|
19 |
+
|
20 |
+
if event.type == pygame.KEYDOWN:
|
21 |
+
# Insert code here
|
22 |
+
|
23 |
+
# Insert code here
|
24 |
+
|
25 |
+
pygame.display.flip()
|
26 |
+
clock.tick(60)
|
27 |
+
await asyncio.sleep(0)
|
28 |
+
|
29 |
+
asyncio.run(main())
|