Spaces:
Sleeping
Sleeping
acecalisto3
committed on
Commit
•
3137439
1
Parent(s):
2b7f3e0
Update app.py
Browse files
app.py
CHANGED
@@ -1,134 +1,132 @@
|
|
1 |
-
import
|
|
|
2 |
import os
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
from
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
#
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
def
|
18 |
-
|
19 |
-
|
20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
try:
|
22 |
-
response =
|
23 |
-
|
24 |
-
|
25 |
-
# Extract the JSON part
|
26 |
-
json_start = generated_text.index('{')
|
27 |
-
json_end = generated_text.rindex('}') + 1
|
28 |
-
config_str = generated_text[json_start:json_end]
|
29 |
-
|
30 |
-
config = json.loads(config_str)
|
31 |
-
return config
|
32 |
-
except JSONDecodeError as e:
|
33 |
-
st.error(f"Error decoding JSON: {str(e)}")
|
34 |
-
return None
|
35 |
except Exception as e:
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
)
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
config_dict = eval(config_json)
|
88 |
-
config = ProjectConfig(**config_dict)
|
89 |
-
structure = agent.create_project_structure(config)
|
90 |
-
st.json(structure)
|
91 |
-
|
92 |
-
elif option == "Implement Feature":
|
93 |
-
feature_description = st.text_area("Enter feature description:")
|
94 |
-
existing_code = st.text_area("Enter existing code (optional):")
|
95 |
-
if st.button("Implement"):
|
96 |
-
new_code = agent.implement_feature(feature_description, existing_code)
|
97 |
-
st.code(new_code)
|
98 |
-
|
99 |
-
elif option == "Review Code":
|
100 |
-
code = st.text_area("Enter code to review:")
|
101 |
-
if st.button("Review"):
|
102 |
-
review = agent.review_code(code)
|
103 |
-
st.write(review)
|
104 |
-
|
105 |
-
elif option == "Optimize Code":
|
106 |
-
code = st.text_area("Enter code to optimize:")
|
107 |
-
optimization_goal = st.text_input("Enter optimization goal:")
|
108 |
-
if st.button("Optimize"):
|
109 |
-
optimized_code = agent.optimize_code(code, optimization_goal)
|
110 |
-
st.code(optimized_code)
|
111 |
-
|
112 |
-
elif option == "Generate Documentation":
|
113 |
-
code = st.text_area("Enter code to document:")
|
114 |
-
if st.button("Generate"):
|
115 |
-
documentation = agent.generate_documentation(code)
|
116 |
-
st.markdown(documentation)
|
117 |
-
|
118 |
-
elif option == "Suggest Tests":
|
119 |
-
code = st.text_area("Enter code to suggest tests for:")
|
120 |
-
if st.button("Suggest"):
|
121 |
-
test_suggestions = agent.suggest_tests(code)
|
122 |
-
st.write(test_suggestions)
|
123 |
-
|
124 |
-
elif option == "Explain Code":
|
125 |
-
code = st.text_area("Enter code to explain:")
|
126 |
-
if st.button("Explain"):
|
127 |
-
explanation = agent.explain_code(code)
|
128 |
-
st.write(explanation)
|
129 |
-
|
130 |
-
elif option == "Suggest Refactoring":
|
131 |
-
code = st.text_area("Enter code to suggest refactoring:")
|
132 |
-
if st.button("Suggest"):
|
133 |
-
refactoring_suggestions = agent.suggest_refactoring(code)
|
134 |
-
st.write(refactoring_suggestions)
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from ctransformers import AutoModelForCausalLM
|
3 |
import os
|
4 |
+
import json
|
5 |
+
import time
|
6 |
+
import logging
|
7 |
+
from threading import Lock
|
8 |
+
|
9 |
+
# Setup logging
|
10 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
11 |
+
|
12 |
+
# Constants
|
13 |
+
MODEL_NAME = "LoneStriker/speechless-zephyr-code-functionary-7b-GGUF"
|
14 |
+
CACHE_DIR = "model_cache"
|
15 |
+
CONFIG_FILE = "config.json"
|
16 |
+
|
17 |
+
class EnhancedChatbot:
    """Thread-safe wrapper around a ctransformers causal LM with persisted settings.

    Settings are read from CONFIG_FILE (merged over built-in defaults), the
    model is loaded once at construction, and every generation call is
    serialized with a lock so concurrent Gradio requests do not interleave
    on the underlying model.
    """

    def __init__(self):
        self.model = None               # set by load_model()
        self.config = self.load_config()
        self.model_lock = Lock()        # guards all calls into self.model
        self.load_model()

    def _default_config(self):
        """Return a fresh copy of the built-in default settings."""
        return {
            "model_name": MODEL_NAME,
            "max_tokens": 512,
            "temperature": 0.7,
            "top_p": 0.95,
            "system_message": "You are a friendly and helpful AI assistant.",
            "gpu_layers": 0,
        }

    def load_config(self):
        """Load settings from CONFIG_FILE, filling in any missing keys.

        Merging the file contents over the defaults (instead of returning
        the raw file dict) prevents KeyErrors later when an older or
        hand-edited config.json lacks a key the app now expects.
        """
        config = self._default_config()
        if os.path.exists(CONFIG_FILE):
            with open(CONFIG_FILE, 'r') as f:
                config.update(json.load(f))
        return config

    def save_config(self):
        """Persist the current settings to CONFIG_FILE as pretty-printed JSON."""
        with open(CONFIG_FILE, 'w') as f:
            json.dump(self.config, f, indent=2)

    def load_model(self):
        """Load (or reload) the model named in the current config.

        Raises:
            Exception: re-raised from ctransformers when the model cannot
                be downloaded or initialized, after logging the failure.
        """
        try:
            self.model = AutoModelForCausalLM.from_pretrained(
                self.config["model_name"],
                model_type="llama",
                gpu_layers=self.config["gpu_layers"],
                cache_dir=CACHE_DIR,
            )
            logging.info(f"Model loaded successfully: {self.config['model_name']}")
        except Exception as e:
            logging.error(f"Error loading model: {str(e)}")
            raise

    def generate_response(self, message, history, system_message, max_tokens, temperature, top_p):
        """Generate one assistant reply for `message` given the chat history.

        Args:
            message: Latest user message.
            history: Iterable of (user_msg, assistant_msg) pairs, oldest first.
            system_message: Persona/instruction prefix for the prompt.
            max_tokens: Maximum number of new tokens to generate.
            temperature: Sampling temperature.
            top_p: Nucleus-sampling cutoff.

        Returns:
            The model's reply with surrounding whitespace stripped.
        """
        # Plain "Human:/Assistant:" transcript prompt format.
        prompt = f"{system_message}\n\n"
        for user_msg, assistant_msg in history:
            prompt += f"Human: {user_msg}\nAssistant: {assistant_msg}\n"
        prompt += f"Human: {message}\nAssistant: "

        start_time = time.time()
        with self.model_lock:
            generated_text = self.model(
                prompt,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
            )
        end_time = time.time()

        response_time = end_time - start_time
        logging.info(f"Response generated in {response_time:.2f} seconds")

        return generated_text.strip()
|
74 |
+
|
75 |
+
# Module-level singleton shared by all Gradio handlers; constructing it
# loads the model at import time, so startup blocks until the model is ready.
chatbot = EnhancedChatbot()
|
76 |
+
|
77 |
+
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Gradio ChatInterface handler: yield one reply, or an apology on error.

    The broad except is deliberate — this is the top-level UI boundary, so
    any failure is logged and turned into a friendly message instead of a
    traceback in the chat window.
    """
    try:
        yield chatbot.generate_response(
            message, history, system_message, max_tokens, temperature, top_p
        )
    except Exception as err:
        logging.error(f"Error generating response: {str(err)}")
        yield "I apologize, but I encountered an error while processing your request. Please try again."
|
84 |
+
|
85 |
+
def update_model_config(model_name, gpu_layers):
    """Switch the shared chatbot to a new model / GPU-layer setting.

    The new settings are persisted only after the model loads successfully;
    the original code saved config.json *before* attempting the load, so a
    bad model name left a broken name on disk and a dead in-memory model.
    On failure the previous working model is restored.

    Returns:
        A status string for display in the UI.
    """
    previous_name = chatbot.config["model_name"]
    previous_layers = chatbot.config["gpu_layers"]
    chatbot.config["model_name"] = model_name
    chatbot.config["gpu_layers"] = gpu_layers
    try:
        chatbot.load_model()
    except Exception as e:
        # Roll back so the app keeps a working model instead of a broken one.
        chatbot.config["model_name"] = previous_name
        chatbot.config["gpu_layers"] = previous_layers
        chatbot.load_model()
        return f"Failed to load {model_name}: {str(e)}. Reverted to {previous_name}."
    chatbot.save_config()
    return f"Model updated to {model_name} with {gpu_layers} GPU layers."
|
91 |
+
|
92 |
+
def update_system_message(system_message):
    """Store a new system message in the shared config and persist it.

    Returns:
        A confirmation string for display in the UI.
    """
    cfg = chatbot.config
    cfg["system_message"] = system_message
    chatbot.save_config()
    return f"System message updated: {system_message}"
|
96 |
+
|
97 |
+
# UI layout: a Chat tab (ChatInterface over `respond`) and a Settings tab
# for switching models and editing the system message at runtime.
with gr.Blocks() as demo:
    gr.Markdown("# Enhanced AI Chatbot")

    with gr.Tab("Chat"):
        chatbot_interface = gr.ChatInterface(
            respond,
            additional_inputs=[
                gr.Textbox(value=chatbot.config["system_message"], label="System message"),
                gr.Slider(minimum=1, maximum=2048, value=chatbot.config["max_tokens"], step=1, label="Max new tokens"),
                gr.Slider(minimum=0.1, maximum=4.0, value=chatbot.config["temperature"], step=0.1, label="Temperature"),
                gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=chatbot.config["top_p"],
                    step=0.05,
                    label="Top-p (nucleus sampling)",
                ),
            ],
        )

    with gr.Tab("Settings"):
        with gr.Group():
            gr.Markdown("### Model Settings")
            model_name_input = gr.Textbox(value=chatbot.config["model_name"], label="Model name")
            gpu_layers_input = gr.Slider(minimum=0, maximum=8, value=chatbot.config["gpu_layers"], step=1, label="GPU layers")
            update_model_button = gr.Button("Update model")
            # In gr.Blocks, `outputs` must be component instances — the
            # string shortcut outputs="text" is only valid for gr.Interface
            # and fails at event-wiring time.
            model_status = gr.Textbox(label="Status", interactive=False)
            update_model_button.click(
                update_model_config,
                inputs=[model_name_input, gpu_layers_input],
                outputs=model_status,
            )

        with gr.Group():
            gr.Markdown("### System Message Settings")
            system_message_input = gr.Textbox(value=chatbot.config["system_message"], label="System message")
            update_system_message_button = gr.Button("Update system message")
            system_status = gr.Textbox(label="Status", interactive=False)
            update_system_message_button.click(
                update_system_message,
                inputs=[system_message_input],
                outputs=system_status,
            )

if __name__ == "__main__":
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|