Spaces:
Sleeping
Sleeping
ishworrsubedii
committed on
Commit
·
c0e81fb
1
Parent(s):
2874cad
add: logic file for the code summary and streamlit app
Browse files- app.py +44 -0
- logic.py +86 -0
- requirement.txt +4 -0
app.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Created By: ishwor subedi
|
3 |
+
Date: 2024-10-03
|
4 |
+
"""
|
5 |
+
import os
|
6 |
+
import streamlit as st
|
7 |
+
from datetime import datetime
|
8 |
+
|
9 |
+
from groq import Groq
|
10 |
+
from logic import LLMClient, CodeProcessor
|
11 |
+
|
12 |
+
client = Groq(api_key=os.environ["GROQ_API_KEY"])
|
13 |
+
|
14 |
+
st.title("Code Analysis with LLMs")
|
15 |
+
|
16 |
+
code_input_method = st.radio("How would you like to provide your code?", ("Paste Code", "Upload Code File"))
|
17 |
+
|
18 |
+
code_text = ""
|
19 |
+
if code_input_method == "Paste Code":
|
20 |
+
code_text = st.text_area("Paste your code here:")
|
21 |
+
elif code_input_method == "Upload Code File":
|
22 |
+
uploaded_file = st.file_uploader("Upload your code file", type=["py", "txt"])
|
23 |
+
if uploaded_file is not None:
|
24 |
+
code_text = uploaded_file.read().decode("utf-8")
|
25 |
+
|
26 |
+
model_choice = st.selectbox("Select LLM Model", ["llama3-8b-8192", "gpt-4", "gpt-3.5-turbo"])
|
27 |
+
|
28 |
+
if st.button("Analyze Code") and code_text:
|
29 |
+
llm_obj = LLMClient(client)
|
30 |
+
processor = CodeProcessor(llm_obj)
|
31 |
+
|
32 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
33 |
+
markdown_output = processor.process_code(code_text, model_choice)
|
34 |
+
|
35 |
+
st.markdown(markdown_output)
|
36 |
+
|
37 |
+
st.download_button(
|
38 |
+
label="Download Result as Markdown",
|
39 |
+
data=markdown_output,
|
40 |
+
file_name=f"code_analysis_{timestamp}.md",
|
41 |
+
mime="text/markdown"
|
42 |
+
)
|
43 |
+
else:
|
44 |
+
st.write("Please paste or upload your code to analyze.")
|
logic.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from groq import Groq
|
3 |
+
|
4 |
+
|
5 |
+
class LLMClient:
|
6 |
+
def __init__(self, api_client):
|
7 |
+
self.client = api_client
|
8 |
+
|
9 |
+
def chat_completion(self, messages, model="llama3-8b-8192"):
|
10 |
+
completion = self.client.chat.completions.create(
|
11 |
+
messages=messages,
|
12 |
+
model=model,
|
13 |
+
temperature=0.5,
|
14 |
+
stream=False,
|
15 |
+
)
|
16 |
+
return completion.choices[0].message.content
|
17 |
+
|
18 |
+
|
19 |
+
class CodeProcessor:
|
20 |
+
def __init__(self, llm_obj):
|
21 |
+
self.llm_obj = llm_obj
|
22 |
+
|
23 |
+
def process_code(self, code_text, llm_model):
|
24 |
+
summary_generation = [
|
25 |
+
{
|
26 |
+
"role": "system",
|
27 |
+
"content": "You are a knowledgeable and friendly programming assistant. Your task is to provide concise summaries of code snippets, explaining their purpose, required packages, and the underlying logic in a clear and approachable manner."
|
28 |
+
},
|
29 |
+
{
|
30 |
+
"role": "user",
|
31 |
+
"content": f"Please summarize the following code {code_text}, including its purpose, required packages, and the main logic behind it.",
|
32 |
+
}
|
33 |
+
]
|
34 |
+
|
35 |
+
detail_generation = [
|
36 |
+
{
|
37 |
+
"role": "system",
|
38 |
+
"content": "You are a knowledgeable and friendly programming assistant. Your task is to explain each line of code clearly and concisely, providing a brief explanation for what each line does. Strictly explain each line"
|
39 |
+
},
|
40 |
+
{
|
41 |
+
"role": "user",
|
42 |
+
"content": f"Please explain each line of the following code snippet {code_text}, providing a brief explanation of what each line does",
|
43 |
+
}
|
44 |
+
]
|
45 |
+
|
46 |
+
prompt_for_generating_similar_code = [
|
47 |
+
{
|
48 |
+
"role": "system",
|
49 |
+
"content": "You are a code generation assistant. Your task is to analyze the provided code snippet and identify the problem it addresses. Create a prompt that generates similar code with the same functionality. Try to understand what is the application we are building. Focus on understanding the core features, the logic behind the implementation, and any required libraries. Ensure that the prompt captures the essence of what the code is solving without requiring the user to specify the problem."
|
50 |
+
},
|
51 |
+
{
|
52 |
+
"role": "user",
|
53 |
+
"content": f"Here is the code I want to replicate: {code_text}. Please generate a prompt that can produce similar code, focusing on understanding the problem being solved, core features, required packages, and the overall logic behind the implementation without giving a single line of code."
|
54 |
+
}
|
55 |
+
]
|
56 |
+
|
57 |
+
summary_response = self.llm_obj.chat_completion(summary_generation, model=llm_model)
|
58 |
+
print("Summary generation completed............")
|
59 |
+
|
60 |
+
detail_generation_response = self.llm_obj.chat_completion(detail_generation, model=llm_model)
|
61 |
+
print("Detail generation completed............")
|
62 |
+
|
63 |
+
prompt_for_generating_similar_code_response = self.llm_obj.chat_completion(prompt_for_generating_similar_code,
|
64 |
+
model=llm_model)
|
65 |
+
print("Prompt generation completed............")
|
66 |
+
|
67 |
+
markdown_output = f"""
|
68 |
+
# Summary of Code Snippet
|
69 |
+
|
70 |
+
**{summary_response}**
|
71 |
+
|
72 |
+
---
|
73 |
+
|
74 |
+
# Detailed Code Explanation for Snippet
|
75 |
+
|
76 |
+
{detail_generation_response}
|
77 |
+
|
78 |
+
---
|
79 |
+
|
80 |
+
# Similar Code Generation Prompt for Code Snippets
|
81 |
+
|
82 |
+
{prompt_for_generating_similar_code_response}
|
83 |
+
|
84 |
+
---
|
85 |
+
"""
|
86 |
+
return markdown_output
|
requirement.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
|
2 |
+
groq
|
3 |
+
pandas
|
4 |
+
numpy
|