datawithsuman commited on
Commit
f17dea1
·
verified ·
1 Parent(s): 5d2c737

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -29
app.py CHANGED
@@ -1,14 +1,22 @@
1
  import streamlit as st
2
  import ast
3
  import json
4
- from transformers import AutoTokenizer
5
- import transformers
6
- import torch
 
 
7
 
8
  # import ollama
9
  # from llama_index.llms.ollama import Ollama
10
  # from llama_index.core.llms import ChatMessage
11
 
 
 
 
 
 
 
12
  # Streamlit UI
13
  st.title("Auto Test Case Generation using LLM")
14
 
@@ -38,10 +46,25 @@ if uploaded_files:
38
  return functions
39
 
40
  functions = extract_functions_from_file(file_path)
41
-
42
  list_of_functions = list(functions.keys())
43
  st.write(list_of_functions)
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  # Initialize session state for chat messages
46
  if "messages" not in st.session_state:
47
  st.session_state.messages = []
@@ -64,18 +87,8 @@ if uploaded_files:
64
  else:
65
  snippet = functions[func]
66
 
67
- # model = "codellama/CodeLlama-7b-Instruct-hf"
68
- model = "TinyLlama/TinyLlama_v1.1"
69
-
70
- tokenizer = AutoTokenizer.from_pretrained(model)
71
- pipeline = transformers.pipeline(
72
- "text-generation",
73
- model=model,
74
- torch_dtype=torch.float16,
75
- device_map="auto",
76
- )
77
-
78
- print('Done')
79
 
80
  # Generation
81
  # resp = ollama.generate(model='codellama',
@@ -84,17 +97,18 @@ if uploaded_files:
84
  # \n\nGenerate atleast 5 unit test case. Include couple of edge cases as well.
85
  # \n\nThere should be no duplicate test cases. Avoid generating repeated statements.
86
  # """)
87
- resp = pipeline(
88
- f"""You are a helpful coding assistant. Your task is to generate unit test cases for this function : {snippet}\
89
- \n\nPolitely refuse if the function is not suitable for generating test cases.
90
- \n\nGenerate atleast 5 unit test case. Include couple of edge cases as well.
91
- \n\nThere should be no duplicate test cases. Avoid generating repeated statements.
92
- """,
93
- max_new_tokens=1024
94
- )
95
- print('Response ', resp)
96
- resp_list = [n['generated_text'] for n in resp]
97
- response = " ".join(resp_list)
98
- st.session_state.messages.append({"role": "assistant", "content": f"{resp['response']}"})
99
- st.markdown(resp['response'])
 
100
 
 
1
  import streamlit as st
2
  import ast
3
  import json
4
+ import openai
5
+ from llama_index.llms.openai import OpenAI
6
+ import nest_asyncio
7
+
8
+ nest_asyncio.apply()
9
 
10
  # import ollama
11
  # from llama_index.llms.ollama import Ollama
12
  # from llama_index.core.llms import ChatMessage
13
 
14
+
15
# OpenAI credentials: read the key from the environment and propagate it to
# both the openai client and os.environ (llama_index reads the env var).
key = os.getenv('OPENAI_API_KEY')
if not key:
    # Fail fast with a readable message instead of the TypeError that
    # `os.environ[...] = None` would raise further down.
    st.error("OPENAI_API_KEY is not set. Please configure it and restart.")
    st.stop()
openai.api_key = key
os.environ["OPENAI_API_KEY"] = key
19
+
20
  # Streamlit UI
21
  st.title("Auto Test Case Generation using LLM")
22
 
 
46
  return functions
47
 
48
  functions = extract_functions_from_file(file_path)
 
49
  list_of_functions = list(functions.keys())
50
  st.write(list_of_functions)
51
 
52
def res(prompt, model="gpt-3.5-turbo"):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    Parameters
    ----------
    prompt : str
        User-level request; the caller embeds the function source to test.
    model : str, optional
        Chat model name. Defaults to "gpt-3.5-turbo" (the value the script
        assigns at module level), instead of reading a module-global that
        is only bound later in the script — that reliance would raise
        NameError if this function were called before the assignment.

    Returns
    -------
    str
        Content of the assistant's first completion choice.
    """
    response = openai.chat.completions.create(
        model=model,
        messages=[
            {"role": "system",
             "content": "You are a helpful coding assistant. Your task is to generate test cases. If the function can't be found, politely refuse"
            },
            {"role": "user",
             "content": prompt,
            }
        ],
    )
    return response.choices[0].message.content
67
+
68
  # Initialize session state for chat messages
69
  if "messages" not in st.session_state:
70
  st.session_state.messages = []
 
87
  else:
88
  snippet = functions[func]
89
 
90
+ # Generation
91
+ model = "gpt-3.5-turbo"
 
 
 
 
 
 
 
 
 
 
92
 
93
  # Generation
94
  # resp = ollama.generate(model='codellama',
 
97
  # \n\nGenerate atleast 5 unit test case. Include couple of edge cases as well.
98
  # \n\nThere should be no duplicate test cases. Avoid generating repeated statements.
99
  # """)
100
+
101
+ prompt=f"""You are a helpful coding assistant. Your task is to generate unit test cases for this function : {snippet}\
102
+ \n\nPolitely refuse if the function is not suitable for generating test cases.
103
+ \n\nGenerate atleast 5 unit test case. Include couple of edge cases as well.
104
+ \n\nThere should be no duplicate test cases. Avoid generating repeated statements.
105
+ """
106
+
107
+ print(prompt)
108
+
109
+ resp = res(prompt)
110
+ st.session_state.messages.append({"role": "assistant", "content": f"{resp}"})
111
+ st.markdown(resp)
112
+ # st.session_state.messages.append({"role": "assistant", "content": f"{resp['response']}"})
113
+ # st.markdown(resp['response'])
114