File size: 1,549 Bytes
10110b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
from llama_cpp.llama import Llama
import streamlit as st

# Load Model
# Streamlit re-runs this entire script on every user interaction, so an
# uncached top-level load would re-read the multi-hundred-MB GGUF file on
# every button click. st.cache_resource loads it once per server process.
@st.cache_resource
def _load_model() -> "Llama":
    """Download (if needed) and load the Phi-3-mini GGUF model for CPU inference."""
    return Llama.from_pretrained(
        repo_id="microsoft/Phi-3-mini-4k-instruct-gguf",
        filename="*q4.gguf",   # pick the q4-quantized weights file
        n_gpu_layers=0,        # CPU-only: offload no layers to GPU
        n_ctx=2048,            # context window (model supports up to 4k)
        verbose=False,
    )

try:
    llm = _load_model()
except Exception as e:
    # Broad catch is deliberate: any load failure (download, file, memory)
    # is surfaced in the UI and the app degrades to a disabled state.
    st.error(f"Error loading model: {e}")
    llm = None

# Streamlit App
# Page header and description for the generator UI.
st.title("JUnit Test Case Generator")
st.write("Generate JUnit test cases for Java Method using Generative AI.")

# Text Area for Input
# Raw Java method source pasted by the user; empty string until filled in.
java_method = st.text_area("Enter Java Method", height=300)

# Generate Button
# Flat guard chain (model missing -> empty input -> happy path) replaces the
# original three-deep nesting; all user-facing messages are unchanged.
if st.button("Generate JUnit Test Cases"):
    if llm is None:
        # Model failed to load at startup; nothing we can do per-request.
        st.error("Model not loaded. Please check the setup.")
    elif not java_method.strip():
        st.warning("Please enter a valid Java Method.")
    else:
        prompt = f"Write JUnit test cases for this function. Provide only the Java code without any explanation.\n\n{java_method}"
        try:
            # Generate test cases; low temperature keeps output deterministic-ish code.
            response = llm.create_chat_completion(
                messages=[
                    {"role": "user", "content": prompt}
                ],
                response_format={"type": "text"},
                temperature=0.3,
                top_k=30
            )
            # OpenAI-style response shape: choices[0].message.content holds the text.
            output = response['choices'][0]['message']['content']
            st.text_area("Generated Test Cases", output, height=300)
        except Exception as e:
            # Surface inference failures (context overflow, runtime errors) in the UI.
            st.error(f"Error generating test cases: {e}")