import streamlit as st
import html
from openai import OpenAI
import os

# Load environment variables (expects OPENAI_API_KEY in a local .env file)
from dotenv import load_dotenv

load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# Initialize the OpenAI client
client = OpenAI(api_key=api_key)

def openai_chat(prompt, chat_log):
    system_prompt = (
        "You are a gifted C++ professor. You explain complex C++ concepts clearly using words that a "
        "college student would understand, and generate typical exam questions for a C++ course. "
        "After a few questions, three or four, check in with the student to ask if you are helpful and "
        "if the student is prepared for the exam or stuck on a particular topic, or just needs a cram "
        "session before the exam. Be supportive and motivational. Suggest getting a good night's sleep "
        "and eating properly before the exam when saying goodbye. After answering a question from the "
        "student, suggest three or four C++ final exam questions and related topics when asked anything."
    )

    # System prompt, a seed question, the running chat log, then the new user question.
    context_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": "Explain recursion in C++ programming."},
    ] + chat_log + [{"role": "user", "content": prompt}]

    try:
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=context_messages,
            max_tokens=500
        )
        response_text = html.unescape(completion.choices[0].message.content)
        # Record both sides of the exchange so follow-up calls see the full conversation,
        # not just the assistant's replies.
        chat_log.append({"role": "user", "content": prompt})
        chat_log.append({"role": "assistant", "content": response_text})
        return response_text, chat_log
    except Exception as e:
        return str(e), chat_log


def format_response(answer):
    # Wrap obvious C++ code in a Markdown code fence so Streamlit renders it as a code block
    if 'int main()' in answer or '#include' in answer or 'std::' in answer:
        code_block = "```cpp\n" + answer + "\n```"
        return code_block
    return answer


def main():
    st.title("Professor CplusPlus")
    st.write("Ask any question about C++, and I'll explain!")

    if 'chat_log' not in st.session_state:
        st.session_state.chat_log = []

    if 'history' not in st.session_state:
        st.session_state.history = ""

    user_input = st.text_input("Type your question here:", key="user_input")

    if st.button("Ask") and user_input:
        answer, st.session_state.chat_log = openai_chat(user_input, st.session_state.chat_log)
        formatted_answer = format_response(answer)
        new_entry = f"Q: {user_input}\n\nA: {formatted_answer}\n\n"
        st.session_state.history = new_entry + st.session_state.history
        st.rerun()  # Rerun the script so the updated history renders immediately

    st.write("Chat History:")
    st.markdown(st.session_state.history, unsafe_allow_html=True)


if __name__ == "__main__":
    main()
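
# Usage note (assuming this script is saved as app.py and OPENAI_API_KEY is set in .env):
# launch the app with `streamlit run app.py`. Streamlit serves the chat UI in the browser
# and reruns this script on every interaction, which is why the chat log and history are
# kept in st.session_state.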