Abhaykoul commited on
Commit
34a1bb4
•
1 Parent(s): 16830f5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -25
app.py CHANGED
@@ -1,59 +1,76 @@
1
  import streamlit as st
2
  import google.generativeai as palm
3
  from gradio_client import Client
 
 
4
 
5
  # Initialize the Gradio client with the API URL
6
  client = Client("https://akdeniz27-llama-2-70b-chat-hf-with-easyllm.hf.space/")
7
 
8
  # Information about obtaining a free API key
9
- st.sidebar.info("Get your free palm2 API key at [makersuite.google.com/app/apikey](https://makersuite.google.com/app/apikey)")
10
 
11
  # Ask the user for palm2 API key
12
- api_key_palm2 = st.sidebar.text_input("Enter your palm2 API key for study notes:", type="password")
13
  palm.configure(api_key=api_key_palm2)
14
 
15
  # Styling for the title
16
- st.title("Auto Study Notes Generator")
17
  st.markdown("---")
18
 
19
  # Sidebar for settings
20
- st.sidebar.title("Settings")
21
 
22
  # User choice for mode selection
23
- selected_mode = st.sidebar.radio("Select Mode:", ['Generate Study Notes (Palm2)', 'Use Llama 70b for Notes'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  # Main content area
26
  if selected_mode == 'Generate Study Notes (Palm2)':
27
- st.header("Study Notes Generation (Palm2)")
28
- user_class = st.sidebar.selectbox('Select your class:', ['Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5', 'Class 6',
29
- 'Class 7', 'Class 8', 'Class 9', 'Class 10', 'Class 11', 'Class 12'])
30
- user_input = st.text_input(f'Enter your study topic for {user_class}:')
31
- st.markdown("---")
 
32
 
33
- if st.button('Generate Study Notes'):
34
  if user_input.lower() in ['quit', 'exit', 'bye']:
35
- st.success("Goodbye! Have a great day!")
36
  else:
37
- with st.spinner("Generating study notes. Please wait..."):
38
- st.subheader(f"Making notes for you on '{user_input}'")
39
  prompt = f"Provide study notes for {user_class} on the topic: {user_input}."
40
  response = palm.generate_text(model='models/text-bison-001', prompt=prompt)
41
  study_notes = response.result
42
 
43
  # Display the generated study notes
44
- st.subheader(f"Study Notes for {user_class} - {user_input}:")
45
  st.write(study_notes)
46
 
47
  elif selected_mode == 'Use Llama 70b for Notes':
48
- st.header("Llama 70b Mode")
49
- llama_input = st.text_input('Enter a message for Llama 70b (type "exit" to quit):')
50
- st.markdown("---")
51
 
52
- if st.button('Get Llama 70b Response'):
 
 
 
53
  if llama_input.lower() == 'exit':
54
- st.success("Exiting Llama 70b mode. Have a great day!")
55
  else:
56
- with st.spinner("Getting response from Llama 70b. Please wait..."):
57
  # Make a prediction using Llama 70b API
58
  llama_result = client.predict(
59
  llama_input,
@@ -63,13 +80,17 @@ elif selected_mode == 'Use Llama 70b for Notes':
63
  # Check if the result is not None
64
  if llama_result is not None:
65
  # Display the result
66
- st.subheader("Llama 70b Response:")
67
  st.write(llama_result)
68
  else:
69
- st.warning("Llama 70b API response was None. Please try again later.")
70
 
71
- # Add a footer with updated text
72
  st.sidebar.markdown("---")
 
 
 
 
73
  st.sidebar.text("Β© 2023 HelpingAI")
74
 
75
  # Hide Streamlit menu
@@ -77,4 +98,4 @@ st.markdown("""
77
  <style>
78
  #MainMenu {visibility: hidden;}
79
  </style>
80
- """, unsafe_allow_html=True)
 
import streamlit as st
import google.generativeai as palm
from gradio_client import Client
import time
import threading

# Initialize the Gradio client against the hosted Llama-2-70b chat Space.
client = Client("https://akdeniz27-llama-2-70b-chat-hf-with-easyllm.hf.space/")

# Information about obtaining a free API key.
# (Emoji restored: the original strings were UTF-8 mojibake, e.g. "πŸ”‘".)
st.sidebar.info("🔑 Get your free palm2 API key at [makersuite.google.com/app/apikey](https://makersuite.google.com/app/apikey)")

# Ask the user for a palm2 API key and configure the Palm client with it.
# NOTE(review): this runs even when the field is empty — palm.configure is
# called with an empty key until the user types one.
api_key_palm2 = st.sidebar.text_input("🔒 Enter your palm2 API key for study notes:", type="password")
palm.configure(api_key=api_key_palm2)

# Page title.
st.title("🚀 Auto Study Notes Generator")
st.markdown("---")

# Sidebar for settings.
st.sidebar.title("⚙️ Settings")

# User choice for mode selection.
selected_mode = st.sidebar.radio("🔍 Select Mode:", ['Generate Study Notes (Palm2)', 'Use Llama 70b for Notes'])
# Initialize view count.
# NOTE(review): this module-level global is reset to 0 on every Streamlit
# rerun, so the displayed count never meaningfully grows; persist it in
# st.session_state or an external store for a real counter.
view_count = 0


def update_view_count():
    """Increment the global ``view_count`` every 5 seconds (simulated views).

    Runs forever; intended to be executed on a background daemon thread.
    """
    global view_count
    while True:
        time.sleep(5)  # simulate one new view every 5 seconds
        view_count += 1


# Start the background counter thread.
# FIX: daemon=True — the original non-daemon thread ran an infinite loop and
# would keep the interpreter from ever exiting cleanly.
# NOTE(review): Streamlit re-executes this script on every interaction, so a
# fresh thread is spawned per rerun; guard with st.session_state to avoid
# accumulating threads — TODO confirm desired behavior.
thread = threading.Thread(target=update_view_count, daemon=True)
thread.start()
40
 
# Main content area: Palm2 study-notes mode.
# (Emoji restored from UTF-8 mojibake; indentation reconstructed — the diff
# rendering had stripped it.)
if selected_mode == 'Generate Study Notes (Palm2)':
    st.header("📚 Study Notes Generation (Palm2)")

    # User input for class and study topic.
    user_class = st.sidebar.selectbox('👩‍🎓 Select your class:',
                                      ['Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5', 'Class 6',
                                       'Class 7', 'Class 8', 'Class 9', 'Class 10', 'Class 11', 'Class 12'])
    user_input = st.text_input(f'✏️ Enter your study topic for {user_class}:', placeholder='e.g., History')

    if st.button('🚀 Generate Study Notes', key="generate_notes", help="Click to generate study notes"):
        if user_input.lower() in ['quit', 'exit', 'bye']:
            st.success("👋 Goodbye! Have a great day!")
        else:
            with st.spinner("⌛ Generating study notes. Please wait..."):
                # Ask Palm2 (text-bison) for notes on the chosen topic.
                prompt = f"Provide study notes for {user_class} on the topic: {user_input}."
                response = palm.generate_text(model='models/text-bison-001', prompt=prompt)
                study_notes = response.result

            # Display the generated study notes.
            st.subheader(f"📚 Study Notes for {user_class} - {user_input}")
            st.write(study_notes)
62
 
63
  elif selected_mode == 'Use Llama 70b for Notes':
64
+ st.header("πŸ¦™ Llama 70b Mode")
 
 
65
 
66
+ # User input for Llama 70b mode
67
+ llama_input = st.text_input('πŸ’¬ Enter a message for Llama 70b (type "exit" to quit):', placeholder='e.g., Tell me a joke')
68
+
69
+ if st.button('πŸ” Get Llama 70b Response', key="get_llama_response", help="Click to get Llama 70b response"):
70
  if llama_input.lower() == 'exit':
71
+ st.success("πŸ‘‹ Exiting Llama 70b mode. Have a great day!")
72
  else:
73
+ with st.spinner("βŒ› Getting response from Llama 70b. Please wait..."):
74
  # Make a prediction using Llama 70b API
75
  llama_result = client.predict(
76
  llama_input,
 
80
  # Check if the result is not None
81
  if llama_result is not None:
82
  # Display the result
83
+ st.subheader("πŸ¦™ Llama 70b Response")
84
  st.write(llama_result)
85
  else:
86
+ st.warning("⚠️ Llama 70b API response was None. Please try again later.")
87
 
# Show live view count.
# NOTE(review): `view_count` is re-initialized to 0 on each Streamlit rerun,
# so this will effectively always display a very small number — confirm
# whether a persistent counter was intended.
st.sidebar.markdown("---")
st.sidebar.subheader("👀 Live View Count:")  # emoji restored from mojibake
st.sidebar.write(view_count)

# Footer (© restored from mojibake "Β©").
st.sidebar.text("© 2023 HelpingAI")
95
 
96
  # Hide Streamlit menu
 
98
  <style>
99
  #MainMenu {visibility: hidden;}
100
  </style>
101
+ """, unsafe_allow_html=True)