Geraldine committed on
Commit
267b371
1 Parent(s): fa15ad3

Upload 4 files

Files changed (4)
  1. app.py +262 -0
  2. clients.py +143 -0
  3. prompts.json +86 -0
  4. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,262 @@
+ import json
+ import streamlit as st
+ from streamlit_modal import Modal
+ import streamlit.components.v1 as components
+ from clients import OllamaClient, NvidiaClient, GroqClient
+
+ st.set_page_config(
+     page_title="Prompts Library",
+     layout="wide",
+ )
+
+ # Cache the header of the app to prevent re-rendering on each load
+ @st.cache_resource
+ def display_app_header():
+     """Display the header of the Streamlit app."""
+     st.title("Prompts Library")
+     st.subheader("ChatBot with prompt templates")
+
+ # Display the header of the app
+ display_app_header()
+
+ # Some style
+ st.markdown(
+     '<style>div[key^="edit-modal"] {top: 25px;}</style>', unsafe_allow_html=True
+ )
+
+
+ # UI sidebar ##########################################
+ def ui_llm(provider):
+     if api_token := st.sidebar.text_input("Enter your API Key", key=f"API_{provider}"):
+         provider_models = llm_providers[st.session_state["llm_provider"]](
+             api_key=api_token
+         ).list_models_names()
+         if provider_models:
+             llm = st.sidebar.radio(
+                 "Select your model", provider_models, key="llm"
+             )
+         else:
+             st.sidebar.error("Ollama is not running, or there is a problem with the selected LLM provider")
+     else:
+         st.sidebar.warning("You must enter your API key")
+
+ st.sidebar.subheader("Models")
+
+ # LLM
+ llm_providers = {
+     "Cloud Groq": GroqClient,
+     "Cloud Nvidia": NvidiaClient,
+     "Local Ollama": OllamaClient,
+ }
+ if llm_provider := st.sidebar.radio(
+     "Choose your LLM Provider", llm_providers.keys(), key="llm_provider"
+ ):
+     ui_llm(st.session_state["llm_provider"])
+
+ # LLM parameters
+ st.sidebar.subheader("Parameters")
+ max_tokens = st.sidebar.number_input("Token numbers", value=1024, key="max_tokens")
+ temperature = st.sidebar.slider(
+     "Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="temperature"
+ )
+ top_p = st.sidebar.slider(
+     "Top P", min_value=0.0, max_value=1.0, value=0.7, step=0.1, key="top_p"
+ )
+
+ # helpers functions ########################################
+
+ def edit_form(form_name, title=None, source=None, system=None, user=None):
+     """
+     Creates a form for editing a prompt template.
+
+     Args:
+         form_name: The name of the form.
+         title: The title of the prompt template (optional).
+         source: The source of the prompt template (optional).
+         system: The system example instruction (optional).
+         user: The user example instruction (optional).
+
+     Returns:
+         None
+     """
+     with st.form(form_name, clear_on_submit=False, border=True):
+         new_title = st.text_input("Name", value=title)
+         new_source = st.text_input("Source", value=source)
+         new_system = st.text_area("System example instruction", value=system)
+         new_user = st.text_area("User example instruction", value=user)
+         if st.form_submit_button("Submit"):
+             rec = {
+                 "title": new_title,
+                 "source": new_source,
+                 "messages": [
+                     {"role": "system", "content": new_system},
+                     {"role": "user", "content": new_user},
+                 ],
+             }
+             if title is not None:
+                 delete_prompt(title)
+             add_prompt(rec)
+
+
+ def read_prompts_file():
+     """
+     Loads the prompts from the "prompts.json" file.
+
+     Returns:
+         A list of prompt templates.
+     """
+     # Use a context manager so the file handle is closed after reading
+     with open("prompts.json", encoding="utf-8") as prompts_file:
+         return json.load(prompts_file)
+
+
+ def add_prompt(rec):
+     """
+     Adds a new prompt to the "prompts.json" file.
+
+     Args:
+         rec: The new prompt to add.
+
+     Returns:
+         None
+     """
+     with open("prompts.json", "r", encoding="utf-8") as fp:
+         listObj = json.load(fp)
+     listObj.append(rec)
+     print(listObj)
+     with open("prompts.json", "w") as outfile:
+         outfile.write(json.dumps(listObj, indent=4, sort_keys=True))
+     st.rerun()
+
+
+ def edit_prompt(title):
+     """
+     Edits a prompt template.
+
+     Args:
+         title: The title of the prompt to edit.
+
+     Returns:
+         A dictionary containing the edited prompt information.
+     """
+     with open("prompts.json", "r", encoding="utf-8") as fp:
+         listObj = json.load(fp)
+     rec = [i for i in listObj if i["title"].strip() == title.strip()]
+     rec_messages = rec[0]["messages"]
+     return edit_form(
+         "prompt_edit",
+         title=title,
+         source=[x["source"] for x in rec][0],
+         system=[x["content"] for x in rec_messages if x["role"] == "system"][0],
+         user=[x["content"] for x in rec_messages if x["role"] == "user"][0],
+     )
+
+
+ def delete_prompt(title):
+     """
+     Removes a prompt template from the "prompts.json" file.
+
+     Args:
+         title: The title of the prompt to delete.
+     """
+     with open("prompts.json", "r", encoding="utf-8") as fp:
+         listObj = json.load(fp)
+     recs = [i for i in listObj if not (i["title"].strip() == title.strip())]
+     with open("prompts.json", "w") as outfile:
+         outfile.write(json.dumps(recs, indent=4, sort_keys=True))
+
+
+ def get_llm_response(system, prompt):
+     """
+     Generates a response from the selected LLM.
+
+     Args:
+         system: The system input from the user.
+         prompt: The user prompt.
+
+     Returns:
+         The response from the LLM.
+     """
+     options = dict(
+         max_tokens=st.session_state["max_tokens"],
+         top_p=st.session_state["top_p"],
+         temperature=st.session_state["temperature"],
+     )
+     return llm_providers[st.session_state["llm_provider"]](
+         api_key=st.session_state[f"API_{st.session_state['llm_provider']}"],
+         model=st.session_state["llm"],
+     ).api_chat_completion(system, prompt, **options)
+
+
+ def generate(system, prompt):
+     st.session_state.messages.append({"role": "system", "content": system})
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     with st.chat_message("assistant"):
+         # get_llm_response() reads the provider from session state itself,
+         # so it only takes the system and user messages as arguments
+         response = get_llm_response(system, prompt)
+         st.markdown(response)
+     # Add assistant response to chat history
+     st.session_state.messages.append({"role": "assistant", "content": response})
+
+
+ # UI main #####################################################
+
+ tab1, tab2 = st.tabs(["Prompts Library", "Chatbot"])
+ with tab1:
+     new_modal = Modal(
+         "Add prompt",
+         key="edit-modal",
+     )
+     if new_prompt_modal := st.button("➕ Add a prompt template"):
+         new_modal.open()
+     if new_modal.is_open():
+         with new_modal.container():
+             edit_form("prompt_add")
+     prompts = read_prompts_file()
+     grids = range(1, len(prompts) + 1)
+     cols = st.columns([1, 1])
+     wcol = 2
+     for f, b in zip(prompts, grids):
+         col = cols[b % wcol]
+         with col:
+             with st.expander(f["title"].upper()):
+                 if st.button(f"✔️ Select prompt {f['title'].upper()} and go to Chatbot tab", type="secondary"):
+                     # can do better here
+                     st.session_state["init_messages"] = f["messages"]
+                     st.session_state.init_system = f["messages"][0]["content"]
+                     st.session_state.init_user = f["messages"][1]["content"]
+                 edit_modal = Modal(
+                     f"Edit prompt {f['title'].upper()}",
+                     key=f"edit-modal_{f['title']}",
+                 )
+                 if edit_prompt_modal := st.button(
+                     f"✏️ Edit {f['title'].upper()}", type="secondary"
+                 ):
+                     edit_modal.open()
+                 if edit_modal.is_open():
+                     with edit_modal.container():
+                         edit_prompt(f["title"])
+                 st.write(f"Source : {f['source']}")
+                 st.markdown(f"- System : {f['messages'][0]['content']}")
+                 st.markdown(f"- User: {f['messages'][1]['content']}")
+                 st.divider()
+                 if st.button(f"❌ Delete prompt {f['title'].upper()}", type="primary"):
+                     delete_prompt(f["title"])
+                     st.rerun()
+ with tab2:
+     # Initialize chat history
+     if "messages" not in st.session_state:
+         st.session_state.messages = []
+     # Display chat messages from history on app rerun
+     for message in st.session_state.messages:
+         with st.chat_message(message["role"]):
+             st.markdown(message["content"])
+     # React to user input
+     if "init_messages" in st.session_state:
+         system = st.text_area(":blue[System]", key="init_system")
+         prompt = st.text_area(":blue[User]", key="init_user")
+     else:
+         system = st.text_area(":blue[System]")
+         prompt = st.text_area(":blue[User]")
+     if st.button("Generate", type="primary"):
+         generate(system, prompt)
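
A note for readers skimming the diff: edit_form() and add_prompt() define the data contract between app.py and prompts.json. Each stored prompt is a record shaped like the minimal sketch below (the field values are illustrative placeholders, not data from this repository):

    rec = {
        "title": "my-prompt",  # hypothetical example values
        "source": "where the prompt template comes from",
        "messages": [
            {"role": "system", "content": "system example instruction"},
            {"role": "user", "content": "user example instruction"},
        ],
    }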
clients.py ADDED
@@ -0,0 +1,143 @@
+ import requests
+ import json
+
+
+ class OllamaClient:
+     def __init__(
+         self,
+         api_key=None,
+         model=None,
+     ):
+         self.base_url = "http://localhost:11434"
+         self.headers = {"Content-Type": "application/json"}
+         self.api_key = api_key
+         self.model = model
+
+     def list_models(self):
+         url = f"{self.base_url}/api/tags"
+         try:
+             response = requests.get(url)
+             response.raise_for_status()  # Raise an exception for HTTP errors (status codes 4xx and 5xx)
+             return response.json()  # return the parsed JSON response
+         except requests.exceptions.HTTPError as http_err:
+             print(f"HTTP error occurred: {http_err}")
+         except Exception as err:
+             print(f"Other error occurred: {err}")
+
+     def list_models_names(self):
+         models = self.list_models()
+         return [x["name"] for x in models["models"]]
+
+     def api_chat_completion(self, system, prompt, **options):
+         url = f"{self.base_url}/api/chat"
+         options = (
+             options
+             if options  # an empty kwargs dict falls back to the defaults
+             else {"max_tokens": 1024, "top_p": 0.7, "temperature": 0.7}
+         )
+         payload = json.dumps(
+             {
+                 "model": self.model,
+                 "messages": [
+                     {"role": "system", "content": system},
+                     {"role": "user", "content": prompt},
+                 ],
+                 "options": {
+                     "num_ctx": options["max_tokens"],
+                     "top_p": options["top_p"],
+                     "temperature": options["temperature"],
+                     # stop_sequences=["<|prompter|>","<|assistant|>","</s>"]
+                 },
+                 "stream": False,
+             }
+         )
+         response = requests.request("POST", url, headers=self.headers, data=payload)
+         return response.json()["message"]["content"]
+
+
+ class NvidiaClient:
+     def __init__(self, api_key=None, model=None):
+         self.base_url = "https://integrate.api.nvidia.com/v1"
+         self.api_key = api_key
+         self.headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {self.api_key}",
+         }
+         self.model = model
+
+     def list_models(self):
+         url = f"{self.base_url}/models"
+         response = requests.request(
+             "GET", url
+         )  # api_key is not needed to list the available models
+         return response.json()
+
+     def list_models_names(self):
+         models = self.list_models()
+         return [x["id"] for x in models["data"]]
+
+     def api_chat_completion(self, system, prompt, **options):
+         url = f"{self.base_url}/chat/completions"
+         options = (
+             options
+             if options  # an empty kwargs dict falls back to the defaults
+             else {"max_tokens": 1024, "top_p": 0.7, "temperature": 0.7}
+         )
+         payload = json.dumps(
+             {
+                 "model": self.model,
+                 "messages": [
+                     {"role": "system", "content": system},
+                     {"role": "user", "content": prompt},
+                 ],
+                 "temperature": options["temperature"],
+                 "top_p": options["top_p"],
+                 "max_tokens": options["max_tokens"],
+                 "stream": False,
+             }
+         )
+         response = requests.request("POST", url, headers=self.headers, data=payload)
+         return response.json()["choices"][0]["message"]["content"]
+
+
+ class GroqClient:
+     def __init__(self, api_key=None, model=None):
+         self.base_url = "https://api.groq.com/openai/v1"
+         self.api_key = api_key
+         self.headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {self.api_key}",
+         }
+         self.model = model
+
+     def list_models(self):
+         url = f"{self.base_url}/models"
+         response = requests.request("GET", url, headers=self.headers)
+         return response.json()
+
+     def list_models_names(self):
+         models = self.list_models()
+         return [x["id"] for x in models["data"]]
+
+     def api_chat_completion(self, system, prompt, **options):
+         url = f"{self.base_url}/chat/completions"
+         options = (
+             options
+             if options  # an empty kwargs dict falls back to the defaults
+             else {"max_tokens": 1024, "top_p": 0.7, "temperature": 0.7}
+         )
+         payload = json.dumps(
+             {
+                 "model": self.model,
+                 "messages": [
+                     {"role": "system", "content": system},
+                     {"role": "user", "content": prompt},
+                 ],
+                 "temperature": options["temperature"],
+                 "top_p": options["top_p"],
+                 "max_tokens": options["max_tokens"],
+                 "stream": False,
+             }
+         )
+         response = requests.request("POST", url, headers=self.headers, data=payload)
+         return response.json()["choices"][0]["message"]["content"]
prompts.json ADDED
@@ -0,0 +1,86 @@
+ [
+     {
+         "messages": [
+             {
+                 "content": "Your task is to take the code snippet provided and explain it in simple, easy-to-understand language. Break down the code's functionality, purpose, and key components. Use analogies, examples, and plain terms to make the explanation accessible to someone with minimal coding knowledge. Avoid using technical jargon unless absolutely necessary, and provide clear explanations for any jargon used. The goal is to help the reader understand what the code does and how it works at a high level.",
+                 "role": "system"
+             },
+             {
+                 "content": "import random def bubble_sort(arr): n = len(arr) for i in range(n-1): for j in range(n-i-1): if arr[j] > arr[j+1]: arr[j], arr[j+1] = arr[j+1], arr[j] return arr numbers = [random.randint(1, 100) for _ in range(10)] print(\u201cUnsorted array:\u201d, numbers) sorted_numbers = bubble_sort(numbers) print(\u201cSorted array:\u201d, sorted_numbers)",
+                 "role": "user"
+             }
+         ],
+         "source": "Anthropic https://docs.anthropic.com/en/prompt-library",
+         "title": "code-clarifier"
+     },
+     {
+         "messages": [
+             {
+                 "content": "Your task is to take the unstructured text provided and convert it into a well-organized table format using JSON. Identify the main entities, attributes, or categories mentioned in the text and use them as keys in the JSON object. Then, extract the relevant information from the text and populate the corresponding values in the JSON object. Ensure that the data is accurately represented and properly formatted within the JSON structure. The resulting JSON table should provide a clear, structured overview of the information presented in the original text.",
+                 "role": "system"
+             },
+             {
+                 "content": "Silvermist Hollow, a charming village, was home to an extraordinary group of individuals. Among them was Dr. Liam Patel, a 45-year-old Yale-taught neurosurgeon who revolutionized surgical techniques at the regional medical center. Olivia Chen, at 28, was an innovative architect from UC Berkeley who transformed the village's landscape with her sustainable and breathtaking designs. The local theater was graced by the enchanting symphonies of Ethan Kovacs, a 72-year-old Juilliard-trained musician and composer. Isabella Torres, a self-taught chef with a passion for locally sourced ingredients, created a culinary sensation with her farm-to-table restaurant, which became a must-visit destination for food lovers. These remarkable individuals, each with their distinct talents, contributed to the vibrant tapestry of life in Silvermist Hollow.",
+                 "role": "user"
+             }
+         ],
+         "source": "Anthropic https://docs.anthropic.com/en/prompt-library",
+         "title": "data extractor"
+     },
+     {
+         "messages": [
+             {
+                 "content": "You are an AI assistant with expertise in LaTeX, a document preparation system widely used for academic and technical writing. Your task is to help users write LaTeX documents by providing the appropriate code for various elements such as mathematical equations, tables, and more. Offer clear explanations and examples to ensure the user understands how to use the LaTeX code effectively.",
+                 "role": "system"
+             },
+             {
+                 "content": "I need to create a simple table with three columns and two rows. The header row should contain the titles \u201cName,\u201d \u201cAge,\u201d and \u201cCity.\u201d The data row should have the values \u201cJohn,\u201d \u201c25,\u201d and \u201cNew York.\u201d",
+                 "role": "user"
+             }
+         ],
+         "source": "Anthropic https://docs.anthropic.com/en/prompt-library/latex-legend",
+         "title": "LaTeX Expert"
+     },
+     {
+         "messages": [
+             {
+                 "content": "Your task is to review the provided meeting notes and create a concise summary that captures the essential information, focusing on key takeaways and action items assigned to specific individuals or departments during the meeting. Use clear and professional language, and organize the summary in a logical manner using appropriate formatting such as headings, subheadings, and bullet points. Ensure that the summary is easy to understand and provides a comprehensive but succinct overview of the meeting\u2019s content, with a particular focus on clearly indicating who is responsible for each action item.",
+                 "role": "system"
+             },
+             {
+                 "content": "Meeting notes:\n\nDate: Verona, Italy - Late 16th century\n\nAttendees:\n- Lord Capulet (Head of the Capulet family)\n- Lord Montague (Head of the Montague family)\n- Prince Escalus (Ruler of Verona)\n- Friar Laurence (Religious advisor)\n\nAgenda:\n1. Address the ongoing feud between the Capulet and Montague families\n2. Discuss the secret marriage of Romeo Montague and Juliet Capulet\n3. Develop a plan to bring peace to Verona\n4. Address the tragic deaths of Romeo and Juliet\n\nDiscussion:\n- Prince Escalus opened the meeting by expressing his grave concern over the long-standing feud between the Capulet and Montague families. He admonished both Lord Capulet and Lord Montague for the recent violent clashes that have disturbed the peace in Verona\u2019s streets. The Prince warned that further violence would result in severe consequences, including heavy fines and potential exile for the perpetrators.\n- Friar Laurence then broached the topic of the secret marriage between Romeo Montague and Juliet Capulet, which had taken place under his guidance. Lord Capulet and Lord Montague evidently had not known about it, and reacted with anger and disbelief. However, Friar Laurence urged them to consider the profound and tragic love shared by their children and the potential for this love to heal the rift between the families going forward.\n- Prince Escalus proposed a formal truce between the Capulet and Montague families. He demanded that both sides lay down their arms and cease all hostile actions against one another. The Prince declared that any violation of the truce would result in severe punishments, including the possibility of exile or even execution. Lord Capulet and Lord Montague, recognizing the wisdom in the Prince\u2019s words and the necessity of peace for the well-being of their families and the city, grudgingly agreed to the terms of the truce.\n- The meeting took a somber turn as the tragic deaths of Romeo and Juliet were addressed. Friar Laurence recounted the unfortunate series of events that led to the young lovers taking their own lives, emphasizing the devastating impact of the families\u2019 hatred on their innocent children. Lord Capulet and Lord Montague, overcome with grief and remorse, acknowledged that their blind hatred had ultimately caused the loss of their beloved children.\n- Prince Escalus called upon the families to learn from this heartbreaking tragedy and to embrace forgiveness and unity in honor of Romeo and Juliet\u2019s memory. He urged them to work together to create a lasting peace in Verona, setting aside their long-standing animosity. Friar Laurence offered his support in mediating any future disputes and providing spiritual guidance to help the families heal and move forward.\n- As the meeting drew to a close, Lord Capulet and Lord Montague pledged to put an end to their feud and work towards reconciliation. Prince Escalus reaffirmed his commitment to ensuring that the truce would be upheld, promising swift justice for any who dared to break it.\n- The attendees agreed to meet regularly to discuss the progress of their reconciliation efforts and to address any challenges that may arise.",
+                 "role": "user"
+             }
+         ],
+         "source": "Anthropic https://docs.anthropic.com/en/prompt-library/meeting-scribe",
+         "title": "Meeting notes rewriter"
+     },
+     {
+         "messages": [
+             {
+                 "content": "Your task is to analyze the provided Python code snippet and suggest improvements to optimize its performance. Identify areas where the code can be made more efficient, faster, or less resource-intensive. Provide specific suggestions for optimization, along with explanations of how these changes can enhance the code\u2019s performance. The optimized code should maintain the same functionality as the original code while demonstrating improved efficiency.",
+                 "role": "system"
+             },
+             {
+                 "content": "def fibonacci(n):\nif n <= 0:\nreturn []\nelif n == 1:\nreturn [0]\nelif n == 2:\nreturn [0, 1]\nelse:\nfib = [0, 1]\nfor i in range(2, n):\nfib.append(fib[i-1] + fib[i-2])\nreturn fib",
+                 "role": "user"
+             }
+         ],
+         "source": "Anthropic https://docs.anthropic.com/en/prompt-library/code-consultant",
+         "title": "Code consultant"
+     },
+     {
+         "messages": [
+             {
+                 "content": "test",
+                 "role": "system"
+             },
+             {
+                 "content": "test",
+                 "role": "user"
+             }
+         ],
+         "source": "ys",
+         "title": "test"
+     }
+ ]
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ requests
+ streamlit
+ streamlit-modal
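
Note: with the three dependencies installed (for example via "pip install -r requirements.txt"), a Streamlit app like this one is normally launched with "streamlit run app.py" from the repository root. The "Local Ollama" provider additionally assumes an Ollama server reachable at http://localhost:11434, as hard-coded in clients.py.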